diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..a2381c55 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,30 @@ +version: 2 +updates: +- package-ecosystem: pip + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 + ignore: + - dependency-name: black + versions: + - 21.4b0 + - 21.4b1 + - dependency-name: pylint + versions: + - 2.6.2 + - 2.7.0 + - 2.7.1 + - 2.7.2 + - 2.7.3 + - 2.7.4 + - 2.8.1 + - dependency-name: tox + versions: + - 3.21.3 + - 3.22.0 +- package-ecosystem: docker + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000..abcdedab --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,82 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "develop", "master" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "develop" ] + schedule: + - cron: '38 9 * * 2' + +jobs: + analyze: + name: Analyze + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners + # Consider using larger runners for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹī¸ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml deleted file mode 100644 index 1175ed01..00000000 --- a/.github/workflows/docker-publish.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Docker - -on: - push: - # Publish `develop` as Docker `latest` image. - branches: - - develop - - # Publish `v1.2.3` tags as releases. - tags: - - '*' - - # Run tests for any PRs. - pull_request: - -env: - IMAGE_NAME: salt-sproxy - -jobs: - # Run tests. - # See also https://docs.docker.com/docker-hub/builds/automated-testing/ - test: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Run tests - run: | - if [ -f docker-compose.test.yml ]; then - docker-compose --file docker-compose.test.yml build - docker-compose --file docker-compose.test.yml run sut - else - docker build . --file Dockerfile - fi - - # Push image to GitHub Packages. - # See also https://docs.docker.com/docker-hub/builds/ - push: - # Ensure test job passes before pushing image. - needs: test - - runs-on: ubuntu-latest - if: github.event_name == 'push' - - steps: - - uses: actions/checkout@v2 - - - name: Build image - run: docker build . --file Dockerfile --tag $IMAGE_NAME - - - name: Log into GitHub Container Registry - run: echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin - - - name: Push image to GitHub Container Registry - run: | - IMAGE_ID=ghcr.io/${{ github.repository_owner }}/$IMAGE_NAME - - # Change all uppercase to lowercase - IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') - - # Strip git ref prefix from version - VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') - - # Strip "v" prefix from tag name - [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') - - # Publish develop under the develop tag - [ "$VERSION" == "develop" ] && VERSION=develop - - # Publish master under the latest tag - [ "$VERSION" == "master" ] && VERSION=latest - - echo IMAGE_ID=$IMAGE_ID - echo VERSION=$VERSION - - docker tag $IMAGE_NAME $IMAGE_ID:$VERSION - docker push $IMAGE_ID:$VERSION diff --git a/.github/workflows/gh-reg-publish.yml b/.github/workflows/gh-reg-publish.yml new file mode 100644 index 00000000..3117ff72 --- /dev/null +++ b/.github/workflows/gh-reg-publish.yml @@ -0,0 +1,98 @@ +name: Docker Publish + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +on: + push: + branches: + - develop + - master + tags: + - "v*.*.*" + - ".*" + # Run tests for any PRs. 
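+  # Note: pushes to the branches/tags above both publish and sign the image;
+  # pull_request events run build-only, since the push and signing steps
+  # below are gated on github.event_name != 'pull_request'.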
+ pull_request: + +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as / + IMAGE_NAME: ${{ github.repository }} + + +jobs: + build: + + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + # This is used to complete the identity challenge + # with sigstore/fulcio when running outside of PRs. + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Install the cosign tool except on PR + # https://github.com/sigstore/cosign-installer + - name: Install cosign + if: github.event_name != 'pull_request' + uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 #v3.1.1 + with: + cosign-release: 'v2.1.1' + + # Workaround: https://github.com/docker/build-push-action/issues/461 + - name: Setup Docker buildx + uses: docker/setup-buildx-action@79abd3f86f79a9d68a23c75a09a9a85889262adf + + # Login against a Docker registry except on PR + # https://github.com/docker/login-action + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.REGISTRY_TOKEN }} + + # Extract metadata (tags, labels) for Docker + # https://github.com/docker/metadata-action + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + # Build and push Docker image with Buildx (don't push on PR) + # https://github.com/docker/build-push-action + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a + with: + context: . + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + + # Sign the resulting Docker image digest except on PRs. + # This will only write to the public Rekor transparency log when the Docker + # repository is public to avoid leaking data. If you would like to publish + # transparency data even for private images, pass --force to cosign below. + # https://github.com/sigstore/cosign + - name: Sign the published Docker image + if: ${{ github.event_name != 'pull_request' }} + env: + # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable + TAGS: ${{ steps.meta.outputs.tags }} + DIGEST: ${{ steps.build-and-push.outputs.digest }} + # This step uses the identity token to provision an ephemeral certificate + # against the sigstore community Fulcio instance. 
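+      # The step below signs each pushed tag at its immutable digest, using
+      # keyless signing via the GitHub OIDC token. As a sketch (assuming
+      # cosign >= v2.0 and this repository's workflow identity), a consumer
+      # could verify the published image with:
+      #   cosign verify ghcr.io/mirceaulinic/salt-sproxy:develop \
+      #     --certificate-oidc-issuer https://token.actions.githubusercontent.com \
+      #     --certificate-identity-regexp 'https://github.com/mirceaulinic/salt-sproxy/.*'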
+ run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 8678b3a6..5d28416f 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -15,12 +15,12 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7] + python-version: [3.8, 3.9, "3.10", "3.11"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - name: Install dependencies diff --git a/.github/workflows/pythonpublish.yml b/.github/workflows/pythonpublish.yml index 8a8592f7..40b25dd8 100644 --- a/.github/workflows/pythonpublish.yml +++ b/.github/workflows/pythonpublish.yml @@ -9,11 +9,11 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v3 with: - python-version: '3.6' + python-version: '3.x' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.pylintrc b/.pylintrc index 2244d887..092d1165 100644 --- a/.pylintrc +++ b/.pylintrc @@ -156,6 +156,10 @@ disable=R, E8731, W0232, C0415, + E9405, + E9402, + E0599, + W1699, 3rd-party-local-module-not-gated, pep8-reserved-keywords diff --git a/Dockerfile b/Dockerfile index 74b2bfce..35cfb721 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,14 @@ -FROM python:3.7.9-slim-stretch +FROM python:3.11-slim-bookworm MAINTAINER ping@mirceaulinic.net -ARG SALT_VERSION="2019.2.5" +ARG SALT_VERSION="3006.1" COPY ./ /var/cache/salt-sproxy/ COPY ./master /etc/salt/master RUN apt-get update \ - && apt-get install -y python-zmq gcc \ + && apt-get install -y python3-zmq gcc \ && pip --no-cache-dir install salt==$SALT_VERSION \ && pip --no-cache-dir install /var/cache/salt-sproxy/ \ && rm -rf /var/cache/salt-sproxy/ \ diff --git a/LICENSE b/LICENSE index e11f53eb..b0411549 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2019-2020 Mircea Ulinic + Copyright 2019-2023 Mircea Ulinic Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 5d19ade7..7cca74f7 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ In brief, here are some benefits you can get by using *salt-sproxy*: - Say goodbye to the burden of managing hundreds of system services for the Proxy Minion processes. - Reuse your existing extension modules, templates, Pillars, States, etc., you - may have already developed in your environment, transparently. + may have already developed in your Salt environment, transparently. - You can run it locally, on your own computer. - You can use *salt-sproxy* to uniformly manage network devices, servers (either using regular Minions, or [SSH]( @@ -321,8 +321,8 @@ Docker ------ A Docker image is available at -[https://hub.docker.com/r/mirceaulinic/salt-sproxy](https://hub.docker.com/r/mirceaulinic/salt-sproxy), -and you can pull it, e.g., ``docker pull mirceaulinic/salt-sproxy``. 
See +[https://github.com/mirceaulinic/salt-sproxy/pkgs/container/salt-sproxy](https://github.com/mirceaulinic/salt-sproxy/pkgs/container/salt-sproxy), +and you can pull it, e.g., ``docker pull ghcr.io/mirceaulinic/salt-sproxy:develop``. See [https://salt-sproxy.readthedocs.io/en/latest/#docker](https://salt-sproxy.readthedocs.io/en/latest/#docker) for further usage instructions and examples. diff --git a/README.rst b/README.rst index ede2656d..953a4288 100644 --- a/README.rst +++ b/README.rst @@ -59,7 +59,7 @@ In brief, here are some benefits you can get by using *salt-sproxy*: - Say goodbye to the burden of managing hundreds of system services for the Proxy Minion processes. - Reuse your existing extension modules, templates, Pillars, States, etc., you - may have already developed in your environment, transparently. + may have already developed in your Salt environment, transparently. - You can run it locally, on your own computer. - You can use *salt-sproxy* to uniformly manage network devices, servers (either using regular Minions, or `SSH @@ -350,8 +350,8 @@ Docker ------ A Docker image is available at -https://hub.docker.com/r/mirceaulinic/salt-sproxy, and you can pull it, e.g., -``docker pull mirceaulinic/salt-sproxy``. See +https://github.com/mirceaulinic/salt-sproxy/pkgs/container/salt-sproxy, and you can pull it, e.g., +``docker pull ghcr.io/mirceaulinic/salt-sproxy:develop``. See https://salt-sproxy.readthedocs.io/en/latest/#docker for further usage instructions and examples. diff --git a/docs/_templates/links.html b/docs/_templates/links.html index 6c536c26..20deca31 100644 --- a/docs/_templates/links.html +++ b/docs/_templates/links.html @@ -15,6 +15,6 @@

Useful Links

  • salt-sproxy Google Group
  • salt-sproxy tag @ Stack Overflow
  • salt-sproxy @ PyPI
-  • salt-sproxy @ Docker Hub
+  • salt-sproxy @ GitHub Container Registry
  • Issue Tracker
  • Issue Tracker
diff --git a/docs/conf.py b/docs/conf.py index a45afbcb..70a8f14a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,9 +24,9 @@ import jinja2 -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('../salt_sproxy')) -sys.path.insert(0, os.path.abspath('_themes')) +sys.path.insert(0, os.path.abspath("../")) +sys.path.insert(0, os.path.abspath("../salt_sproxy")) +sys.path.insert(0, os.path.abspath("_themes")) log = logging.getLogger(__name__) @@ -40,30 +40,30 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'salt-sproxy' -copyright = u'2019-2020, Mircea Ulinic' -author = u'Mircea Ulinic' +project = "salt-sproxy" +copyright = "2019-2020, Mircea Ulinic" +author = "Mircea Ulinic" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -84,10 +84,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # These patterns also affect html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'flask_theme_support.FlaskyStyle' +pygments_style = "flask_theme_support.FlaskyStyle" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -97,24 +97,24 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { - 'show_powered_by': False, - 'github_user': 'mirceaulinic', - 'github_repo': 'salt-sproxy', - 'github_banner': True, - 'show_related': False, + "show_powered_by": False, + "github_user": "mirceaulinic", + "github_repo": "salt-sproxy", + "github_banner": True, + "show_related": False, } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names.
@@ -122,13 +122,13 @@ # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { - '**': [ - 'about.html', - 'navigation.html', - 'links.html', - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', - 'donate.html', + "**": [ + "about.html", + "navigation.html", + "links.html", + "relations.html", # needs 'show_related': True theme option to display + "searchbox.html", + "donate.html", ] } @@ -144,7 +144,7 @@ # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'salt-sproxy' +htmlhelp_basename = "salt-sproxy" # -- Options for LaTeX output --------------------------------------------- @@ -170,10 +170,10 @@ latex_documents = [ ( master_doc, - 'salt-sproxy.tex', - u'salt-sproxy Documentation', - u'Mircea Ulinic', - 'manual', + "salt-sproxy.tex", + "salt-sproxy Documentation", + "Mircea Ulinic", + "manual", ) ] @@ -183,8 +183,8 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'salt-sproxy', u'salt-sproxy Documentation', [author], 1), - ('salt_sapi', 'salt-sapi', u'salt-sapi Documentation', [author], 1), + (master_doc, "salt-sproxy", "salt-sproxy Documentation", [author], 1), + ("salt_sapi", "salt-sapi", "salt-sapi Documentation", [author], 1), ] @@ -196,12 +196,12 @@ texinfo_documents = [ ( master_doc, - 'salt-sproxy', - u'salt-sproxy Documentation', + "salt-sproxy", + "salt-sproxy Documentation", author, - 'salt-sproxy', - 'Salt plugin for interacting with network devices, without running Minions', - 'Miscellaneous', + "salt-sproxy", + "Salt plugin for interacting with network devices, without running Minions", + "Miscellaneous", ) ] @@ -223,15 +223,15 @@ # epub_uid = '' # A list of files that should not be packed into the epub file. 
-epub_exclude_files = ['search.html'] +epub_exclude_files = ["search.html"] curdir = os.path.abspath(os.path.dirname(__file__)) -doc_examples_dir = os.path.join(curdir, 'examples') +doc_examples_dir = os.path.join(curdir, "examples") try: os.mkdir(doc_examples_dir) except OSError: pass -examples_path = os.path.abspath(os.path.join(curdir, os.path.pardir, 'examples')) +examples_path = os.path.abspath(os.path.join(curdir, os.path.pardir, "examples")) examples_dirs = [ name for name in os.listdir(examples_path) @@ -240,15 +240,15 @@ examples = [] for example_dir in examples_dirs: - example_readme = os.path.join(examples_path, example_dir, 'README.rst') - example_doc = os.path.join(doc_examples_dir, '{}.rst'.format(example_dir)) + example_readme = os.path.join(examples_path, example_dir, "README.rst") + example_doc = os.path.join(doc_examples_dir, "{}.rst".format(example_dir)) if os.path.exists(example_readme): copyfile(example_readme, example_doc) examples.append(example_dir) -env = jinja2.Environment(loader=jinja2.FileSystemLoader('.')) -examples_template = env.get_template('examples_index.jinja') +env = jinja2.Environment(loader=jinja2.FileSystemLoader(".")) +examples_template = env.get_template("examples_index.jinja") rendered_template = examples_template.render(examples=examples) -examples_index = os.path.join(doc_examples_dir, 'index.rst') -with open(examples_index, 'w') as rst_fh: +examples_index = os.path.join(doc_examples_dir, "index.rst") +with open(examples_index, "w") as rst_fh: rst_fh.write(rendered_template) diff --git a/docs/index.rst b/docs/index.rst index 4ffe7545..184eeffe 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -271,18 +271,14 @@ Docker ------ There are Docker images available should you need or prefer: -https://hub.docker.com/r/mirceaulinic/salt-sproxy. +https://github.com/mirceaulinic/salt-sproxy/pkgs/container/salt-sproxy. You can see here the available tags: -https://hub.docker.com/r/mirceaulinic/salt-sproxy/tags. ``latest`` provides the -code merged into the ``master`` branch, and ``allinone-latest`` is the code -merged into the ``master`` branch with several libraries such as -`NAPALM `__, -`Netmiko `__, -`ciscoconfparse `__, or Ansible -which you may need for your modules or Roster (if you'd want to use the -`Ansible Roster `__, -for example). +https://github.com/mirceaulinic/salt-sproxy/pkgs/container/salt-sproxy. Beware +that the `develop +`__ +tag can be unstable, so it's recommended to use one of the tags corresponding +to a specific release instead. These can be used in various scenarios. For example, if you would like to use ``salt-sproxy`` but without installing it, and prefer to use Docker instead, you @@ -290,7 +286,7 @@ can define the following convoluted alias: .. code-block:: bash - alias salt-sproxy='f(){ docker run --rm --network host -v $SALT_PROXY_PILLAR_DIR:/etc/salt/pillar/ -ti mirceaulinic/salt-sproxy salt-sproxy $@; }; f' + alias salt-sproxy='f(){ docker run --rm --network host -v $SALT_PROXY_PILLAR_DIR:/etc/salt/pillar/ -ti ghcr.io/mirceaulinic/salt-sproxy:develop salt-sproxy $@; }; f' And in the ``SALT_PROXY_PILLAR_DIR`` environment variable, you set the path to the directory where you have the Pillars, e.g., diff --git a/docs/releases/2023.8.0.rst b/docs/releases/2023.8.0.rst new file mode 100644 index 00000000..28228ddd --- /dev/null +++ b/docs/releases/2023.8.0.rst @@ -0,0 +1,36 @@ +.. _release-2023.8.0:
+ +================ +Release 2023.8.0 +================ + +While this release doesn't necessarily bring any new features, it fixes various +compatibility issues with recent Salt releases, starting with Salt 3004: + +- `Chris Hills `__: https://github.com/mirceaulinic/salt-sproxy/pull/264 +- `Zpell82 `__: https://github.com/mirceaulinic/salt-sproxy/pull/270 +- https://github.com/mirceaulinic/salt-sproxy/pull/266 +- https://github.com/mirceaulinic/salt-sproxy/pull/265 + +.. important:: + + Beginning with Salt release 3006, in order to enable the ``sproxy`` + and ``sproxy_async`` clients, you need to explicitly list them under the + ``netapi_enable_clients`` configuration option; otherwise, Salt will reject + any API requests to either of these. + See + https://docs.saltproject.io/en/master/topics/netapi/netapi-enable-clients.html + for more details. + + Example: ``/etc/salt/master`` + + .. code-block:: yaml + + netapi_enable_clients: + - local + - local_async + - sproxy + - sproxy_async + + See also https://salt-sproxy.readthedocs.io/en/latest/salt_api.html for + further documentation. diff --git a/docs/releases/index.rst b/docs/releases/index.rst index 9b0c6106..245835db 100644 --- a/docs/releases/index.rst +++ b/docs/releases/index.rst @@ -10,7 +10,7 @@ Latest Release .. toctree:: :maxdepth: 1 - 2021.6.0 + 2023.8.0 Previous Releases ^^^^^^^^^^^^^^^^^ @@ -18,6 +18,7 @@ Previous Releases .. toctree:: :maxdepth: 1 + 2021.6.0 2020.10.0 2020.7.0 2020.3.0 diff --git a/docs/salt_api.rst b/docs/salt_api.rst index c85244ec..0617718e 100644 --- a/docs/salt_api.rst +++ b/docs/salt_api.rst @@ -52,6 +52,26 @@ together with *salt-sproxy*. Everything stays exactly the same as usual, the only difference being the special ``sproxy`` and ``sproxy_async`` clients for simplified usage. +.. important:: + + Beginning with Salt release 3006, in order to enable the ``sproxy`` + and ``sproxy_async`` clients, you need to explicitly list them under the + ``netapi_enable_clients`` configuration option; otherwise, Salt will reject + any API requests to either of these. + See + https://docs.saltproject.io/en/master/topics/netapi/netapi-enable-clients.html + for more details. + + Example: ``/etc/salt/master`` + + .. code-block:: yaml + + netapi_enable_clients: + - local + - local_async + - sproxy + - sproxy_async + A major advantage of using the ``sproxy`` / ``sproxy_async`` clients is that the usage is very similar to the ``local`` / ``local_async`` clients (see https://docs.saltstack.com/en/latest/ref/netapi/all/salt.netapi.rest_cherrypy.html#usage), diff --git a/pypi.rst b/pypi.rst index f80dac45..5e9eacb5 100644 --- a/pypi.rst +++ b/pypi.rst @@ -7,9 +7,6 @@ Salt plugin for interacting with network devices, without running Minions. This is NOT a SaltStack product. - This package may eventually be integrated in a future version of the - official Salt releases, in this form or slightly different.
- Install ------- diff --git a/requirements-dev.txt b/requirements-dev.txt index 31bbf63b..8ebb96d3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,6 @@ -r requirements.txt -tox==3.24.1 -black==19.10b0 -pylint==2.6.0 +tox==4.6.4 +black==23.7.0 +pylint==2.9.6 SaltPylint==2020.9.28 -CherryPy==18.6.1 +CherryPy==18.8.0 diff --git a/requirements.txt b/requirements.txt index ce431c71..c7bf4e3e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ +six salt progressbar2 diff --git a/salt_sproxy/__init__.py b/salt_sproxy/__init__.py index 68c04af4..03d08ffd 100644 --- a/salt_sproxy/__init__.py +++ b/salt_sproxy/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -__import__('pkg_resources').declare_namespace(__name__) +__import__("pkg_resources").declare_namespace(__name__) diff --git a/salt_sproxy/_executors/ssh.py b/salt_sproxy/_executors/ssh.py index c6002f60..b5821e49 100644 --- a/salt_sproxy/_executors/ssh.py +++ b/salt_sproxy/_executors/ssh.py @@ -1,44 +1,44 @@ # -*- coding: utf-8 -*- -''' +""" SSH Executor module =================== Used in conjunction with the SSH Proxy, to invoke Salt functions through Salt thin, on a remote machine accessed via SSH. -''' +""" from __future__ import absolute_import, unicode_literals import logging -__virtualname__ = 'ssh' -__proxyenabled__ = ['ssh'] +__virtualname__ = "ssh" +__proxyenabled__ = ["ssh"] log = logging.getLogger(__name__) def __virtual__(): - if 'proxy' not in __opts__: - return False, 'SSH Executor is only meant to be used with SSH Proxy Minions' - if __opts__.get('proxy', {}).get('proxytype') != __virtualname__: - return False, 'Proxytype does not match: {0}'.format(__virtualname__) + if "proxy" not in __opts__: + return False, "SSH Executor is only meant to be used with SSH Proxy Minions" + if __opts__.get("proxy", {}).get("proxytype") != __virtualname__: + return False, "Proxytype does not match: {0}".format(__virtualname__) return True def execute(opts, data, func, args, kwargs): - ''' + """ Directly calls the given function with arguments - ''' - if data['fun'] == 'saltutil.find_job': - return __executors__['direct_call.execute'](opts, data, func, args, kwargs) - return __proxy__['ssh.call'](data['fun'], *args, **kwargs) + """ + if data["fun"] == "saltutil.find_job": + return __executors__["direct_call.execute"](opts, data, func, args, kwargs) + return __proxy__["ssh.call"](data["fun"], *args, **kwargs) def allow_missing_func(function): # pylint: disable=unused-argument - ''' + """ Allow all calls to be passed through to the docker container. The ssh call will use direct_call, which will return back if the module was unable to be run. - ''' + """ return True diff --git a/salt_sproxy/_modules/netbox.py b/salt_sproxy/_modules/netbox.py index ebe60cda..c304075b 100644 --- a/salt_sproxy/_modules/netbox.py +++ b/salt_sproxy/_modules/netbox.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +""" NetBox ====== @@ -32,7 +32,7 @@ In ``salt-sproxy``, this module has been included beginning with version 2019.10.0. -''' +""" from __future__ import absolute_import, unicode_literals, print_function import re @@ -50,45 +50,45 @@ log = logging.getLogger(__name__) -AUTH_ENDPOINTS = ('secrets',) +AUTH_ENDPOINTS = ("secrets",) -__func_alias__ = {'filter_': 'filter', 'get_': 'get'} +__func_alias__ = {"filter_": "filter", "get_": "get"} def __virtual__(): - ''' + """ pynetbox must be installed.
- ''' + """ if not HAS_PYNETBOX: return ( False, - 'The netbox execution module cannot be loaded: ' - 'pynetbox library is not installed.', + "The netbox execution module cannot be loaded: " + "pynetbox library is not installed.", ) else: return True def _config(): - config = __salt__['config.get']('netbox') + config = __salt__["config.get"]("netbox") if not config: raise CommandExecutionError( - 'NetBox execution module configuration could not be found' + "NetBox execution module configuration could not be found" ) return config def _nb_obj(auth_required=False): pynb_kwargs = {} - pynb_kwargs['token'] = _config().get('token') + pynb_kwargs["token"] = _config().get("token") if auth_required: - pynb_kwargs['private_key_file'] = _config().get('keyfile') - return pynetbox.api(_config().get('url'), **pynb_kwargs) + pynb_kwargs["private_key_file"] = _config().get("keyfile") + return pynetbox.api(_config().get("url"), **pynb_kwargs) def _strip_url_field(input_dict): - if 'url' in input_dict.keys(): - del input_dict['url'] + if "url" in input_dict.keys(): + del input_dict["url"] for k, v in input_dict.items(): if isinstance(v, dict): _strip_url_field(v) @@ -103,49 +103,49 @@ def _dict(iterable): def _add(app, endpoint, payload): - ''' + """ POST a payload - ''' + """ nb = _nb_obj(auth_required=True) try: return getattr(getattr(nb, app), endpoint).create(**payload) except RequestError as e: # pylint: disable=undefined-variable - log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error) + log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False def slugify(value): - '''' + """' Slugify given value. Credit to Djangoproject https://docs.djangoproject.com/en/2.0/_modules/django/utils/text/#slugify - ''' - value = re.sub(r'[^\w\s-]', '', value).strip().lower() - return re.sub(r'[-\s]+', '-', value) + """ + value = re.sub(r"[^\w\s-]", "", value).strip().lower() + return re.sub(r"[-\s]+", "-", value) def _get(app, endpoint, id=None, auth_required=False, **kwargs): - ''' + """ Helper function to do a GET request to Netbox. Returns the actual pynetbox object, which allows manipulation from other functions. - ''' + """ nb = _nb_obj(auth_required=auth_required) if id: item = getattr(getattr(nb, app), endpoint).get(id) else: - kwargs = __utils__['args.clean_kwargs'](**kwargs) + kwargs = __utils__["args.clean_kwargs"](**kwargs) item = getattr(getattr(nb, app), endpoint).get(**kwargs) return item def _if_name_unit(if_name): - if_name_split = if_name.split('.') + if_name_split = if_name.split(".") if len(if_name_split) == 2: return if_name_split - return if_name, '0' + return if_name, "0" def filter_(app, endpoint, **kwargs): - ''' + """ Get a list of items from NetBox. app @@ -163,10 +163,10 @@ def filter_(app, endpoint, **kwargs): .. code-block:: bash salt myminion netbox.filter dcim devices status=1 role=router - ''' + """ ret = [] nb = _nb_obj(auth_required=True if app in AUTH_ENDPOINTS else False) - clean_kwargs = __utils__['args.clean_kwargs'](**kwargs) + clean_kwargs = __utils__["args.clean_kwargs"](**kwargs) if not clean_kwargs: nb_query = getattr(getattr(nb, app), endpoint).all() else: @@ -177,7 +177,7 @@ def filter_(app, endpoint, **kwargs): def get_(app, endpoint, id=None, **kwargs): - ''' + """ Get a single item from NetBox. app @@ -199,7 +199,7 @@ def get_(app, endpoint, id=None, **kwargs): .. 
code-block:: bash salt myminion netbox.get dcim devices name=my-router - ''' + """ return _dict( _get( app, @@ -212,7 +212,7 @@ def get_(app, endpoint, id=None, **kwargs): def create_manufacturer(name): - ''' + """ .. versionadded:: 2019.2.0 Create a device manufacturer. @@ -225,21 +225,21 @@ def create_manufacturer(name): .. code-block:: bash salt myminion netbox.create_manufacturer Juniper - ''' - nb_man = get_('dcim', 'manufacturers', name=name) + """ + nb_man = get_("dcim", "manufacturers", name=name) if nb_man: return False else: - payload = {'name': name, 'slug': slugify(name)} - man = _add('dcim', 'manufacturers', payload) + payload = {"name": name, "slug": slugify(name)} + man = _add("dcim", "manufacturers", payload) if man: - return {'dcim': {'manufacturers': payload}} + return {"dcim": {"manufacturers": payload}} else: return False def create_device_type(model, manufacturer): - ''' + """ .. versionadded:: 2019.2.0 Create a device type. If the manufacturer doesn't exist, create a new manufacturer. @@ -254,19 +254,19 @@ def create_device_type(model, manufacturer): .. code-block:: bash salt myminion netbox.create_device_type MX480 Juniper - ''' - nb_type = get_('dcim', 'device-types', model=model) + """ + nb_type = get_("dcim", "device-types", model=model) if nb_type: return False - nb_man = get_('dcim', 'manufacturers', name=manufacturer) + nb_man = get_("dcim", "manufacturers", name=manufacturer) new_man = None if not nb_man: new_man = create_manufacturer(manufacturer) - payload = {'model': model, 'manufacturer': nb_man['id'], 'slug': slugify(model)} - typ = _add('dcim', 'device-types', payload) - ret_dict = {'dcim': {'device-types': payload}} + payload = {"model": model, "manufacturer": nb_man["id"], "slug": slugify(model)} + typ = _add("dcim", "device-types", payload) + ret_dict = {"dcim": {"device-types": payload}} if new_man: - ret_dict['dcim'].update(new_man['dcim']) + ret_dict["dcim"].update(new_man["dcim"]) if typ: return ret_dict else: @@ -274,7 +274,7 @@ def create_device_type(model, manufacturer): def create_device_role(role, color): - ''' + """ .. versionadded:: 2019.2.0 Create a device role @@ -287,21 +287,21 @@ def create_device_role(role, color): .. code-block:: bash salt myminion netbox.create_device_role router - ''' - nb_role = get_('dcim', 'device-roles', name=role) + """ + nb_role = get_("dcim", "device-roles", name=role) if nb_role: return False else: - payload = {'name': role, 'slug': slugify(role), 'color': color} - role = _add('dcim', 'device-roles', payload) + payload = {"name": role, "slug": slugify(role), "color": color} + role = _add("dcim", "device-roles", payload) if role: - return {'dcim': {'device-roles': payload}} + return {"dcim": {"device-roles": payload}} else: return False def create_platform(platform): - ''' + """ .. versionadded:: 2019.2.0 Create a new device platform @@ -314,21 +314,21 @@ def create_platform(platform): .. code-block:: bash salt myminion netbox.create_platform junos - ''' - nb_platform = get_('dcim', 'platforms', slug=slugify(platform)) + """ + nb_platform = get_("dcim", "platforms", slug=slugify(platform)) if nb_platform: return False else: - payload = {'name': platform, 'slug': slugify(platform)} - plat = _add('dcim', 'platforms', payload) + payload = {"name": platform, "slug": slugify(platform)} + plat = _add("dcim", "platforms", payload) if plat: - return {'dcim': {'platforms': payload}} + return {"dcim": {"platforms": payload}} else: return False def create_site(site): - ''' + """ .. 
versionadded:: 2019.2.0 Create a new device site @@ -341,21 +341,21 @@ def create_site(site): .. code-block:: bash salt myminion netbox.create_site BRU - ''' - nb_site = get_('dcim', 'sites', name=site) + """ + nb_site = get_("dcim", "sites", name=site) if nb_site: return False else: - payload = {'name': site, 'slug': slugify(site)} - site = _add('dcim', 'sites', payload) + payload = {"name": site, "slug": slugify(site)} + site = _add("dcim", "sites", payload) if site: - return {'dcim': {'sites': payload}} + return {"dcim": {"sites": payload}} else: return False def create_device(name, role, model, manufacturer, site): - ''' + """ .. versionadded:: 2019.2.0 Create a new device with a name, role, model, manufacturer and site. @@ -377,41 +377,41 @@ def create_device(name, role, model, manufacturer, site): .. code-block:: bash salt myminion netbox.create_device edge_router router MX480 Juniper BRU - ''' + """ try: - nb_role = get_('dcim', 'device-roles', name=role) + nb_role = get_("dcim", "device-roles", name=role) if not nb_role: return False - nb_type = get_('dcim', 'device-types', model=model) + nb_type = get_("dcim", "device-types", model=model) if not nb_type: return False - nb_site = get_('dcim', 'sites', name=site) + nb_site = get_("dcim", "sites", name=site) if not nb_site: return False - status = {'label': "Active", 'value': 1} + status = {"label": "Active", "value": 1} except RequestError as e: # pylint: disable=undefined-variable - log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error) + log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False payload = { - 'name': name, - 'display_name': name, - 'slug': slugify(name), - 'device_type': nb_type['id'], - 'device_role': nb_role['id'], - 'site': nb_site['id'], + "name": name, + "display_name": name, + "slug": slugify(name), + "device_type": nb_type["id"], + "device_role": nb_role["id"], + "site": nb_site["id"], } - new_dev = _add('dcim', 'devices', payload) + new_dev = _add("dcim", "devices", payload) if new_dev: - return {'dcim': {'devices': payload}} + return {"dcim": {"devices": payload}} else: return False def update_device(name, **kwargs): - ''' + """ .. versionadded:: 2019.2.0 Add attributes to an existing device, identified by name. @@ -426,16 +426,16 @@ def update_device(name, **kwargs): .. code-block:: bash salt myminion netbox.update_device edge_router serial=JN2932920 - ''' - kwargs = __utils__['args.clean_kwargs'](**kwargs) - nb_device = _get('dcim', 'devices', auth_required=True, name=name) + """ + kwargs = __utils__["args.clean_kwargs"](**kwargs) + nb_device = _get("dcim", "devices", auth_required=True, name=name) for k, v in kwargs.items(): setattr(nb_device, k, v) try: nb_device.save() - return {'dcim': {'devices': kwargs}} + return {"dcim": {"devices": kwargs}} except RequestError as e: # pylint: disable=undefined-variable - log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error) + log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False @@ -443,11 +443,11 @@ def create_inventory_item( device_name, item_name, manufacturer_name=None, - serial='', - part_id='', - description='', + serial="", + part_id="", + description="", ): - ''' + """ .. versionadded:: 2019.2.0 Add an inventory item to an existing device. @@ -474,34 +474,34 @@ def create_inventory_item( .. 
code-block:: bash salt myminion netbox.create_inventory_item edge_router Transceiver part_id=740-01234 - ''' - nb_device = get_('dcim', 'devices', name=device_name) + """ + nb_device = get_("dcim", "devices", name=device_name) if not nb_device: return False if manufacturer_name: - nb_man = get_('dcim', 'manufacturers', name=manufacturer_name) + nb_man = get_("dcim", "manufacturers", name=manufacturer_name) if not nb_man: create_manufacturer(manufacturer_name) - nb_man = get_('dcim', 'manufacturers', name=manufacturer_name) + nb_man = get_("dcim", "manufacturers", name=manufacturer_name) payload = { - 'device': nb_device['id'], - 'name': item_name, - 'description': description, - 'serial': serial, - 'part_id': part_id, - 'parent': None, + "device": nb_device["id"], + "name": item_name, + "description": description, + "serial": serial, + "part_id": part_id, + "parent": None, } if manufacturer_name: - payload['manufacturer'] = nb_man['id'] - done = _add('dcim', 'inventory-items', payload) + payload["manufacturer"] = nb_man["id"] + done = _add("dcim", "inventory-items", payload) if done: - return {'dcim': {'inventory-items': payload}} + return {"dcim": {"inventory-items": payload}} else: return done def delete_inventory_item(item_id): - ''' + """ .. versionadded:: 2019.2.0 Remove an item from a devices inventory. Identified by the netbox id @@ -514,14 +514,14 @@ def delete_inventory_item(item_id): .. code-block:: bash salt myminion netbox.delete_inventory_item 1354 - ''' - nb_inventory_item = _get('dcim', 'inventory-items', auth_required=True, id=item_id) + """ + nb_inventory_item = _get("dcim", "inventory-items", auth_required=True, id=item_id) nb_inventory_item.delete() - return {'DELETE': {'dcim': {'inventory-items': item_id}}} + return {"DELETE": {"dcim": {"inventory-items": item_id}}} def create_interface_connection(interface_a, interface_b): - ''' + """ .. versionadded:: 2019.2.0 Create an interface connection between 2 interfaces @@ -536,17 +536,17 @@ def create_interface_connection(interface_a, interface_b): .. code-block:: bash salt myminion netbox.create_interface_connection 123 456 - ''' - payload = {'interface_a': interface_a, 'interface_b': interface_b} - ret = _add('dcim', 'interface-connections', payload) + """ + payload = {"interface_a": interface_a, "interface_b": interface_b} + ret = _add("dcim", "interface-connections", payload) if ret: - return {'dcim': {'interface-connections': {ret['id']: payload}}} + return {"dcim": {"interface-connections": {ret["id"]: payload}}} else: return ret def get_interfaces(device_name=None, **kwargs): - ''' + """ .. versionadded:: 2019.2.0 Returns interfaces for a specific device using arbitrary netbox filters @@ -562,15 +562,15 @@ def get_interfaces(device_name=None, **kwargs): salt myminion netbox.get_interfaces edge_router name="et-0/0/5" - ''' + """ if not device_name: - device_name = __opts__['id'] - netbox_device = get_('dcim', 'devices', name=device_name) - return filter_('dcim', 'interfaces', device_id=netbox_device['id'], **kwargs) + device_name = __opts__["id"] + netbox_device = get_("dcim", "devices", name=device_name) + return filter_("dcim", "interfaces", device_id=netbox_device["id"], **kwargs) def openconfig_interfaces(device_name=None): - ''' + """ .. 
versionadded:: 2019.2.0 Return a dictionary structured as standardised in the @@ -588,72 +588,72 @@ def openconfig_interfaces(device_name=None): salt '*' netbox.openconfig_interfaces salt '*' netbox.openconfig_interfaces device_name=cr1.thn.lon - ''' + """ oc_if = {} interfaces = get_interfaces(device_name=device_name) ipaddresses = get_ipaddresses(device_name=device_name) for interface in interfaces: - if_name, if_unit = _if_name_unit(interface['name']) + if_name, if_unit = _if_name_unit(interface["name"]) if if_name not in oc_if: oc_if[if_name] = { - 'config': {'name': if_name}, - 'subinterfaces': {'subinterface': {}}, + "config": {"name": if_name}, + "subinterfaces": {"subinterface": {}}, } - if if_unit == '0': - oc_if[if_name]['config']['enabled'] = interface['enabled'] - if interface['description']: - if if_name == interface['name']: + if if_unit == "0": + oc_if[if_name]["config"]["enabled"] = interface["enabled"] + if interface["description"]: + if if_name == interface["name"]: # When that's a real unit 0 interface # Otherwise it will inherit the description from the subif - oc_if[if_name]['config']['description'] = str( - interface['description'] + oc_if[if_name]["config"]["description"] = str( + interface["description"] ) else: subif_descr = { - 'subinterfaces': { - 'subinterface': { + "subinterfaces": { + "subinterface": { if_unit: { - 'config': { - 'description': str(interface['description']) + "config": { + "description": str(interface["description"]) } } } } } - oc_if[if_name] = __utils__['dictupdate.update']( + oc_if[if_name] = __utils__["dictupdate.update"]( oc_if[if_name], subif_descr ) - if interface['mtu']: - oc_if[if_name]['config']['mtu'] = int(interface['mtu']) + if interface["mtu"]: + oc_if[if_name]["config"]["mtu"] = int(interface["mtu"]) else: - oc_if[if_name]['subinterfaces']['subinterface'][if_unit] = { - 'config': {'index': int(if_unit), 'enabled': interface['enabled']} + oc_if[if_name]["subinterfaces"]["subinterface"][if_unit] = { + "config": {"index": int(if_unit), "enabled": interface["enabled"]} } - if interface['description']: - oc_if[if_name]['subinterfaces']['subinterface'][if_unit]['config'][ - 'description' - ] = str(interface['description']) + if interface["description"]: + oc_if[if_name]["subinterfaces"]["subinterface"][if_unit]["config"][ + "description" + ] = str(interface["description"]) for ipaddress in ipaddresses: - ip, prefix_length = ipaddress['address'].split('/') - if_name = ipaddress['interface']['name'] + ip, prefix_length = ipaddress["address"].split("/") + if_name = ipaddress["interface"]["name"] if_name, if_unit = _if_name_unit(if_name) - ipvkey = 'ipv{}'.format(ipaddress['family']) - if if_unit not in oc_if[if_name]['subinterfaces']['subinterface']: - oc_if[if_name]['subinterfaces']['subinterface'][if_unit] = { - 'config': {'index': int(if_unit), 'enabled': True} + ipvkey = "ipv{}".format(ipaddress["family"]) + if if_unit not in oc_if[if_name]["subinterfaces"]["subinterface"]: + oc_if[if_name]["subinterfaces"]["subinterface"][if_unit] = { + "config": {"index": int(if_unit), "enabled": True} } - if ipvkey not in oc_if[if_name]['subinterfaces']['subinterface'][if_unit]: - oc_if[if_name]['subinterfaces']['subinterface'][if_unit][ipvkey] = { - 'addresses': {'address': {}} + if ipvkey not in oc_if[if_name]["subinterfaces"]["subinterface"][if_unit]: + oc_if[if_name]["subinterfaces"]["subinterface"][if_unit][ipvkey] = { + "addresses": {"address": {}} } - oc_if[if_name]['subinterfaces']['subinterface'][if_unit][ipvkey]['addresses'][ - 'address' 
- ][ip] = {'config': {'ip': ip, 'prefix_length': int(prefix_length)}} - return {'interfaces': {'interface': oc_if}} + oc_if[if_name]["subinterfaces"]["subinterface"][if_unit][ipvkey]["addresses"][ + "address" + ][ip] = {"config": {"ip": ip, "prefix_length": int(prefix_length)}} + return {"interfaces": {"interface": oc_if}} def openconfig_lacp(device_name=None): - ''' + """ .. versionadded:: 2019.2.0 Return a dictionary structured as standardised in the @@ -679,25 +679,25 @@ def openconfig_lacp(device_name=None): salt '*' netbox.openconfig_lacp salt '*' netbox.openconfig_lacp device_name=cr1.thn.lon - ''' + """ oc_lacp = {} interfaces = get_interfaces(device_name=device_name) for interface in interfaces: - if not interface['lag']: + if not interface["lag"]: continue - if_name, if_unit = _if_name_unit(interface['name']) - parent_if = interface['lag']['name'] + if_name, if_unit = _if_name_unit(interface["name"]) + parent_if = interface["lag"]["name"] if parent_if not in oc_lacp: oc_lacp[parent_if] = { - 'config': { - 'name': parent_if, - 'interval': 'SLOW', - 'lacp_mode': 'ACTIVE', + "config": { + "name": parent_if, + "interval": "SLOW", + "lacp_mode": "ACTIVE", }, - 'members': {'member': {}}, + "members": {"member": {}}, } - oc_lacp[parent_if]['members']['member'][if_name] = {} - return {'lacp': {'interfaces': {'interface': oc_lacp}}} + oc_lacp[parent_if]["members"]["member"][if_name] = {} + return {"lacp": {"interfaces": {"interface": oc_lacp}}} def create_interface( @@ -710,7 +710,7 @@ def create_interface( lag_parent=None, form_factor=None, ): - ''' + """ .. versionadded:: 2019.2.0 Attach an interface to a device. If not all arguments are provided, @@ -738,50 +738,50 @@ def create_interface( .. code-block:: bash salt myminion netbox.create_interface edge_router ae13 description="Core uplink" - ''' - nb_device = get_('dcim', 'devices', name=device_name) + """ + nb_device = get_("dcim", "devices", name=device_name) if not nb_device: return False if lag_parent: lag_interface = get_( - 'dcim', 'interfaces', device_id=nb_device['id'], name=lag_parent + "dcim", "interfaces", device_id=nb_device["id"], name=lag_parent ) if not lag_interface: return False if not description: - description = '' + description = "" if not enabled: - enabled = 'false' + enabled = "false" # Set default form factor to 1200. This maps to SFP+ (10GE). This should be addressed by # the _choices endpoint. 
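    # Similarly, form_factor 200 (used below when lag=True, and by
    # make_interface_lag further down) appears to be the LAG value in this
    # NetBox version's form-factor choices; that mapping is inferred from its
    # use in this module rather than from the _choices endpoint.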
payload = { - 'device': nb_device['id'], - 'name': interface_name, - 'description': description, - 'enabled': enabled, - 'form_factor': 1200, + "device": nb_device["id"], + "name": interface_name, + "description": description, + "enabled": enabled, + "form_factor": 1200, } if form_factor is not None: - payload['form_factor'] = form_factor + payload["form_factor"] = form_factor if lag: - payload['form_factor'] = 200 + payload["form_factor"] = 200 if lag_parent: - payload['lag'] = lag_interface['id'] + payload["lag"] = lag_interface["id"] if mac_address: - payload['mac_address'] = mac_address + payload["mac_address"] = mac_address nb_interface = get_( - 'dcim', 'interfaces', device_id=nb_device['id'], name=interface_name + "dcim", "interfaces", device_id=nb_device["id"], name=interface_name ) if not nb_interface: - nb_interface = _add('dcim', 'interfaces', payload) + nb_interface = _add("dcim", "interfaces", payload) if nb_interface: - return {'dcim': {'interfaces': {nb_interface['id']: payload}}} + return {"dcim": {"interfaces": {nb_interface["id"]: payload}}} else: return nb_interface def update_interface(device_name, interface_name, **kwargs): - ''' + """ .. versionadded:: 2019.2.0 Update an existing interface with new attributes. @@ -798,13 +798,13 @@ def update_interface(device_name, interface_name, **kwargs): .. code-block:: bash salt myminion netbox.update_interface edge_router ae13 mac_address=50:87:69:53:32:D0 - ''' - nb_device = get_('dcim', 'devices', name=device_name) + """ + nb_device = get_("dcim", "devices", name=device_name) nb_interface = _get( - 'dcim', - 'interfaces', + "dcim", + "interfaces", auth_required=True, - device_id=nb_device['id'], + device_id=nb_device["id"], name=interface_name, ) if not nb_device: @@ -812,18 +812,18 @@ def update_interface(device_name, interface_name, **kwargs): if not nb_interface: return False else: - for k, v in __utils__['args.clean_kwargs'](**kwargs).items(): + for k, v in __utils__["args.clean_kwargs"](**kwargs).items(): setattr(nb_interface, k, v) try: nb_interface.save() - return {'dcim': {'interfaces': {nb_interface.id: dict(nb_interface)}}} + return {"dcim": {"interfaces": {nb_interface.id: dict(nb_interface)}}} except RequestError as e: # pylint: disable=undefined-variable - log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error) + log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False def delete_interface(device_name, interface_name): - ''' + """ .. versionadded:: 2019.2.0 Delete an interface from a device. @@ -839,25 +839,25 @@ def delete_interface(device_name, interface_name): .. code-block:: bash salt myminion netbox.delete_interface edge_router ae13 - ''' - nb_device = get_('dcim', 'devices', name=device_name) + """ + nb_device = get_("dcim", "devices", name=device_name) nb_interface = _get( - 'dcim', - 'interfaces', + "dcim", + "interfaces", auth_required=True, - device_id=nb_device['id'], + device_id=nb_device["id"], name=interface_name, ) if nb_interface: nb_interface.delete() return { - 'DELETE': {'dcim': {'interfaces': {nb_interface.id: nb_interface.name}}} + "DELETE": {"dcim": {"interfaces": {nb_interface.id: nb_interface.name}}} } return False def make_interface_lag(device_name, interface_name): - ''' + """ .. versionadded:: 2019.2.0 Update an interface to be a LAG. @@ -873,12 +873,12 @@ def make_interface_lag(device_name, interface_name): .. 
code-block:: bash salt myminion netbox.make_interface_lag edge_router ae13 - ''' + """ return update_interface(device_name, interface_name, form_factor=200) def make_interface_child(device_name, interface_name, parent_name): - ''' + """ .. versionadded:: 2019.2.0 Set an interface as part of a LAG. @@ -897,17 +897,17 @@ def make_interface_child(device_name, interface_name, parent_name): .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13 - ''' - nb_device = get_('dcim', 'devices', name=device_name) - nb_parent = get_('dcim', 'interfaces', device_id=nb_device['id'], name=parent_name) + """ + nb_device = get_("dcim", "devices", name=device_name) + nb_parent = get_("dcim", "interfaces", device_id=nb_device["id"], name=parent_name) if nb_device and nb_parent: - return update_interface(device_name, interface_name, lag=nb_parent['id']) + return update_interface(device_name, interface_name, lag=nb_parent["id"]) else: return False def get_ipaddresses(device_name=None, **kwargs): - ''' + """ .. versionadded:: 2019.2.0 Filters for an IP address using specified filters @@ -922,15 +922,15 @@ def get_ipaddresses(device_name=None, **kwargs): .. code-block:: bash salt myminion netbox.get_ipaddresses device_name family=4 - ''' + """ if not device_name: - device_name = __opts__['id'] - netbox_device = get_('dcim', 'devices', name=device_name) - return filter_('ipam', 'ip-addresses', device_id=netbox_device['id'], **kwargs) + device_name = __opts__["id"] + netbox_device = get_("dcim", "devices", name=device_name) + return filter_("ipam", "ip-addresses", device_id=netbox_device["id"], **kwargs) def create_ipaddress(ip_address, family, device=None, interface=None): - ''' + """ .. versionadded:: 2019.2.0 Add an IP address, and optionally attach it to an interface. @@ -949,39 +949,39 @@ def create_ipaddress(ip_address, family, device=None, interface=None): .. code-block:: bash salt myminion netbox.create_ipaddress 192.168.1.1/24 4 device=edge_router interface=ae13 - ''' + """ nb_addr = None - payload = {'family': family, 'address': ip_address} + payload = {"family": family, "address": ip_address} if interface and device: - nb_device = get_('dcim', 'devices', name=device) + nb_device = get_("dcim", "devices", name=device) if not nb_device: return False nb_interface = get_( - 'dcim', 'interfaces', device_id=nb_device['id'], name=interface + "dcim", "interfaces", device_id=nb_device["id"], name=interface ) if not nb_interface: return False nb_addr = get_( - 'ipam', - 'ip-addresses', + "ipam", + "ip-addresses", q=ip_address, - interface_id=nb_interface['id'], + interface_id=nb_interface["id"], family=family, ) if nb_addr: log.error(nb_addr) return False else: - payload['interface'] = nb_interface['id'] - ipaddr = _add('ipam', 'ip-addresses', payload) + payload["interface"] = nb_interface["id"] + ipaddr = _add("ipam", "ip-addresses", payload) if ipaddr: - return {'ipam': {'ip-addresses': payload}} + return {"ipam": {"ip-addresses": payload}} else: return ipaddr def delete_ipaddress(ipaddr_id): - ''' + """ .. versionadded:: 2019.2.0 Delete an IP address. IP addresses in Netbox are a combination of address @@ -995,17 +995,17 @@ def delete_ipaddress(ipaddr_id): .. 
code-block:: bash salt myminion netbox.delete_ipaddress 9002 - ''' + """ - nb_ipaddr = _get('ipam', 'ip-addresses', auth_required=True, id=ipaddr_id) + nb_ipaddr = _get("ipam", "ip-addresses", auth_required=True, id=ipaddr_id) if nb_ipaddr: nb_ipaddr.delete() - return {'DELETE': {'ipam': {'ip-address': ipaddr_id}}} + return {"DELETE": {"ipam": {"ip-address": ipaddr_id}}} return False def create_circuit_provider(name, asn=None): - ''' + """ .. versionadded:: 2019.2.0 Create a new Netbox circuit provider @@ -1020,31 +1020,31 @@ def create_circuit_provider(name, asn=None): .. code-block:: bash salt myminion netbox.create_circuit_provider Telia 1299 - ''' + """ - nb_circuit_provider = get_('circuits', 'providers', name=name) + nb_circuit_provider = get_("circuits", "providers", name=name) payload = {} if nb_circuit_provider: - if nb_circuit_provider['asn'] == asn: + if nb_circuit_provider["asn"] == asn: return False else: - log.error('Duplicate provider with different ASN: %s: %s', name, asn) + log.error("Duplicate provider with different ASN: %s: %s", name, asn) raise CommandExecutionError( - 'Duplicate provider with different ASN: {}: {}'.format(name, asn) + "Duplicate provider with different ASN: {}: {}".format(name, asn) ) else: - payload = {'name': name, 'slug': slugify(name)} + payload = {"name": name, "slug": slugify(name)} if asn: - payload['asn'] = asn - circuit_provider = _add('circuits', 'providers', payload) + payload["asn"] = asn + circuit_provider = _add("circuits", "providers", payload) if circuit_provider: - return {'circuits': {'providers': {circuit_provider['id']: payload}}} + return {"circuits": {"providers": {circuit_provider["id"]: payload}}} else: return circuit_provider def get_circuit_provider(name, asn=None): - ''' + """ .. versionadded:: 2019.2.0 Get a circuit provider with a given name and optional ASN. @@ -1059,16 +1059,16 @@ def get_circuit_provider(name, asn=None): .. code-block:: bash salt myminion netbox.get_circuit_provider Telia 1299 - ''' + """ if asn: - nb_circuit_provider = get_('circuits', 'providers', asn=asn) + nb_circuit_provider = get_("circuits", "providers", asn=asn) else: - nb_circuit_provider = get_('circuits', 'providers', name=name) + nb_circuit_provider = get_("circuits", "providers", name=name) return nb_circuit_provider def create_circuit_type(name): - ''' + """ .. versionadded:: 2019.2.0 Create a new Netbox circuit type. @@ -1081,21 +1081,21 @@ def create_circuit_type(name): .. code-block:: bash salt myminion netbox.create_circuit_type Transit - ''' - nb_circuit_type = get_('circuits', 'circuit-types', slug=slugify(name)) + """ + nb_circuit_type = get_("circuits", "circuit-types", slug=slugify(name)) if nb_circuit_type: return False else: - payload = {'name': name, 'slug': slugify(name)} - circuit_type = _add('circuits', 'circuit-types', payload) + payload = {"name": name, "slug": slugify(name)} + circuit_type = _add("circuits", "circuit-types", payload) if circuit_type: - return {'circuits': {'circuit-types': {circuit_type['id']: payload}}} + return {"circuits": {"circuit-types": {circuit_type["id"]: payload}}} else: return circuit_type def create_circuit(name, provider_id, circuit_type, description=None): - ''' + """ .. versionadded:: 2019.2.0 Create a new Netbox circuit @@ -1116,24 +1116,24 @@ def create_circuit(name, provider_id, circuit_type, description=None): .. 
code-block:: bash salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit 1299 "New Telia circuit" - ''' + """ - nb_circuit_provider = get_('circuits', 'providers', provider_id) - nb_circuit_type = get_('circuits', 'circuit-types', slug=slugify(circuit_type)) + nb_circuit_provider = get_("circuits", "providers", provider_id) + nb_circuit_type = get_("circuits", "circuit-types", slug=slugify(circuit_type)) if nb_circuit_provider and nb_circuit_type: payload = { - 'cid': name, - 'provider': nb_circuit_provider['id'], - 'type': nb_circuit_type['id'], + "cid": name, + "provider": nb_circuit_provider["id"], + "type": nb_circuit_type["id"], } if description: - payload['description'] = description - nb_circuit = get_('circuits', 'circuits', cid=name) + payload["description"] = description + nb_circuit = get_("circuits", "circuits", cid=name) if nb_circuit: return False - circuit = _add('circuits', 'circuits', payload) + circuit = _add("circuits", "circuits", payload) if circuit: - return {'circuits': {'circuits': {circuit['id']: payload}}} + return {"circuits": {"circuits": {circuit["id"]: payload}}} else: return circuit else: @@ -1141,9 +1141,9 @@ def create_circuit(name, provider_id, circuit_type, description=None): def create_circuit_termination( - circuit, interface, device, speed, xconnect_id=None, term_side='A' + circuit, interface, device, speed, xconnect_id=None, term_side="A" ): - ''' + """ .. versionadded:: 2019.2.0 Terminate a circuit on an interface @@ -1166,29 +1166,29 @@ def create_circuit_termination( .. code-block:: bash salt myminion netbox.create_circuit_termination NEW_CIRCUIT_01 xe-0/0/1 myminion 10000 xconnect_id=XCON01 - ''' + """ - nb_device = get_('dcim', 'devices', name=device) - nb_interface = get_('dcim', 'interfaces', device_id=nb_device['id'], name=interface) - nb_circuit = get_('circuits', 'circuits', cid=circuit) + nb_device = get_("dcim", "devices", name=device) + nb_interface = get_("dcim", "interfaces", device_id=nb_device["id"], name=interface) + nb_circuit = get_("circuits", "circuits", cid=circuit) if nb_circuit and nb_device: - nb_termination = get_('circuits', 'circuit-terminations', q=nb_circuit['cid']) + nb_termination = get_("circuits", "circuit-terminations", q=nb_circuit["cid"]) if nb_termination: return False payload = { - 'circuit': nb_circuit['id'], - 'interface': nb_interface['id'], - 'site': nb_device['site']['id'], - 'port_speed': speed, - 'term_side': term_side, + "circuit": nb_circuit["id"], + "interface": nb_interface["id"], + "site": nb_device["site"]["id"], + "port_speed": speed, + "term_side": term_side, } if xconnect_id: - payload['xconnect_id'] = xconnect_id - circuit_termination = _add('circuits', 'circuit-terminations', payload) + payload["xconnect_id"] = xconnect_id + circuit_termination = _add("circuits", "circuit-terminations", payload) if circuit_termination: return { - 'circuits': { - 'circuit-terminations': {circuit_termination['id']: payload} + "circuits": { + "circuit-terminations": {circuit_termination["id"]: payload} } } else: diff --git a/salt_sproxy/_proxy/ssh.py b/salt_sproxy/_proxy/ssh.py index d3181aa1..1bd837b4 100644 --- a/salt_sproxy/_proxy/ssh.py +++ b/salt_sproxy/_proxy/ssh.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +""" SSH Proxy ========= @@ -90,19 +90,19 @@ user: test passwd: test port: 2022 -''' +""" from __future__ import absolute_import, print_function, unicode_literals import json import logging +import six import salt.client.ssh import salt.fileclient import salt.exceptions import salt.utils.path 
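[Reviewer note on the import change below] This PR swaps the vendored `from salt.ext import six` for the standalone `six` package. If the module must keep working on older Salt releases where a system-wide `six` isn't guaranteed, a guarded import is a common pattern; the sketch below is illustrative only and not part of this changeset:

```python
# Hypothetical compatibility shim (assumption, not in this PR): prefer the
# standalone six package, fall back to the copy vendored by older Salt.
try:
    import six
except ImportError:  # only hit on installs without a system-wide six
    from salt.ext import six  # noqa: F401
```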
-from salt.ext import six -__proxyenabled__ = ['ssh'] +__proxyenabled__ = ["ssh"] log = logging.getLogger(__name__) @@ -112,10 +112,10 @@ def _prep_conn(opts, fun, *args, **kwargs): - ''' + """ Prepare the connection. - ''' - opts['_ssh_version'] = salt.client.ssh.ssh_version() + """ + opts["_ssh_version"] = salt.client.ssh.ssh_version() fsclient = salt.fileclient.FSClient(opts) # TODO: Have here more options to simplify the usage, through features like # auto-expand the path to the priv key, auto-discovery, etc. @@ -129,88 +129,88 @@ def _prep_conn(opts, fun, *args, **kwargs): for key, val in six.iteritems(kwargs) ] ) - if not opts['proxy'].get('ssh_options'): - opts['proxy']['ssh_options'] = [] - if opts['proxy'].get('ignore_host_keys', False): - opts['proxy']['ssh_options'].append('StrictHostKeyChecking=no') - if opts['proxy'].get('no_host_keys', False): - opts['proxy']['ssh_options'].extend( + if not opts["proxy"].get("ssh_options"): + opts["proxy"]["ssh_options"] = [] + if opts["proxy"].get("ignore_host_keys", False): + opts["proxy"]["ssh_options"].append("StrictHostKeyChecking=no") + if opts["proxy"].get("no_host_keys", False): + opts["proxy"]["ssh_options"].extend( ["StrictHostKeyChecking=no", "UserKnownHostsFile=/dev/null"] ) - for cli_opt in ('identities_only', 'priv', 'priv_passwd'): - if opts.get(cli_opt) and not opts['proxy'].get(cli_opt): - opts['proxy'][cli_opt] = opts[cli_opt] + for cli_opt in ("identities_only", "priv", "priv_passwd"): + if opts.get(cli_opt) and not opts["proxy"].get(cli_opt): + opts["proxy"][cli_opt] = opts[cli_opt] ext_mods = salt.client.ssh.mod_data(fsclient) conn = salt.client.ssh.Single( - opts, argv, opts['id'], fsclient=fsclient, mods=ext_mods, **opts['proxy'] + opts, argv, opts["id"], fsclient=fsclient, mods=ext_mods, **opts["proxy"] ) conn.args = args conn.kwargs = kwargs - thin_dir = conn.opts['thin_dir'] - thin_dir = thin_dir.replace('proxy', '') - conn.opts['thin_dir'] = thin_dir + thin_dir = conn.opts["thin_dir"] + thin_dir = thin_dir.replace("proxy", "") + conn.opts["thin_dir"] = thin_dir conn.thin_dir = thin_dir return conn def init(opts): - ''' + """ Init the SSH connection, and execute a simple call to ensure that the remote device is reachable, otherwise throw an error. - ''' + """ global CONN, INITIALIZED - if not salt.utils.path.which('ssh'): + if not salt.utils.path.which("ssh"): raise salt.exceptions.SaltSystemExit( code=-1, - msg='No ssh binary found in path -- ssh must be installed for this Proxy module. Exiting.', + msg="No ssh binary found in path -- ssh must be installed for this Proxy module. Exiting.", ) - CONN = _prep_conn(opts, 'cmd.run', 'echo') + CONN = _prep_conn(opts, "cmd.run", "echo") INITIALIZED = True def initialized(): - ''' + """ Proxy initialized properly? - ''' + """ return INITIALIZED def module_executors(): - ''' + """ Return the list of executors that should invoke the Salt functions. - ''' - return ['ssh'] + """ + return ["ssh"] def call(fun, *args, **kwargs): - ''' + """ Call an arbitrary Salt function and return the output. 
- ''' + """ global CONN, INITIALIZED if not CONN or not INITIALIZED: return opts = CONN.opts - opts['output'] = 'json' + opts["output"] = "json" ssh_conn = _prep_conn(opts, fun, *args, **kwargs) ret = ssh_conn.run() if ret[2] != 0: - log.error('[%s] %s', opts['id'], ret[1]) + log.error("[%s] %s", opts["id"], ret[1]) return ret[0] thin_ret = json.loads(ret[0]) - if '_error' in thin_ret['local']: - log.error(thin_ret['local']['_error']) - if 'stdout' in thin_ret['local']: - log.error(thin_ret['local']['stdout']) - return thin_ret['local']['return'] + if "_error" in thin_ret["local"]: + log.error(thin_ret["local"]["_error"]) + if "stdout" in thin_ret["local"]: + log.error(thin_ret["local"]["stdout"]) + return thin_ret["local"]["return"] def ping(): - ''' + """ Execute "echo" on the remote host to ensure it's still accessible. - ''' + """ global CONN, INITIALIZED if not CONN or not INITIALIZED: - log.debug('Not connected, or not initialized') + log.debug("Not connected, or not initialized") return False ret = CONN.run() log.debug(ret) @@ -218,20 +218,20 @@ def ping(): def grains(): - ''' + """ Invoke grains.items from the thin Salt on the remote machine, in order to return here the Grains. - ''' + """ global GRAINS_CACHE if not GRAINS_CACHE: - GRAINS_CACHE = call('grains.items') + GRAINS_CACHE = call("grains.items") return GRAINS_CACHE def shutdown(opts): - ''' + """ Buh-bye... - ''' + """ global CONN, INITIALIZED if CONN and INITIALIZED: del CONN diff --git a/salt_sproxy/_roster/__init__.py b/salt_sproxy/_roster/__init__.py index f03ead5c..4f537513 100644 --- a/salt_sproxy/_roster/__init__.py +++ b/salt_sproxy/_roster/__init__.py @@ -1,15 +1,15 @@ # -*- coding: utf-8 -*- -''' +""" Various features for the Roster modules. -''' +""" from __future__ import absolute_import import re import fnmatch import logging +import six import salt.cache -from salt.ext import six import salt.utils.minions import salt.utils.dictupdate @@ -24,181 +24,175 @@ def load_cache(pool, __runner__, opts, tgt, tgt_type=None): - ''' + """ Load the Pillar and Grain cache, as required, and merge the Roster Grains and Pillar into. - ''' - if opts.get('grains'): + """ + if opts.get("grains"): for device, device_opts in six.iteritems(pool): - if 'minion_opts' not in device_opts: - device_opts['minion_opts'] = {} - if 'grains' not in device_opts['minion_opts']: - device_opts['minion_opts']['grains'] = {} - device_opts['minion_opts']['grains'] = salt.utils.dictupdate.merge( - opts['grains'], device_opts['minion_opts']['grains'], merge_lists=True, + if "minion_opts" not in device_opts: + device_opts["minion_opts"] = {} + if "grains" not in device_opts["minion_opts"]: + device_opts["minion_opts"]["grains"] = {} + device_opts["minion_opts"]["grains"] = salt.utils.dictupdate.merge( + opts["grains"], + device_opts["minion_opts"]["grains"], + merge_lists=True, ) - if tgt_type in ('glob', 'pcre', 'list'): + if tgt_type in ("glob", "pcre", "list"): # When the target type is glob, pcre, or list, we don't require grains # or pillar loaded from the cache, because the targeting won't depend on # those. return pool - if not opts.get('use_cached_grains', True) and not opts.get( - 'use_cached_pillar', True + if not opts.get("use_cached_grains", True) and not opts.get( + "use_cached_pillar", True ): return pool # NOTE: It wouldn't be feasible to use the cache.grains or cache.pillar # Runners as they rely on fetching data from the Master, for Minions that # are accepted. What we're doing here is reading straight from the cache. 
- log.debug('Loading cached and merging into the Roster data') + log.debug("Loading cached and merging into the Roster data") cache = salt.cache.factory(opts) - cache_pool = cache.list('minions') + cache_pool = cache.list("minions") for device in cache_pool: if device not in pool: - log.trace('%s has cache, but is not in the Roster pool', device) + log.trace("%s has cache, but is not in the Roster pool", device) continue - if 'minion_opts' not in pool[device]: - pool[device]['minion_opts'] = {'grains': {}, 'pillar': {}} - cache_key = 'minions/{}/data'.format(device) - if opts.get('target_use_cached_grains', True) and tgt_type in ( - 'compound', - 'grain', - 'grain_pcre', - 'nodegroup', + if "minion_opts" not in pool[device]: + pool[device]["minion_opts"] = {"grains": {}, "pillar": {}} + cache_key = "minions/{}/data".format(device) + if opts.get("target_use_cached_grains", True) and tgt_type in ( + "compound", + "grain", + "grain_pcre", + "nodegroup", ): - log.debug('Fetching cached Grains for %s', device) - cached_grains = cache.fetch(cache_key, 'grains') + log.debug("Fetching cached Grains for %s", device) + cached_grains = cache.fetch(cache_key, "grains") if cached_grains: - pool[device]['minion_opts']['grains'] = salt.utils.dictupdate.merge( + pool[device]["minion_opts"]["grains"] = salt.utils.dictupdate.merge( cached_grains, - pool[device]['minion_opts'].get('grains', {}), + pool[device]["minion_opts"].get("grains", {}), merge_lists=True, ) - if opts.get('target_use_cached_pillar', True) and tgt_type in ( - 'compound', - 'pillar', - 'pillar_pcre', - 'pillar_target', - 'nodegroup', + if opts.get("target_use_cached_pillar", True) and tgt_type in ( + "compound", + "pillar", + "pillar_pcre", + "pillar_target", + "nodegroup", ): - log.debug('Fetching cached Pillar for %s', device) - cached_pillar = cache.fetch(cache_key, 'pillar') + log.debug("Fetching cached Pillar for %s", device) + cached_pillar = cache.fetch(cache_key, "pillar") if cached_pillar: - pool[device]['minion_opts']['pillar'] = salt.utils.dictupdate.merge( + pool[device]["minion_opts"]["pillar"] = salt.utils.dictupdate.merge( cached_pillar, - pool[device]['minion_opts'].get('pillar', {}), + pool[device]["minion_opts"].get("pillar", {}), merge_lists=True, ) - log.debug('The device pool with the cached data') + log.debug("The device pool with the cached data") log.debug(pool) return pool def glob(pool, tgt, opts=None): - ''' - ''' - log.debug('Glob matching on %s ? %s', pool.items(), tgt) + """ """ + log.debug("Glob matching on %s ? 
%s", pool.items(), tgt) return { minion: pool[minion] for minion in pool.keys() if fnmatch.fnmatch(minion, tgt) } def grain(pool, tgt, opts=None): - ''' - ''' - delimiter = opts.get('delimiter', DEFAULT_TARGET_DELIM) - log.debug('Grain matching on %s, over %s', tgt, pool) + """ """ + delimiter = opts.get("delimiter", DEFAULT_TARGET_DELIM) + log.debug("Grain matching on %s, over %s", tgt, pool) ret = { minion: pool[minion] for minion in pool.keys() if subdict_match( - pool[minion].get('minion_opts', {}).get('grains', {}), + pool[minion].get("minion_opts", {}).get("grains", {}), tgt, delimiter=delimiter, ) } - log.debug('Grain match returned') + log.debug("Grain match returned") log.debug(ret) return ret def grain_pcre(pool, tgt, opts=None): - ''' - ''' - delimiter = opts.get('delimiter', DEFAULT_TARGET_DELIM) - log.debug('Grain PCRE matching on %s, over %s', tgt, pool) + """ """ + delimiter = opts.get("delimiter", DEFAULT_TARGET_DELIM) + log.debug("Grain PCRE matching on %s, over %s", tgt, pool) ret = { minion: pool[minion] for minion in pool.keys() if subdict_match( - pool[minion].get('minion_opts', {}).get('grains', {}), + pool[minion].get("minion_opts", {}).get("grains", {}), tgt, delimiter=delimiter, regex_match=True, ) } - log.debug('Grain PCRE match returned') + log.debug("Grain PCRE match returned") log.debug(ret) return ret def pillar(pool, tgt, opts=None): - ''' - ''' - delimiter = opts.get('delimiter', DEFAULT_TARGET_DELIM) - log.debug('Pillar matching on %s, over %s', tgt, pool) + """ """ + delimiter = opts.get("delimiter", DEFAULT_TARGET_DELIM) + log.debug("Pillar matching on %s, over %s", tgt, pool) ret = { minion: pool[minion] for minion in pool.keys() if subdict_match( - pool[minion].get('minion_opts', {}).get('pillar', {}), + pool[minion].get("minion_opts", {}).get("pillar", {}), tgt, delimiter=delimiter, ) } - log.debug('Pillar match returned') + log.debug("Pillar match returned") log.debug(ret) return ret def pillar_pcre(pool, tgt, opts=None): - ''' - ''' - delimiter = opts.get('delimiter', DEFAULT_TARGET_DELIM) - log.debug('Pillar PCRE matching on %s, over %s', tgt, pool) + """ """ + delimiter = opts.get("delimiter", DEFAULT_TARGET_DELIM) + log.debug("Pillar PCRE matching on %s, over %s", tgt, pool) ret = { minion: pool[minion] for minion in pool.keys() if subdict_match( - pool[minion].get('minion_opts', {}).get('pillar', {}), + pool[minion].get("minion_opts", {}).get("pillar", {}), tgt, delimiter=delimiter, regex_match=True, ) } - log.debug('Pillar PCRE match returned') + log.debug("Pillar PCRE match returned") log.debug(ret) return ret def list_(pool, tgt, opts=None): - ''' - ''' - log.debug('List matching on %s ? %s', pool.items(), tgt) + """ """ + log.debug("List matching on %s ? %s", pool.items(), tgt) return {minion: pool[minion] for minion in pool.keys() if minion in tgt} def pcre(pool, tgt, opts=None): - ''' - ''' - log.debug('PCRE matching on %s ? %s', pool.items(), tgt) + """ """ + log.debug("PCRE matching on %s ? 
%s", pool.items(), tgt) rgx = re.compile(tgt) return {minion: pool[minion] for minion in pool.keys() if rgx.search(minion)} def nodegroup(pool, tgt, opts=None): - ''' - ''' - nodegroups = opts.get('nodegroups', {}) + """ """ + nodegroups = opts.get("nodegroups", {}) # tgt is the name of the nodegroup if tgt not in nodegroups: return {} @@ -207,27 +201,27 @@ def nodegroup(pool, tgt, opts=None): TGT_FUN = { - 'glob': glob, - 'G': grain, - 'grain': grain, - 'P': grain_pcre, - 'grain_pcre': grain_pcre, - 'I': pillar, - 'pillar': pillar, - 'pillar_target': pillar, - 'J': pillar_pcre, - 'pillar_pcre': pillar_pcre, - 'L': list_, - 'list': list_, - 'N': nodegroup, - 'nodegroup': nodegroup, - 'E': pcre, - 'pcre': pcre, + "glob": glob, + "G": grain, + "grain": grain, + "P": grain_pcre, + "grain_pcre": grain_pcre, + "I": pillar, + "pillar": pillar, + "pillar_target": pillar, + "J": pillar_pcre, + "pillar_pcre": pillar_pcre, + "L": list_, + "list": list_, + "N": nodegroup, + "nodegroup": nodegroup, + "E": pcre, + "pcre": pcre, } def compound(pool, tgt, opts=None): - ''' + """ Execute a compound match on a pool of devices returned by the Roster. The Roster module must collect the entire list of devices managed by this Master / salt-sproxy instance, and this function helps filtering out the Minions @@ -236,14 +230,14 @@ def compound(pool, tgt, opts=None): together with their opts (i.e., extra Grains and Pillar). The first argument passed in is ``pool`` which is a dictionary containing the total group of devices that can possibly be managed, and their opts. - ''' + """ minions = {} if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): - log.error('Compound target received that is neither string, list nor tuple') + log.error("Compound target received that is neither string, list nor tuple") return minions results = [] - opers = ['and', 'or', 'not', '(', ')'] + opers = ["and", "or", "not", "(", ")"] if isinstance(tgt, six.string_types): words = tgt.split() @@ -256,38 +250,38 @@ def compound(pool, tgt, opts=None): if word in opers: if results: - if results[-1] == '(' and word in ('and', 'or'): + if results[-1] == "(" and word in ("and", "or"): log.error('Invalid beginning operator after "(": %s', word) return {} - if word == 'not': - if not results[-1] in ('and', 'or', '('): - results.append('and') + if word == "not": + if not results[-1] in ("and", "or", "("): + results.append("and") results.append(word) else: # seq start with binary oper, fail - if word not in ['(', 'not']: - log.error('Invalid beginning operator: %s', word) + if word not in ["(", "not"]: + log.error("Invalid beginning operator: %s", word) return {} results.append(word) - elif target_info and target_info['engine']: - engine = TGT_FUN.get(target_info['engine']) + elif target_info and target_info["engine"]: + engine = TGT_FUN.get(target_info["engine"]) if not engine: # If an unknown engine is called at any time, fail out log.error( 'Unrecognized target engine "%s" for target ' 'expression "%s"', - target_info['engine'], + target_info["engine"], word, ) return {} - res = engine(pool, target_info['pattern'], opts=opts) + res = engine(pool, target_info["pattern"], opts=opts) results.append(str(set(res.keys()))) else: res = glob(pool, word, opts=opts) results.append(str(set(res.keys()))) - log.debug('Collected individual results') + log.debug("Collected individual results") log.debug(results) expr_chunks = [] @@ -308,42 +302,42 @@ def compound(pool, tgt, opts=None): # Below, when evaluating the expression, I've added 
a block to catch the # exception and ask for bug report for index, res in enumerate(results): - if res == 'not': - res = '{} -'.format(universe) - if res == 'and': - if results[index + 1] == 'not': - res = '-' - results[index + 1] = '' + if res == "not": + res = "{} -".format(universe) + if res == "and": + if results[index + 1] == "not": + res = "-" + results[index + 1] = "" else: - res = '&' - elif res == 'or': - if results[index + 1] == 'not': - res = '| ( {} -'.format(universe) + res = "&" + elif res == "or": + if results[index + 1] == "not": + res = "| ( {} -".format(universe) parens_count += 1 - results[index + 1] = '' + results[index + 1] = "" else: - res = '|' + res = "|" expr_chunks.append(res) - expr_chunks += ')' * parens_count + expr_chunks += ")" * parens_count - match_expr = ' '.join(expr_chunks) - log.debug('Matching expression: %s', match_expr) + match_expr = " ".join(expr_chunks) + log.debug("Matching expression: %s", match_expr) try: matched_minions = eval(match_expr) # pylint: disable=W0123 except SyntaxError: - log.error('Looks like this target expression is failing.') - if 'or not' in tgt: + log.error("Looks like this target expression is failing.") + if "or not" in tgt: log.error( - 'This may be a salt-sproxy bug, please report at: \n' - 'https://github.com/mirceaulinic/salt-sproxy/issues/new?' - 'labels=bug%2C+pending+triage&template=bug_report.md' - '&title=Issue%20when%20using%20the%20compound%20target' + "This may be a salt-sproxy bug, please report at: \n" + "https://github.com/mirceaulinic/salt-sproxy/issues/new?" + "labels=bug%2C+pending+triage&template=bug_report.md" + "&title=Issue%20when%20using%20the%20compound%20target" ) return {} - log.debug('Matched Minions') + log.debug("Matched Minions") log.debug(matched_minions) return {minion: pool[minion] for minion in matched_minions} -TGT_FUN['compound'] = compound +TGT_FUN["compound"] = compound diff --git a/salt_sproxy/_roster/ansible.py b/salt_sproxy/_roster/ansible.py index 39e07913..8bf461e6 100644 --- a/salt_sproxy/_roster/ansible.py +++ b/salt_sproxy/_roster/ansible.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +""" Read in an Ansible inventory file or script Flat inventory files should be in the regular ansible inventory format. @@ -86,7 +86,7 @@ True Any of the [groups] or direct hostnames will return. The 'all' is special, and returns everything. 
-''' +""" # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import copy @@ -109,62 +109,62 @@ log = logging.getLogger(__name__) CONVERSION = { - 'ansible_ssh_host': 'host', - 'ansible_ssh_port': 'port', - 'ansible_ssh_user': 'user', - 'ansible_ssh_pass': 'passwd', - 'ansible_sudo_pass': 'sudo', - 'ansible_ssh_private_key_file': 'priv', + "ansible_ssh_host": "host", + "ansible_ssh_port": "port", + "ansible_ssh_user": "user", + "ansible_ssh_pass": "passwd", + "ansible_sudo_pass": "sudo", + "ansible_ssh_private_key_file": "priv", } -__virtualname__ = 'ansible' +__virtualname__ = "ansible" def __virtual__(): return ( - utils_which('ansible-inventory') and __virtualname__, - 'Install `ansible` to use inventory', + utils_which("ansible-inventory") and __virtualname__, + "Install `ansible` to use inventory", ) -def targets(tgt, tgt_type='glob', **kwargs): - ''' +def targets(tgt, tgt_type="glob", **kwargs): + """ Return the targets from the ansible inventory_file Default: /etc/salt/roster - ''' - inventory = __runner__['salt.cmd']( - 'cmd.run', 'ansible-inventory -i {0} --list'.format(get_roster_file(__opts__)) + """ + inventory = __runner__["salt.cmd"]( + "cmd.run", "ansible-inventory -i {0} --list".format(get_roster_file(__opts__)) ) - __context__['inventory'] = json.loads(utils_to_str(inventory)) + __context__["inventory"] = json.loads(utils_to_str(inventory)) - if tgt_type == 'nodegroup': + if tgt_type == "nodegroup": hosts = _get_hosts_from_group(tgt) return {host: _get_hostvars(host) for host in hosts} - pool = {host: _get_hostvars(host) for host in _get_hosts_from_group('all')} + pool = {host: _get_hostvars(host) for host in _get_hosts_from_group("all")} pool = salt_sproxy._roster.load_cache( pool, __runner__, __opts__, tgt, tgt_type=tgt_type ) - log.debug('Ansible devices pool') + log.debug("Ansible devices pool") log.debug(pool) engine = salt_sproxy._roster.TGT_FUN[tgt_type] return engine(pool, tgt, opts=__opts__) def _get_hosts_from_group(group): - inventory = __context__['inventory'] - hosts = [host for host in inventory.get(group, {}).get('hosts', [])] - for child in inventory.get(group, {}).get('children', []): + inventory = __context__["inventory"] + hosts = [host for host in inventory.get(group, {}).get("hosts", [])] + for child in inventory.get(group, {}).get("children", []): hosts.extend(_get_hosts_from_group(child)) return hosts def _get_hostvars(host): - hostvars = __context__['inventory']['_meta'].get('hostvars', {}).get(host, {}) - ret = copy.deepcopy(__opts__.get('roster_defaults', {})) - for value in CONVERSION: - if value in hostvars: - ret[CONVERSION[value]] = hostvars.pop(value) - ret['minion_opts'] = hostvars - if 'host' not in ret: - ret['host'] = host + hostvars = __context__["inventory"]["_meta"].get("hostvars", {}).get(host, {}) + ret = copy.deepcopy(__opts__.get("roster_defaults", {})) + for key, value in CONVERSION.items(): + if key in hostvars: + ret[value] = hostvars.pop(key) + ret["minion_opts"] = hostvars + if "host" not in ret: + ret["host"] = host return ret diff --git a/salt_sproxy/_roster/file.py b/salt_sproxy/_roster/file.py index f920d27b..046b69bf 100644 --- a/salt_sproxy/_roster/file.py +++ b/salt_sproxy/_roster/file.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -''' +""" Load the list of devices from an arbitrary SLS file. 
To use this module, you only need to configure the --roster option to ``file`` (on the CLI or Master config), and if the Roster SLS file is in a different location than ``/etc/salt/roster``, you'd also need to specify ``--roster-file`` (or ``roster_file`` in the Master config). -''' +""" # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging @@ -17,30 +17,30 @@ import salt_sproxy._roster -__virtualname__ = 'file' +__virtualname__ = "file" log = logging.getLogger(__name__) -def targets(tgt, tgt_type='glob', **kwargs): - ''' +def targets(tgt, tgt_type="glob", **kwargs): + """ Return the targets from the sls file, checks opts for location but defaults to /etc/salt/roster - ''' + """ template = get_roster_file(__opts__) rend = salt.loader.render(__opts__, {}) - __runner__.name = '__salt__' - kwargs['__salt__'] = __runner__ + __runner__.name = "__salt__" + kwargs["__salt__"] = __runner__ pool = compile_template( template, rend, - __opts__['renderer'], - __opts__['renderer_blacklist'], - __opts__['renderer_whitelist'], - mask_value='passw*', + __opts__["renderer"], + __opts__["renderer_blacklist"], + __opts__["renderer_whitelist"], + mask_value="passw*", **kwargs ) - pool = {host: {'minion_opts': conf} for host, conf in pool.items()} + pool = {host: {"minion_opts": conf} for host, conf in pool.items()} pool = salt_sproxy._roster.load_cache( pool, __runner__, __opts__, tgt, tgt_type=tgt_type ) diff --git a/salt_sproxy/_roster/netbox.py b/salt_sproxy/_roster/netbox.py index 72a56dde..b3ef8f1f 100644 --- a/salt_sproxy/_roster/netbox.py +++ b/salt_sproxy/_roster/netbox.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +""" Load devices from `NetBox `__, and make them available for salt-ssh or salt-sproxy (or any other program that doesn't require (Proxy) Minions running). @@ -38,7 +38,7 @@ file to change this behavior. See https://github.com/netbox-community/netbox/releases/tag/v2.6.0 for more information -''' +""" # Import Python libs from __future__ import absolute_import, print_function, unicode_literals @@ -56,25 +56,25 @@ import salt_sproxy._roster -__virtualname__ = 'netbox' +__virtualname__ = "netbox" log = logging.getLogger(__name__) -AUTH_ENDPOINTS = ('secrets',) +AUTH_ENDPOINTS = ("secrets",) def __virtual__(): if not HAS_PYNETBOX: - return (False, 'Please install pynetbox to be able to use the NetBox Roster') + return (False, "Please install pynetbox to be able to use the NetBox Roster") return __virtualname__ -def _setval(key, val, dict_=None, delim=':'): - ''' +def _setval(key, val, dict_=None, delim=":"): + """ Set a value under the dictionary hierarchy identified under the key. The target 'foo:bar:baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. 
- ''' + """ if not dict_: dict_ = {} prev_hier = dict_ @@ -92,10 +92,10 @@ def _setval(key, val, dict_=None, delim=':'): def _netbox_config(): - config = __opts__.get('netbox') + config = __opts__.get("netbox") if not config: raise CommandExecutionError( - 'NetBox configuration could not be found in the Master config' + "NetBox configuration could not be found in the Master config" ) return config @@ -103,15 +103,15 @@ def _netbox_config(): def _nb_obj(auth_required=False): pynb_kwargs = {} nb_config = _netbox_config() - pynb_kwargs['token'] = nb_config.get('token') + pynb_kwargs["token"] = nb_config.get("token") if auth_required: - pynb_kwargs['private_key_file'] = nb_config.get('keyfile') - return pynetbox.api(nb_config.get('url'), **pynb_kwargs) + pynb_kwargs["private_key_file"] = nb_config.get("keyfile") + return pynetbox.api(nb_config.get("url"), **pynb_kwargs) def _strip_url_field(input_dict): - if 'url' in input_dict.keys(): - del input_dict['url'] + if "url" in input_dict.keys(): + del input_dict["url"] for k, v in input_dict.items(): if isinstance(v, dict): _strip_url_field(v) @@ -119,7 +119,7 @@ def _strip_url_field(input_dict): def _netbox_filter(app, endpoint, **kwargs): - ''' + """ Get a list of items from NetBox. app @@ -135,7 +135,7 @@ def _netbox_filter(app, endpoint, **kwargs): and clicking Filters. e.g., ``role=router`` Returns a list of dictionaries. - ''' + """ ret = [] nb = _nb_obj(auth_required=True if app in AUTH_ENDPOINTS else False) clean_kwargs = salt.utils.args.clean_kwargs(**kwargs) @@ -148,31 +148,31 @@ def _netbox_filter(app, endpoint, **kwargs): return ret -def targets(tgt, tgt_type='glob', **kwargs): - ''' +def targets(tgt, tgt_type="glob", **kwargs): + """ Return the targets from NetBox. - ''' - netbox_filters = __opts__.get('netbox', {}).get('filters', {}) + """ + netbox_filters = __opts__.get("netbox", {}).get("filters", {}) netbox_filters.update(**kwargs) filtered = False - if tgt_type == 'list' or ( - tgt_type == 'glob' and not any([char in tgt for char in '*?[!']) + if tgt_type == "list" or ( + tgt_type == "glob" and not any([char in tgt for char in "*?[!"]) ): - netbox_filters['name'] = tgt + netbox_filters["name"] = tgt filtered = True - elif tgt_type == 'grain' and tgt.startswith('netbox:'): - levels = tgt.split('netbox:')[1].split(':') + elif tgt_type == "grain" and tgt.startswith("netbox:"): + levels = tgt.split("netbox:")[1].split(":") if len(levels) > 2: - netbox_filters[levels[0]] = _setval(':'.join(levels[1:-1]), levels[-1]) + netbox_filters[levels[0]] = _setval(":".join(levels[1:-1]), levels[-1]) filtered = True elif len(levels) == 2: netbox_filters[levels[0]] = levels[1] filtered = True - log.debug('Querying NetBox with the following filters') + log.debug("Querying NetBox with the following filters") log.debug(netbox_filters) - netbox_devices = _netbox_filter('dcim', 'devices', **netbox_filters) + netbox_devices = _netbox_filter("dcim", "devices", **netbox_filters) pool = { - device['name']: {'minion_opts': {'grains': {'netbox': device}}} + device["name"]: {"minion_opts": {"grains": {"netbox": device}}} for device in netbox_devices } if filtered: diff --git a/salt_sproxy/_roster/pillar.py b/salt_sproxy/_roster/pillar.py index 0af56f7c..c317ddac 100644 --- a/salt_sproxy/_roster/pillar.py +++ b/salt_sproxy/_roster/pillar.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +""" Load the list of devices from the Pillar. Simply configure the ``roster`` option to point to this module, while making @@ -40,35 +40,35 @@ External Pillars. 
Check out https://docs.saltstack.com/en/latest/ref/pillar/all/index.html for the complete list of available Pillar modules you can use. -''' +""" # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging import salt_sproxy._roster -__virtualname__ = 'pillar' +__virtualname__ = "pillar" log = logging.getLogger(__name__) -def targets(tgt, tgt_type='glob', **kwargs): - ''' +def targets(tgt, tgt_type="glob", **kwargs): + """ Return the targets from External Pillar requested. - ''' - roster_opts = __opts__.get('roster_pillar', {}) - minion_id = roster_opts.get('minion_id', kwargs.get('minion_id', '*')) - pillar_key = roster_opts.get('pillar_key', kwargs.get('pillar_key', 'devices')) - saltenv = roster_opts.get('saltenv', kwargs.get('saltenv', 'base')) - pillarenv = roster_opts.get('pillarenv', kwargs.get('pillarenv')) - pillar = __runner__['pillar.show_pillar']( + """ + roster_opts = __opts__.get("roster_pillar", {}) + minion_id = roster_opts.get("minion_id", kwargs.get("minion_id", "*")) + pillar_key = roster_opts.get("pillar_key", kwargs.get("pillar_key", "devices")) + saltenv = roster_opts.get("saltenv", kwargs.get("saltenv", "base")) + pillarenv = roster_opts.get("pillarenv", kwargs.get("pillarenv")) + pillar = __runner__["pillar.show_pillar"]( minion=minion_id, saltenv=saltenv, pillarenv=pillarenv ) pillar_devices = pillar[pillar_key] - log.debug('Compiled the following list of devices from the Pillar') + log.debug("Compiled the following list of devices from the Pillar") log.debug(pillar_devices) pool = { - device.pop('id', device.pop('name')): {'minion_opts': device} + device.pop("id", device.pop("name")): {"minion_opts": device} for device in pillar_devices } pool = salt_sproxy._roster.load_cache( diff --git a/salt_sproxy/_runners/proxy.py b/salt_sproxy/_runners/proxy.py index 64fde1df..4c6eeed6 100644 --- a/salt_sproxy/_runners/proxy.py +++ b/salt_sproxy/_runners/proxy.py @@ -12,12 +12,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. -''' +""" Salt Runner to invoke arbitrary commands on network devices that are not managed via a Proxy or regular Minion. Therefore, this Runner doesn't necessarily require the targets to be up and running, as it will connect to collect the Grains, compile the Pillar, then execute the commands. -''' +""" from __future__ import absolute_import, print_function, unicode_literals # Import Python std lib @@ -32,6 +32,8 @@ import traceback import multiprocessing +import six + # Import Salt modules import salt.cache import salt.loader @@ -40,7 +42,7 @@ import salt.version import salt.utils.jid import salt.utils.master -from salt.ext import six + from salt.minion import SMinion from salt.cli.batch import Batch import salt.utils.stringutils @@ -72,7 +74,7 @@ # module properties # ------------------------------------------------------------------------------ -_SENTINEL = 'FIN.' +_SENTINEL = "FIN." 
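[Reviewer note] `_SENTINEL` above is the terminator for the runner's result queue: per-device worker processes put `(ret, retcode)` tuples on a shared `multiprocessing.Queue`, and the consumer loop stops once it reads the sentinel. A minimal self-contained sketch of the pattern (minion ids and payloads are made up):

```python
# Minimal sketch of the sentinel-terminated result queue used by this runner.
import multiprocessing

_SENTINEL = "FIN."

def _worker(ret_queue, minion_id):
    # A real worker would invoke the Salt function here.
    ret_queue.put(({minion_id: "pong"}, 0))

if __name__ == "__main__":
    ret_queue = multiprocessing.Queue()
    procs = [
        multiprocessing.Process(target=_worker, args=(ret_queue, mid))
        for mid in ("router1", "router2")
    ]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()
    ret_queue.put((_SENTINEL, 0))  # tell the consumer no more results follow
    while True:
        ret, retcode = ret_queue.get()
        if ret == _SENTINEL:
            break
        print(ret, retcode)
```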
log = logging.getLogger(__name__) @@ -86,7 +88,7 @@ def _napalm_is_proxy(opts): - return opts.get('proxy', {}).get('proxytype') == 'napalm' + return opts.get("proxy", {}).get("proxytype") == "napalm" # Point the native is_proxy function to the above, so it doesn't check whether @@ -116,9 +118,8 @@ def _salt_call_and_return( events=True, **opts ): - ''' - ''' - opts['jid'] = jid + """ """ + opts["jid"] = jid ret, retcode = salt_call( minion_id, salt_function, @@ -127,22 +128,22 @@ def _salt_call_and_return( **opts ) if events: - __salt__['event.send']( - 'proxy/runner/{jid}/ret/{minion_id}'.format(minion_id=minion_id, jid=jid), + __salt__["event.send"]( + "proxy/runner/{jid}/ret/{minion_id}".format(minion_id=minion_id, jid=jid), { - 'fun': salt_function, - 'fun_args': arg, - 'id': minion_id, - 'jid': jid, - 'return': ret, - 'retcode': retcode, - 'success': retcode == 0, + "fun": salt_function, + "fun_args": arg, + "id": minion_id, + "jid": jid, + "return": ret, + "retcode": retcode, + "success": retcode == 0, }, ) try: ret = json.loads(json.dumps(ret)) except (ValueError, TypeError): - log.error('Function return is not JSON-serializable data', exc_info=True) + log.error("Function return is not JSON-serializable data", exc_info=True) log.error(ret) ret_queue.put(({minion_id: ret}, retcode)) sys.exit(retcode) @@ -151,8 +152,7 @@ def _salt_call_and_return( def _existing_proxy_cli_batch( cli_batch, ret_queue, batch_stop_queue, sproxy_stop_queue ): - ''' - ''' + """ """ run = cli_batch.run() cumulative_retcode = 0 for ret in run: @@ -161,16 +161,15 @@ def _existing_proxy_cli_batch( retcode = 0 if ret and isinstance(ret, dict): minion_id = list(ret.keys())[0] - if isinstance(ret[minion_id], dict) and 'retcode' in ret[minion_id]: - retcode = ret[minion_id].pop('retcode') + if isinstance(ret[minion_id], dict) and "retcode" in ret[minion_id]: + retcode = ret[minion_id].pop("retcode") ret_queue.put((ret, retcode)) cumulative_retcode = max(cumulative_retcode, retcode) batch_stop_queue.put(cumulative_retcode) def _receive_replies_async(ret_queue, done_queue, progress_bar): - ''' - ''' + """ """ count = 0 while True: ret, retcode = ret_queue.get() @@ -180,7 +179,10 @@ def _receive_replies_async(ret_queue, done_queue, progress_bar): # When async, print out the replies as soon as they arrive # after passing them through the outputter of choice out_fmt = salt.output.out_format( - ret, __opts__.get('output', 'nested'), opts=__opts__, _retcode=retcode, + ret, + __opts__.get("output", "nested"), + opts=__opts__, + _retcode=retcode, ) if out_fmt: # out_fmt can be empty string, for example, when using the ``quiet`` @@ -192,8 +194,7 @@ def _receive_replies_async(ret_queue, done_queue, progress_bar): def _receive_replies_sync(ret_queue, static_queue, done_queue, progress_bar): - ''' - ''' + """ """ count = 0 cumulative_retcode = 0 while True: @@ -215,7 +216,7 @@ def __init__( self.eauth = eauth if eauth else {} self.pub_kwargs = eauth if eauth else {} self.quiet = quiet - self.local = salt.client.get_local_client(opts['conf_file']) + self.local = salt.client.get_local_client(opts["conf_file"]) self.minions, self.ping_gen, self.down_minions = self.gather_minions() self.options = parser @@ -224,10 +225,10 @@ def _gather_minions(self): class NoPingBatch(Batch): - ''' + """ Similar to the native Salt Batch. but without issuing test.ping to ensure that the Minions are up and running. 
- ''' + """ def __init__( self, opts, eauth=None, quiet=False, parser=None @@ -236,8 +237,8 @@ def __init__( self.eauth = eauth if eauth else {} self.pub_kwargs = eauth if eauth else {} self.quiet = quiet - self.local = salt.client.get_local_client(opts['conf_file']) - self.minions, self.ping_gen, self.down_minions = self.opts['tgt'], [], [] + self.local = salt.client.get_local_client(opts["conf_file"]) + self.minions, self.ping_gen, self.down_minions = self.opts["tgt"], [], [] self.options = parser def gather_minions(self): @@ -248,32 +249,32 @@ def gather_minions(self): # and extended to allow more flexible options for the (pre-)loading of the # Pillars and the Grains. class SProxyMinion(SMinion): - ''' + """ Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. - ''' + """ def _matches_target(self): match_func = self.matchers.get( - '{0}_match.match'.format(self.opts['__tgt_type']), None + "{0}_match.match".format(self.opts["__tgt_type"]), None ) if match_func is None: return False - if self.opts['__tgt_type'] in ('grain', 'grain_pcre', 'pillar'): - delimiter = self.opts.get('delimiter', DEFAULT_TARGET_DELIM) - if not match_func(self.opts['__tgt'], delimiter=delimiter): + if self.opts["__tgt_type"] in ("grain", "grain_pcre", "pillar"): + delimiter = self.opts.get("delimiter", DEFAULT_TARGET_DELIM) + if not match_func(self.opts["__tgt"], delimiter=delimiter): return False - elif not match_func(self.opts['__tgt']): + elif not match_func(self.opts["__tgt"]): return False else: - if not self.matchers['glob_match.match'](self.opts['__tgt']): + if not self.matchers["glob_match.match"](self.opts["__tgt"]): return False return True def gen_modules(self, initial_load=False): # pylint: disable=arguments-differ - ''' + """ Tell the minion to reload the execution modules. CLI Example: @@ -281,89 +282,89 @@ def gen_modules(self, initial_load=False): # pylint: disable=arguments-differ .. 
code-block:: bash salt '*' sys.reload_modules - ''' - if self.opts.get('proxy_preload_grains', True): + """ + if self.opts.get("proxy_preload_grains", True): loaded_grains = salt.loader.grains(self.opts) - self.opts['grains'].update(loaded_grains) + self.opts["grains"].update(loaded_grains) if ( - self.opts['roster_opts'] - and self.opts.get('proxy_merge_roster_grains', True) - and 'grains' in self.opts['roster_opts'] - and isinstance(self.opts['roster_opts']['grains'], dict) + self.opts["roster_opts"] + and self.opts.get("proxy_merge_roster_grains", True) + and "grains" in self.opts["roster_opts"] + and isinstance(self.opts["roster_opts"]["grains"], dict) ): # Merge the Grains from the Roster opts - log.debug('Merging Grains with the Roster provided ones') - self.opts['grains'] = salt.utils.dictupdate.merge( - self.opts['roster_opts']['grains'], self.opts['grains'] + log.debug("Merging Grains with the Roster provided ones") + self.opts["grains"] = salt.utils.dictupdate.merge( + self.opts["roster_opts"]["grains"], self.opts["grains"] ) cached_grains = None - if self.opts.get('proxy_use_cached_grains', True): - cached_grains = self.opts.pop('proxy_cached_grains', None) + if self.opts.get("proxy_use_cached_grains", True): + cached_grains = self.opts.pop("proxy_cached_grains", None) - initial_grains = copy.deepcopy(self.opts['grains']) + initial_grains = copy.deepcopy(self.opts["grains"]) if cached_grains: # Merging the collected Grains into the cached Grains, but only for # the initial Pillar compilation, to ensure we only do so to avoid # any processing errors. initial_grains = salt.utils.dictupdate.merge(cached_grains, initial_grains) - if self.opts.get('proxy_load_pillar', True): - self.opts['pillar'] = salt.pillar.get_pillar( + if self.opts.get("proxy_load_pillar", True): + self.opts["pillar"] = salt.pillar.get_pillar( self.opts, initial_grains, - self.opts['id'], - saltenv=self.opts['saltenv'], - pillarenv=self.opts.get('pillarenv'), + self.opts["id"], + saltenv=self.opts["saltenv"], + pillarenv=self.opts.get("pillarenv"), ).compile_pillar() - if self.opts['roster_opts'] and self.opts.get('proxy_merge_roster_opts', True): - if 'proxy' not in self.opts['pillar']: - self.opts['pillar']['proxy'] = {} - self.opts['pillar']['proxy'] = salt.utils.dictupdate.merge( - self.opts['pillar']['proxy'], self.opts['roster_opts'] + if self.opts["roster_opts"] and self.opts.get("proxy_merge_roster_opts", True): + if "proxy" not in self.opts["pillar"]: + self.opts["pillar"]["proxy"] = {} + self.opts["pillar"]["proxy"] = salt.utils.dictupdate.merge( + self.opts["pillar"]["proxy"], self.opts["roster_opts"] ) - self.opts['pillar']['proxy'].pop('grains', None) - self.opts['pillar']['proxy'].pop('pillar', None) + self.opts["pillar"]["proxy"].pop("grains", None) + self.opts["pillar"]["proxy"].pop("pillar", None) - if self.opts.get('preload_targeting', False) or self.opts.get( - 'invasive_targeting', False + if self.opts.get("preload_targeting", False) or self.opts.get( + "invasive_targeting", False ): - log.debug('Loading the Matchers modules') + log.debug("Loading the Matchers modules") self.matchers = salt.loader.matchers(self.opts) - if self.opts.get('preload_targeting', False): + if self.opts.get("preload_targeting", False): log.debug( - 'Preload targeting requested, trying to see if %s matches the target %s (%s)', - self.opts['id'], - str(self.opts['__tgt']), - self.opts['__tgt_type'], + "Preload targeting requested, trying to see if %s matches the target %s (%s)", + self.opts["id"], + 
str(self.opts["__tgt"]), + self.opts["__tgt_type"], ) matched = self._matches_target() if not matched: return - if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: + if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' - 'dictionaries for id {id}. Check your pillar/options ' - 'configuration and contents. Salt-proxy aborted.' - ).format(id=self.opts['id']) + "dictionaries for id {id}. Check your pillar/options " + "configuration and contents. Salt-proxy aborted." + ).format(id=self.opts["id"]) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) - if 'proxy' not in self.opts: - self.opts['proxy'] = {} - if 'proxy' not in self.opts['pillar']: - self.opts['pillar']['proxy'] = {} - self.opts['proxy'] = salt.utils.dictupdate.merge( - self.opts['proxy'], self.opts['pillar']['proxy'] + if "proxy" not in self.opts: + self.opts["proxy"] = {} + if "proxy" not in self.opts["pillar"]: + self.opts["pillar"]["proxy"] = {} + self.opts["proxy"] = salt.utils.dictupdate.merge( + self.opts["proxy"], self.opts["pillar"]["proxy"] ) # Then load the proxy module - fq_proxyname = self.opts['proxy']['proxytype'] + fq_proxyname = self.opts["proxy"]["proxytype"] self.utils = salt.loader.utils(self.opts) self.proxy = salt.loader.proxy( self.opts, utils=self.utils, whitelist=[fq_proxyname] @@ -371,102 +372,102 @@ def gen_modules(self, initial_load=False): # pylint: disable=arguments-differ self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, notify=False, proxy=self.proxy ) - self.functions.pack['__grains__'] = copy.deepcopy(self.opts['grains']) + self.functions.pack["__grains__"] = copy.deepcopy(self.opts["grains"]) - self.functions.pack['__proxy__'] = self.proxy - self.proxy.pack['__salt__'] = self.functions - self.proxy.pack['__pillar__'] = self.opts['pillar'] + self.functions.pack["__proxy__"] = self.proxy + self.proxy.pack["__salt__"] = self.functions + self.proxy.pack["__pillar__"] = self.opts["pillar"] # No need to inject the proxy into utils, as we don't need scheduler for # this sort of short living Minion. # self.utils = salt.loader.utils(self.opts, proxy=self.proxy) - self.proxy.pack['__utils__'] = self.utils + self.proxy.pack["__utils__"] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() - if self.opts.get('proxy_no_connect', False): - log.info('Requested not to initialize the connection with the device') + if self.opts.get("proxy_no_connect", False): + log.info("Requested not to initialize the connection with the device") else: - log.debug('Trying to initialize the connection with the device') + log.debug("Trying to initialize the connection with the device") # When requested --no-connect, don't init the connection, but simply # go ahead and execute the function requested. if ( - '{0}.init'.format(fq_proxyname) not in self.proxy - or '{0}.shutdown'.format(fq_proxyname) not in self.proxy + "{0}.init".format(fq_proxyname) not in self.proxy + or "{0}.shutdown".format(fq_proxyname) not in self.proxy ): errmsg = ( - '[{0}] Proxymodule {1} is missing an init() or a shutdown() or both. '.format( - self.opts['id'], fq_proxyname + "[{0}] Proxymodule {1} is missing an init() or a shutdown() or both. ".format( + self.opts["id"], fq_proxyname ) - + 'Check your proxymodule. Salt-proxy aborted.' + + "Check your proxymodule. Salt-proxy aborted." 
) log.error(errmsg) self._running = False if self.unreachable_devices is not None: - self.unreachable_devices.append(self.opts['id']) + self.unreachable_devices.append(self.opts["id"]) raise SaltSystemExit( code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg ) - proxy_init_fn = self.proxy[fq_proxyname + '.init'] + proxy_init_fn = self.proxy[fq_proxyname + ".init"] try: proxy_init_fn(self.opts) self.connected = True except Exception as exc: log.error( - 'Encountered error when starting up the connection with %s:', - self.opts['id'], + "Encountered error when starting up the connection with %s:", + self.opts["id"], exc_info=True, ) if self.unreachable_devices is not None: - self.unreachable_devices.append(self.opts['id']) + self.unreachable_devices.append(self.opts["id"]) raise - if self.opts.get('proxy_load_grains', True): + if self.opts.get("proxy_load_grains", True): # When the Grains are loaded from the cache, no need to re-load them # again. - grains = copy.deepcopy(self.opts['grains']) + grains = copy.deepcopy(self.opts["grains"]) # Copy the existing Grains loaded so far, otherwise # salt.loader.grains is going to wipe what's under the grains # key in the opts. # After loading, merge with the previous loaded grains, which # may contain other grains from different sources, e.g., roster. loaded_grains = salt.loader.grains(self.opts, proxy=self.proxy) - self.opts['grains'] = salt.utils.dictupdate.merge(grains, loaded_grains) - if self.opts.get('proxy_load_pillar', True): - self.opts['pillar'] = salt.pillar.get_pillar( + self.opts["grains"] = salt.utils.dictupdate.merge(grains, loaded_grains) + if self.opts.get("proxy_load_pillar", True): + self.opts["pillar"] = salt.pillar.get_pillar( self.opts, - self.opts['grains'], - self.opts['id'], - saltenv=self.opts['saltenv'], - pillarenv=self.opts.get('pillarenv'), + self.opts["grains"], + self.opts["id"], + saltenv=self.opts["saltenv"], + pillarenv=self.opts.get("pillarenv"), ).compile_pillar() - self.functions.pack['__opts__'] = self.opts - self.functions.pack['__grains__'] = copy.deepcopy(self.opts['grains']) - self.functions.pack['__pillar__'] = copy.deepcopy(self.opts['pillar']) - self.grains_cache = copy.deepcopy(self.opts['grains']) + self.functions.pack["__opts__"] = self.opts + self.functions.pack["__grains__"] = copy.deepcopy(self.opts["grains"]) + self.functions.pack["__pillar__"] = copy.deepcopy(self.opts["pillar"]) + self.grains_cache = copy.deepcopy(self.opts["grains"]) - if self.opts.get('invasive_targeting', False): + if self.opts.get("invasive_targeting", False): log.info( - 'Invasive targeting requested, trying to see if %s matches the target %s (%s)', - self.opts['id'], - str(self.opts['__tgt']), - self.opts['__tgt_type'], + "Invasive targeting requested, trying to see if %s matches the target %s (%s)", + self.opts["id"], + str(self.opts["__tgt"]), + self.opts["__tgt_type"], ) matched = self._matches_target() if not matched: # Didn't match, shutting down this Proxy Minion, and exiting. 
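[Reviewer note] The `_matches_target()` call in this invasive-targeting branch resolves a matcher by target type from `salt.loader.matchers()` and runs it against the fully-loaded opts. A rough standalone illustration of that lookup; the minion config path, Grain value, and target expression are assumptions for the example:

```python
# Illustrative sketch of the matcher lookup behind _matches_target().
import salt.config
import salt.loader

opts = salt.config.minion_config("/etc/salt/minion")  # assumed path
opts["grains"] = {"os": "junos"}  # pretend Grains were already loaded
matchers = salt.loader.matchers(opts)
match_func = matchers["grain_match.match"]
print(match_func("os:junos", delimiter=":"))  # True when the Grain matches
```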
log.debug( - '%s does not match the target expression, aborting', self.opts['id'] + "%s does not match the target expression, aborting", self.opts["id"] ) - proxy_shut_fn = self.proxy[fq_proxyname + '.shutdown'] + proxy_shut_fn = self.proxy[fq_proxyname + ".shutdown"] proxy_shut_fn(self.opts) return self.module_executors = self.proxy.get( - '{0}.module_executors'.format(fq_proxyname), lambda: [] - )() or self.opts.get('module_executors', []) + "{0}.module_executors".format(fq_proxyname), lambda: [] + )() or self.opts.get("module_executors", []) if self.module_executors: self.executors = salt.loader.executors( self.opts, self.functions, proxy=self.proxy @@ -475,11 +476,11 @@ def gen_modules(self, initial_load=False): # pylint: disable=arguments-differ # Late load the Returners, as they might need Grains, which may not be # properly or completely loaded before this. self.returners = None - if self.opts['returner']: + if self.opts["returner"]: self.returners = salt.loader.returners( self.opts, self.functions, proxy=self.proxy ) - self.proxy.pack['__ret__'] = self.returners + self.proxy.pack["__ret__"] = self.returners self.ready = True @@ -526,13 +527,13 @@ def salt_call( invasive_targeting=False, failhard=False, timeout=60, - returner='', - returner_config='', + returner="", + returner_config="", returner_kwargs=None, args=(), **kwargs ): - ''' + """ Invoke a Salt Execution Function that requires or invokes an NAPALM functionality (directly or indirectly). @@ -610,40 +611,38 @@ def salt_call( salt-run proxy.salt_call bgp.neighbors junos 1.2.3.4 test test123 salt-run proxy.salt_call net.load_config junos 1.2.3.4 test test123 text='set system ntp peer 1.2.3.4' - ''' + """ opts = copy.deepcopy(__opts__) - opts['id'] = minion_id - opts['pillarenv'] = __opts__.get('pillarenv', 'base') - opts['__cli'] = __opts__.get('__cli', 'salt-call') - opts['__tgt'] = tgt - opts['__tgt_type'] = tgt_type - if 'saltenv' not in opts: - opts['saltenv'] = 'base' + opts["id"] = minion_id + opts["pillarenv"] = __opts__.get("pillarenv", "base") + opts["__cli"] = __opts__.get("__cli", "salt-call") + opts["__tgt"] = tgt + opts["__tgt_type"] = tgt_type + if "saltenv" not in opts: + opts["saltenv"] = "base" if not default_grains: default_grains = {} - opts['grains'] = default_grains + opts["grains"] = default_grains if not default_pillar: default_pillar = {} - opts['pillar'] = default_pillar - opts['proxy_load_pillar'] = with_pillar - opts['proxy_load_grains'] = with_grains - opts['proxy_preload_pillar'] = preload_pillar - opts['proxy_preload_grains'] = preload_grains - opts['proxy_cache_grains'] = cache_grains - opts['proxy_cache_pillar'] = cache_pillar - opts['preload_targeting'] = preload_targeting - opts['invasive_targeting'] = invasive_targeting - opts['proxy_no_connect'] = no_connect - opts['proxy_test_ping'] = test_ping - opts['proxy_use_cached_grains'] = use_cached_grains + opts["pillar"] = default_pillar + opts["proxy_load_pillar"] = with_pillar + opts["proxy_load_grains"] = with_grains + opts["proxy_preload_pillar"] = preload_pillar + opts["proxy_preload_grains"] = preload_grains + opts["proxy_cache_grains"] = cache_grains + opts["proxy_cache_pillar"] = cache_pillar + opts["preload_targeting"] = preload_targeting + opts["invasive_targeting"] = invasive_targeting + opts["proxy_no_connect"] = no_connect + opts["proxy_test_ping"] = test_ping + opts["proxy_use_cached_grains"] = use_cached_grains if use_cached_grains: - cache_data = __salt__['cache.fetch']( - 'minions/{}'.format(minion_id), 'data' - ) - if cache_data 
and 'grains' in cache_data: - opts['proxy_cached_grains'] = cache_data['grains'] - opts['roster_opts'] = roster_opts - opts['returner'] = returner + cache_data = __salt__["cache.fetch"]("minions/{}".format(minion_id), "data") + if cache_data and "grains" in cache_data: + opts["proxy_cached_grains"] = cache_data["grains"] + opts["roster_opts"] = roster_opts + opts["returner"] = returner if not returner_kwargs: returner_kwargs = {} minion_defaults = salt.config.DEFAULT_MINION_OPTS.copy() @@ -654,34 +653,34 @@ def salt_call( sa_proxy = StandaloneProxy(opts, unreachable_devices) if not sa_proxy.ready: log.debug( - 'The SProxy Minion for %s is not able to start up, aborting', opts['id'] + "The SProxy Minion for %s is not able to start up, aborting", opts["id"] ) return kwargs = clean_kwargs(**kwargs) ret = None retcode = 0 - executors = getattr(sa_proxy, 'module_executors') + executors = getattr(sa_proxy, "module_executors") try: if executors: for name in executors: - ex_name = '{}.execute'.format(name) + ex_name = "{}.execute".format(name) if ex_name not in sa_proxy.executors: raise SaltInvocationError( "Executor '{0}' is not available".format(name) ) ret = sa_proxy.executors[ex_name]( - opts, {'fun': salt_function}, salt_function, args, kwargs + opts, {"fun": salt_function}, salt_function, args, kwargs ) if ret is not None: break else: ret = sa_proxy.functions[salt_function](*args, **kwargs) - retcode = sa_proxy.functions.pack['__context__'].get('retcode', 0) + retcode = sa_proxy.functions.pack["__context__"].get("retcode", 0) except Exception as err: - log.info('Exception while running %s on %s', salt_function, opts['id']) + log.info("Exception while running %s on %s", salt_function, opts["id"]) if failed_devices is not None: - failed_devices.append(opts['id']) - ret = 'The minion function caused an exception: {err}'.format( + failed_devices.append(opts["id"]) + ret = "The minion function caused an exception: {err}".format( err=traceback.format_exc() ) if not retcode: @@ -690,46 +689,48 @@ def salt_call( raise finally: if sa_proxy.connected: - shut_fun = '{}.shutdown'.format(sa_proxy.opts['proxy']['proxytype']) + shut_fun = "{}.shutdown".format(sa_proxy.opts["proxy"]["proxytype"]) sa_proxy.proxy[shut_fun](opts) if returner: - returner_fun = '{}.returner'.format(returner) + returner_fun = "{}.returner".format(returner) if returner_fun in sa_proxy.returners: log.debug( - 'Sending the response from %s to the %s Returner', opts['id'], returner, + "Sending the response from %s to the %s Returner", + opts["id"], + returner, ) ret_data = { - 'id': opts['id'], - 'jid': jid, - 'fun': salt_function, - 'fun_args': args, - 'return': ret, - 'ret_config': returner_config, - 'ret_kwargs': returner_kwargs, + "id": opts["id"], + "jid": jid, + "fun": salt_function, + "fun_args": args, + "return": ret, + "ret_config": returner_config, + "ret_kwargs": returner_kwargs, } try: sa_proxy.returners[returner_fun](ret_data) except Exception as err: log.error( - 'Exception while sending the response from %s to the %s returner', - opts['id'], + "Exception while sending the response from %s to the %s returner", + opts["id"], returner, ) log.error(err, exc_info=True) else: log.warning( - 'Returner %s is not available. Check that the dependencies are properly installed' + "Returner %s is not available. 
Check that the dependencies are properly installed", returner ) cache_data = {} if cache_grains: - log.debug('Caching Grains for %s', minion_id) - log.debug(sa_proxy.opts['grains']) - cache_data['grains'] = copy.deepcopy(sa_proxy.opts['grains']) + log.debug("Caching Grains for %s", minion_id) + log.debug(sa_proxy.opts["grains"]) + cache_data["grains"] = copy.deepcopy(sa_proxy.opts["grains"]) if cache_pillar: - log.debug('Caching Pillar for %s', minion_id) - cache_data['pillar'] = copy.deepcopy(sa_proxy.opts['pillar']) - cached_store = __salt__['cache.store']( - 'minions/{}'.format(minion_id), 'data', cache_data + log.debug("Caching Pillar for %s", minion_id) + cache_data["pillar"] = copy.deepcopy(sa_proxy.opts["pillar"]) + cached_store = __salt__["cache.store"]( + "minions/{}".format(minion_id), "data", cache_data ) return ret, retcode @@ -768,12 +769,12 @@ def execute_devices( verbose=False, progress=False, hide_timeout=False, - returner='', - returner_config='', + returner="", + returner_config="", returner_kwargs=None, **kwargs ): - ''' + """ Execute a Salt function on a group of network devices identified by their Minion ID, as listed under the ``minions`` argument. @@ -858,12 +859,12 @@ def execute_devices( .. code-block:: bash salt-run proxy.execute "['172.17.17.1', '172.17.17.2']" test.ping driver=eos username=test password=test123 - ''' - resp = '' + """ + resp = "" retcode = 0 - __pub_user = kwargs.get('__pub_user') + __pub_user = kwargs.get("__pub_user") if not __pub_user: - __pub_user = __utils__['user.get_specific_user']() + __pub_user = __utils__["user.get_specific_user"]() kwargs = clean_kwargs(**kwargs) if not jid: if salt.version.__version_info__ >= (2018, 3, 0): @@ -872,48 +873,48 @@ def execute_devices( jid = salt.utils.jid.gen_jid() # pylint: disable=no-value-for-parameter event_args = list(args[:]) if kwargs: - event_kwargs = {'__kwarg__': True} + event_kwargs = {"__kwarg__": True} event_kwargs.update(kwargs) event_args.append(event_kwargs) if not returner_kwargs: returner_kwargs = {} opts = { - 'with_grains': with_grains, - 'with_pillar': with_pillar, - 'preload_grains': preload_grains, - 'preload_pillar': preload_pillar, - 'default_grains': default_grains, - 'default_pillar': default_pillar, - 'preload_targeting': preload_targeting, - 'invasive_targeting': invasive_targeting, - 'args': args, - 'cache_grains': cache_grains, - 'cache_pillar': cache_pillar, - 'use_cached_grains': use_cached_grains, - 'use_cached_pillar': use_cached_pillar, - 'use_existing_proxy': use_existing_proxy, - 'no_connect': no_connect, - 'test_ping': test_ping, - 'tgt': tgt, - 'tgt_type': tgt_type, - 'failhard': failhard, - 'timeout': timeout, - 'returner': returner, - 'returner_config': returner_config, - 'returner_kwargs': returner_kwargs, + "with_grains": with_grains, + "with_pillar": with_pillar, + "preload_grains": preload_grains, + "preload_pillar": preload_pillar, + "default_grains": default_grains, + "default_pillar": default_pillar, + "preload_targeting": preload_targeting, + "invasive_targeting": invasive_targeting, + "args": args, + "cache_grains": cache_grains, + "cache_pillar": cache_pillar, + "use_cached_grains": use_cached_grains, + "use_cached_pillar": use_cached_pillar, + "use_existing_proxy": use_existing_proxy, + "no_connect": no_connect, + "test_ping": test_ping, + "tgt": tgt, + "tgt_type": tgt_type, + "failhard": failhard, + "timeout": timeout, + "returner": returner, + "returner_config": returner_config, + "returner_kwargs": returner_kwargs, } opts.update(kwargs) if events: -
__salt__['event.send']( - 'proxy/runner/{jid}/new'.format(jid=jid), + __salt__["event.send"]( + "proxy/runner/{jid}/new".format(jid=jid), { - 'fun': salt_function, - 'minions': minions, - 'arg': event_args, - 'jid': jid, - 'tgt': tgt, - 'tgt_type': tgt_type, - 'user': __pub_user, + "fun": salt_function, + "minions": minions, + "arg": event_args, + "jid": jid, + "tgt": tgt, + "tgt_type": tgt_type, + "user": __pub_user, }, ) if not existing_minions: @@ -945,8 +946,8 @@ def execute_devices( ret = {} sproxy_minions = list(set(minions) - set(existing_minions)) if batch_size: - if '%' in str(batch_size): - percent = int(batch_size.replace('%', '')) + if "%" in str(batch_size): + percent = int(batch_size.replace("%", "")) batch_size = len(minions) * percent / 100 batch_size = int(batch_size) batch_count = int(len(minions) / batch_size) + ( @@ -970,44 +971,48 @@ def execute_devices( if existing_batch_size > 0: # When there are existing Minions matching the target, use the native # batching function to execute against these Minions. - log.debug('Executing against the existing Minions') + log.debug("Executing against the existing Minions") log.debug(existing_minions) batch_opts = copy.deepcopy(__opts__) - batch_opts['batch'] = str(existing_batch_size) - batch_opts['tgt'] = existing_minions - batch_opts['tgt_type'] = 'list' - batch_opts['fun'] = salt_function - batch_opts['arg'] = event_args - batch_opts['batch_wait'] = batch_wait - batch_opts['selected_target_option'] = 'list' - batch_opts['return'] = returner - batch_opts['ret_config'] = returner_config - batch_opts['ret_kwargs'] = returner_kwargs + batch_opts["batch"] = str(existing_batch_size) + batch_opts["tgt"] = existing_minions + batch_opts["tgt_type"] = "list" + batch_opts["fun"] = salt_function + batch_opts["arg"] = event_args + batch_opts["batch_wait"] = batch_wait + batch_opts["selected_target_option"] = "list" + batch_opts["return"] = returner + batch_opts["ret_config"] = returner_config + batch_opts["ret_kwargs"] = returner_kwargs if test_ping: cli_batch = PingBatch(batch_opts, quiet=True) - cli_batch.minions, cli_batch.ping_gen, cli_batch.down_minions = cli_batch.gather_minions() + ( + cli_batch.minions, + cli_batch.ping_gen, + cli_batch.down_minions, + ) = cli_batch.gather_minions() cli_batch.gather_minions = cli_batch._gather_minions else: cli_batch = NoPingBatch(batch_opts, quiet=True) - log.debug('Batching detected the following Minions responsive') + log.debug("Batching detected the following Minions responsive") log.debug(cli_batch.minions) if cli_batch.down_minions: log.warning( - 'The following existing Minions connected to the Master ' - 'seem to be unresponsive: %s', - ', '.join(cli_batch.down_minions), + "The following existing Minions connected to the Master " + "seem to be unresponsive: %s", + ", ".join(cli_batch.down_minions), ) down_minions = cli_batch.down_minions for minion in down_minions: ret_queue.put( ( - {minion: 'Minion did not return. [Not connected]'}, + {minion: "Minion did not return. 
[Not connected]"}, salt.defaults.exitcodes.EX_UNAVAILABLE, ) ) log.info( - '%d devices matched the target, executing in %d batches', + "%d devices matched the target, executing in %d batches", len(minions), batch_count, ) @@ -1038,7 +1043,7 @@ def execute_devices( batch_stop_queue.put(0) log.debug( - 'Executing sproxy normal run on the following devices (%d batch size):', + "Executing sproxy normal run on the following devices (%d batch size):", sproxy_batch_size, ) log.debug(sproxy_minions) @@ -1051,8 +1056,8 @@ def execute_devices( for minion_id in sproxy_minions: device_opts = copy.deepcopy(opts) if roster_targets and isinstance(roster_targets, dict): - device_opts['roster_opts'] = roster_targets.get(minion_id, {}).get( - 'minion_opts' + device_opts["roster_opts"] = roster_targets.get(minion_id, {}).get( + "minion_opts" ) sproxy_execute_queue.put((minion_id, device_opts)) @@ -1074,7 +1079,7 @@ def execute_devices( time.sleep(0.001) continue minion_id, device_opts = sproxy_execute_queue.get() - log.debug('Starting execution for %s', minion_id) + log.debug("Starting execution for %s", minion_id) device_proc = multiprocessing.Process( target=_salt_call_and_return, name=minion_id, @@ -1114,7 +1119,7 @@ def execute_devices( # If the process didn't finish the task, it means it's past # the timeout value, time to kiss it goodbye. log.info( - 'Terminating the process for %s, as it didn\'t reply within %d seconds', + "Terminating the process for %s, as it didn't reply within %d seconds", proc._name, timeout, ) @@ -1122,7 +1127,7 @@ def execute_devices( if not hide_timeout: ret_queue.put( ( - {proc._name: 'Minion did not return. [No response]'}, + {proc._name: "Minion did not return. [No response]"}, salt.defaults.exitcodes.EX_UNAVAILABLE, ) ) @@ -1142,7 +1147,7 @@ def execute_devices( sproxy_processes.remove(proc) if stop_iteration: - log.error('Exiting as an error has occurred') + log.error("Exiting as an error has occurred") ret_queue.put((_SENTINEL, salt.defaults.exitcodes.EX_GENERIC)) sproxy_stop_queue.put(_SENTINEL) for proc in sproxy_processes: @@ -1154,7 +1159,7 @@ def execute_devices( if batch_wait: log.debug( - 'Waiting %f seconds before executing the next batch', batch_wait + "Waiting %f seconds before executing the next batch", batch_wait ) time.sleep(batch_wait) @@ -1184,80 +1189,80 @@ def execute_devices( resp.update(ret) if summary: - salt.utils.stringutils.print_cli('\n') + salt.utils.stringutils.print_cli("\n") salt.utils.stringutils.print_cli( - '-------------------------------------------' + "-------------------------------------------" ) - salt.utils.stringutils.print_cli('Summary') + salt.utils.stringutils.print_cli("Summary") salt.utils.stringutils.print_cli( - '-------------------------------------------' + "-------------------------------------------" ) salt.utils.stringutils.print_cli( - '# of devices targeted: {0}'.format(len(minions)) + "# of devices targeted: {0}".format(len(minions)) ) salt.utils.stringutils.print_cli( - '# of devices returned: {0}'.format( + "# of devices returned: {0}".format( len(minions) - len(timeout_devices) - len(unreachable_devices) ) ) salt.utils.stringutils.print_cli( - '# of devices that did not return: {0}'.format(len(timeout_devices)) + "# of devices that did not return: {0}".format(len(timeout_devices)) ) salt.utils.stringutils.print_cli( - '# of devices with errors: {0}'.format(len(failed_devices)) + "# of devices with errors: {0}".format(len(failed_devices)) ) salt.utils.stringutils.print_cli( - '# of devices unreachable: 
{0}'.format(len(unreachable_devices)) + "# of devices unreachable: {0}".format(len(unreachable_devices)) ) if verbose: if timeout_devices: salt.utils.stringutils.print_cli( ( - '\nThe following devices didn\'t return (timeout):' - '\n - {0}'.format('\n - '.join(timeout_devices)) + "\nThe following devices didn't return (timeout):" + "\n - {0}".format("\n - ".join(timeout_devices)) ) ) if failed_devices: salt.utils.stringutils.print_cli( ( '\nThe following devices returned "bad" output:' - '\n - {0}'.format('\n - '.join(failed_devices)) + "\n - {0}".format("\n - ".join(failed_devices)) ) ) if unreachable_devices: salt.utils.stringutils.print_cli( ( - '\nThe following devices are unreachable:' - '\n - {0}'.format('\n - '.join(unreachable_devices)) + "\nThe following devices are unreachable:" + "\n - {0}".format("\n - ".join(unreachable_devices)) ) ) salt.utils.stringutils.print_cli( - '-------------------------------------------' + "-------------------------------------------" ) if events: - __salt__['event.send']( - 'proxy/runner/{jid}/summary'.format(jid=jid), + __salt__["event.send"]( + "proxy/runner/{jid}/summary".format(jid=jid), { - 'tgt': tgt, - 'tgt_type': tgt_type, - 'fun': salt_function, - 'fun_args': event_args, - 'jid': jid, - 'user': __pub_user, - 'retcode': retcode, - 'matched_minions': minions, - 'existing_minions': existing_minions, - 'sproxy_minions': sproxy_minions, - 'timeout_minions': list(timeout_devices), - 'down_minions': list(down_minions), - 'unreachable_devices': list(unreachable_devices), - 'failed_minions': list(failed_devices), + "tgt": tgt, + "tgt_type": tgt_type, + "fun": salt_function, + "fun_args": event_args, + "jid": jid, + "user": __pub_user, + "retcode": retcode, + "matched_minions": minions, + "existing_minions": existing_minions, + "sproxy_minions": sproxy_minions, + "timeout_minions": list(timeout_devices), + "down_minions": list(down_minions), + "unreachable_devices": list(unreachable_devices), + "failed_minions": list(failed_devices), }, ) - __context__['retcode'] = retcode + __context__["retcode"] = retcode if retcode != salt.defaults.exitcodes.EX_OK: salt.utils.stringutils.print_cli( - 'ERROR: Minions returned with non-zero exit code' + "ERROR: Minions returned with non-zero exit code" ) return resp @@ -1265,7 +1270,7 @@ def execute_devices( def execute( tgt, salt_function=None, - tgt_type='glob', + tgt_type="glob", roster=None, preview_target=False, target_details=False, @@ -1302,12 +1307,12 @@ def execute( sync_modules=False, sync_grains=False, sync_all=False, - returner='', - returner_config='', + returner="", + returner_config="", returner_kwargs=None, **kwargs ): - ''' + """ Invoke a Salt function on the list of devices matched by the Roster subsystem. 
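The hunks above reformat the summary output and the `proxy/runner/{jid}/new` / `proxy/runner/{jid}/summary` event publishing in `execute_devices`. For context, these tags can be consumed straight off the Master event bus; a minimal sketch, assuming a local Master with the default config path (the `get_event` signature shifts slightly between Salt releases, so treat this as illustrative):

    # Illustrative consumer for the proxy/runner/<jid>/* events published above.
    import fnmatch

    import salt.config
    import salt.utils.event

    opts = salt.config.client_config("/etc/salt/master")
    bus = salt.utils.event.get_event(
        "master", opts["sock_dir"], opts=opts, listen=True
    )
    while True:
        evt = bus.get_event(full=True)
        if not evt:
            continue  # get_event returns None when its wait times out
        if fnmatch.fnmatch(evt["tag"], "proxy/runner/*/summary"):
            data = evt["data"]
            print(data["jid"], data["retcode"], data["failed_minions"])

The `data` keys mirror the payload assembled above: `jid`, `retcode`, `matched_minions`, `failed_minions`, and so on.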
@@ -1410,53 +1415,53 @@ def execute( salt-run proxy.execute_roster edge* test.ping salt-run proxy.execute_roster junos-edges test.ping tgt_type=nodegroup - ''' + """ targets = [] rtargets = None - roster = roster or __opts__.get('proxy_roster', __opts__.get('roster')) + roster = roster or __opts__.get("proxy_roster", __opts__.get("roster")) - saltenv = __opts__.get('saltenv', 'base') + saltenv = __opts__.get("saltenv", "base") if sync_roster and not sync_all: - __salt__['saltutil.sync_roster'](saltenv=saltenv) + __salt__["saltutil.sync_roster"](saltenv=saltenv) if sync_modules and not sync_all: - __salt__['saltutil.sync_modules'](saltenv=saltenv) + __salt__["saltutil.sync_modules"](saltenv=saltenv) if sync_all: - __salt__['saltutil.sync_all'](saltenv=saltenv) + __salt__["saltutil.sync_all"](saltenv=saltenv) if not timeout: - log.warning('Timeout set as 0, will wait for the devices to reply indefinitely') + log.warning("Timeout set as 0, will wait for the devices to reply indefinitely") # Setting the timeout as None, because that's the value we need to pass # to multiprocessing's join() method to wait for the devices to reply # indefinitely. timeout = None - if tgt_type == 'pillar_target': + if tgt_type == "pillar_target": # When using the -I option on the CLI, the tgt_type passed on is called # `pillar_target`: # https://github.com/saltstack/salt/blob/e9e48b7fb6a688f4f22d74a849d58c1c156563d1/salt/utils/parsers.py#L1266 # While if we want to use this against existing Minions, the option # needs to be just `pillar`: # https://github.com/saltstack/salt/blob/99385b50718d70d93fd5b83e61c0f4b3a402490c/salt/utils/minions.py#L359 - tgt_type = 'pillar' + tgt_type = "pillar" if preload_targeting or invasive_targeting: - _tgt = '*' - _tgt_type = 'glob' + _tgt = "*" + _tgt_type = "glob" else: _tgt = tgt _tgt_type = tgt_type existing_minions = [] - if not roster or roster == 'None': + if not roster or roster == "None": log.info( - 'No Roster specified. Please use the ``roster`` argument, or set the ``proxy_roster`` option in the ' - 'Master configuration.' + "No Roster specified. Please use the ``roster`` argument, or set the ``proxy_roster`` option in the " + "Master configuration." ) targets = [] if use_existing_proxy: # When targeting exiting Proxies, we're going to look and match the # accepted keys - log.debug('Requested to match the target based on the existing Minions') + log.debug("Requested to match the target based on the existing Minions") target_util = salt.utils.master.MasterPillarUtil( tgt, tgt_type, @@ -1472,48 +1477,48 @@ def execute( # want however, it won't be of much use, as the command is going to # be spread out to non-existing minions, so better turn off that # feature. 
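For reference, the `use_existing_proxy` branch above resolves the target against the already-accepted Minion keys via `MasterPillarUtil`. A minimal sketch of that flow under assumed defaults (`_tgt_to_list` is a private Salt API and may vary between releases):

    import salt.config
    import salt.utils.master

    opts = salt.config.master_config("/etc/salt/master")
    target_util = salt.utils.master.MasterPillarUtil(
        "edge*",  # tgt
        "glob",  # tgt_type
        use_cached_grains=True,
        grains_fallback=False,
        opts=opts,
    )
    # Expand the target expression against the accepted minion keys.
    existing_minions = target_util._tgt_to_list()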
- log.debug('Trying a fuzzy match on the target') - if tgt_type == 'list': + log.debug("Trying a fuzzy match on the target") + if tgt_type == "list": targets = tgt[:] - elif tgt_type == 'glob' and tgt != '*': + elif tgt_type == "glob" and tgt != "*": targets = [tgt] else: targets = None if target_cache and not (invasive_targeting or preload_targeting): cache_bank = salt.cache.factory(__opts__) cache_key = hashlib.sha1( - '{tgt}_{tgt_type}'.format(tgt=tgt, tgt_type=tgt_type).encode() + "{tgt}_{tgt_type}".format(tgt=tgt, tgt_type=tgt_type).encode() ).hexdigest() - cache_time_key = '{}_time'.format(cache_key) - cache_time = cache_bank.fetch('_salt_sproxy_target', cache_time_key) + cache_time_key = "{}_time".format(cache_key) + cache_time = cache_bank.fetch("_salt_sproxy_target", cache_time_key) if cache_time and time.time() - cache_time <= target_cache_timeout: - log.debug('Loading the targets from the cache') - targets = cache_bank.fetch('_salt_sproxy_target', cache_key) + log.debug("Loading the targets from the cache") + targets = cache_bank.fetch("_salt_sproxy_target", cache_key) if not targets: rtargets = {} if use_existing_proxy: - log.debug('Gathering the cached Grains from the existing Minions') - cached_grains = __salt__['cache.grains'](tgt=tgt, tgt_type=tgt_type) + log.debug("Gathering the cached Grains from the existing Minions") + cached_grains = __salt__["cache.grains"](tgt=tgt, tgt_type=tgt_type) for target, target_grains in cached_grains.items(): - rtargets[target] = {'minion_opts': {'grains': target_grains}} + rtargets[target] = {"minion_opts": {"grains": target_grains}} existing_minions.append(target) - log.debug('Computing the target using the %s Roster', roster) - __opts__['use_cached_grains'] = use_cached_grains - __opts__['use_cached_pillar'] = use_cached_pillar + log.debug("Computing the target using the %s Roster", roster) + __opts__["use_cached_grains"] = use_cached_grains + __opts__["use_cached_pillar"] = use_cached_pillar roster_modules = salt.loader.roster( __opts__, runner=__salt__, whitelist=[roster] ) - if '.targets' not in roster: - roster = '{mod}.targets'.format(mod=roster) + if ".targets" not in roster: + roster = "{mod}.targets".format(mod=roster) rtargets_roster = roster_modules[roster](_tgt, tgt_type=_tgt_type) rtargets = salt.utils.dictupdate.merge(rtargets, rtargets_roster) targets = list(rtargets.keys()) if target_cache and not (invasive_targeting or preload_targeting): - cache_bank.store('_salt_sproxy_target', cache_key, targets) - cache_bank.store('_salt_sproxy_target', cache_time_key, time.time()) + cache_bank.store("_salt_sproxy_target", cache_key, targets) + cache_bank.store("_salt_sproxy_target", cache_time_key, time.time()) if preload_targeting or invasive_targeting: log.debug( - 'Loaded everything from the Roster, to start collecting Grains and Pillars:' + "Loaded everything from the Roster, to start collecting Grains and Pillars:" ) else: log.debug( @@ -1521,24 +1526,24 @@ def execute( ) log.debug(targets) if not targets: - return 'No devices matched your target. Please review your tgt / tgt_type arguments, or the Roster data source' + return "No devices matched your target. Please review your tgt / tgt_type arguments, or the Roster data source" if preview_target: return targets elif not salt_function: - return 'Please specify a Salt function to execute.' - jid = kwargs.get('__pub_jid') + return "Please specify a Salt function to execute." 
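The `target_cache` logic above keys the cached target list on a SHA-1 of the target expression, paired with a `<key>_time` entry that is checked against `target_cache_timeout` before the cached targets are reused. A small worked sketch (the helper name is illustrative, not part of the runner):

    import hashlib

    def target_cache_keys(tgt, tgt_type):
        cache_key = hashlib.sha1(
            "{tgt}_{tgt_type}".format(tgt=tgt, tgt_type=tgt_type).encode()
        ).hexdigest()
        # The runner stores time.time() under the companion key, and only
        # reuses the cached targets while their age <= target_cache_timeout.
        return cache_key, "{}_time".format(cache_key)

    cache_key, cache_time_key = target_cache_keys("edge*", "glob")
    print(cache_key)  # stable 40-char hex digest per (tgt, tgt_type) pair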
+ jid = kwargs.get("__pub_jid") if not jid: if salt.version.__version_info__ >= (2018, 3, 0): jid = salt.utils.jid.gen_jid(__opts__) else: jid = salt.utils.jid.gen_jid() # pylint: disable=no-value-for-parameter if verbose or show_jid: - salt.utils.stringutils.print_cli('Executing job with jid {0}'.format(jid)) + salt.utils.stringutils.print_cli("Executing job with jid {0}".format(jid)) salt.utils.stringutils.print_cli( - '-------------------------------------------\n' + "-------------------------------------------\n" ) if events: - __salt__['event.send'](jid, {'minions': targets}) + __salt__["event.send"](jid, {"minions": targets}) return execute_devices( targets, salt_function, diff --git a/salt_sproxy/cli.py b/salt_sproxy/cli.py index d87a41b9..b9781401 100644 --- a/salt_sproxy/cli.py +++ b/salt_sproxy/cli.py @@ -12,9 +12,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. -''' +""" The CLI entry point module. -''' +""" from __future__ import absolute_import, print_function, unicode_literals from salt_sproxy.parsers import SaltStandaloneProxyOptionParser @@ -23,11 +23,11 @@ import sys import logging +import six import salt.runner import salt.utils.parsers from salt.utils.verify import check_user, verify_log from salt.exceptions import SaltClientError -from salt.ext import six import salt.defaults.exitcodes # pylint: disable=W0611 import salt.utils.stringutils import salt.version @@ -49,17 +49,17 @@ class SaltStandaloneProxy(SaltStandaloneProxyOptionParser): - ''' + """ Used to execute Salt functions on a number of devices. - ''' + """ def run(self): - ''' + """ Execute salt-run - ''' + """ self.parse_args() - if self.config.get('config_dump'): + if self.config.get("config_dump"): sys.stdout.write(safe_dump(self.config, default_flow_style=False)) return self.config @@ -70,95 +70,95 @@ def run(self): profiling_enabled = self.options.profiling_enabled curpath = os.path.dirname(os.path.realpath(__file__)) - saltenv = self.config.get('saltenv_cli', self.config.get('saltenv')) + saltenv = self.config.get("saltenv_cli", self.config.get("saltenv")) if not saltenv: - saltenv = 'base' - self.config['saltenv'] = saltenv - if self.config.get('pillar_root'): + saltenv = "base" + self.config["saltenv"] = saltenv + if self.config.get("pillar_root"): log.info( - 'Setting and using %s as the Pillar root', self.config['pillar_root'] + "Setting and using %s as the Pillar root", self.config["pillar_root"] ) - self.config['pillar_roots'] = {saltenv: self.config['pillar_root']} - if self.config.get('file_root'): + self.config["pillar_roots"] = {saltenv: self.config["pillar_root"]} + if self.config.get("file_root"): log.info( - 'Setting and using %s as the Salt file root', self.config['file_root'] + "Setting and using %s as the Salt file root", self.config["file_root"] ) - self.config['file_root'] = {saltenv: self.config['file_root']} - if self.config.get('installation_path'): + self.config["file_root"] = {saltenv: self.config["file_root"]} + if self.config.get("installation_path"): salt.utils.stringutils.print_cli(curpath) return - if self.config.get('display_file_roots'): + if self.config.get("display_file_roots"): salt.utils.stringutils.print_cli( - 'salt-sproxy is installed at: {}'.format(curpath) + "salt-sproxy is installed at: {}".format(curpath) ) salt.utils.stringutils.print_cli( - '\nYou can configure the file_roots on the Master, e.g.,\n' + "\nYou can configure the file_roots on 
the Master, e.g.,\n" ) salt.utils.stringutils.print_cli( - 'file_roots:\n {0}:\n - {1}'.format(saltenv, curpath) + "file_roots:\n {0}:\n - {1}".format(saltenv, curpath) ) - salt.utils.stringutils.print_cli('\n\nOr only for the Runners:\n') + salt.utils.stringutils.print_cli("\n\nOr only for the Runners:\n") salt.utils.stringutils.print_cli( - 'runner_dirs:\n - {}/_runners'.format(curpath) + "runner_dirs:\n - {}/_runners".format(curpath) ) return - if self.config.get('save_file_roots'): + if self.config.get("save_file_roots"): updated = False - with fopen(self.config['conf_file'], 'r+') as master_fp: + with fopen(self.config["conf_file"], "r+") as master_fp: master_cfg = safe_load(master_fp) if not master_cfg: master_cfg = {} - file_roots = master_cfg.get('file_roots', {saltenv: []}).get( + file_roots = master_cfg.get("file_roots", {saltenv: []}).get( saltenv, [] ) - runner_dirs = master_cfg.get('runner_dirs', []) - sproxy_runners = os.path.join(curpath, '_runners') + runner_dirs = master_cfg.get("runner_dirs", []) + sproxy_runners = os.path.join(curpath, "_runners") if curpath not in file_roots: file_roots.append(curpath) - master_cfg['file_roots'] = {saltenv: file_roots} + master_cfg["file_roots"] = {saltenv: file_roots} updated = True salt.utils.stringutils.print_cli( - '{} added to the file_roots:\n'.format(curpath) + "{} added to the file_roots:\n".format(curpath) ) salt.utils.stringutils.print_cli( - 'file_roots:\n {0}\n - {1}\n'.format( - saltenv, '\n -'.join(file_roots) + "file_roots:\n {0}\n - {1}\n".format( + saltenv, "\n -".join(file_roots) ) ) if sproxy_runners not in runner_dirs: runner_dirs.append(sproxy_runners) - master_cfg['runner_dirs'] = runner_dirs + master_cfg["runner_dirs"] = runner_dirs updated = True salt.utils.stringutils.print_cli( - '{} added to runner_dirs:\n'.format(sproxy_runners) + "{} added to runner_dirs:\n".format(sproxy_runners) ) salt.utils.stringutils.print_cli( - 'runner_dirs:\n - {0}'.format('\n - '.join(runner_dirs)) + "runner_dirs:\n - {0}".format("\n - ".join(runner_dirs)) ) if updated: master_fp.seek(0) safe_dump(master_cfg, master_fp, default_flow_style=False) - log.debug('Syncing Runners on the Master') + log.debug("Syncing Runners on the Master") runner_client = salt.runner.RunnerClient(self.config) sync_runners = runner_client.cmd( - 'saltutil.sync_all', - kwarg={'saltenv': saltenv}, + "saltutil.sync_all", + kwarg={"saltenv": saltenv}, print_event=False, ) - log.debug('saltutil.sync_all output:') + log.debug("saltutil.sync_all output:") log.debug(sync_runners) else: salt.utils.stringutils.print_cli( - 'The {} path is already included into the file_roots and runner_dirs'.format( + "The {} path is already included into the file_roots and runner_dirs".format( curpath ) ) salt.utils.stringutils.print_cli( - '\nNow you can start using salt-sproxy for ' - 'event-driven automation, and the Salt REST API.\n' - 'See https://salt-sproxy.readthedocs.io/en/latest/salt_api.html' - '\nand https://salt-sproxy.readthedocs.io/en/latest/events.html ' - 'for more details.' + "\nNow you can start using salt-sproxy for " + "event-driven automation, and the Salt REST API.\n" + "See https://salt-sproxy.readthedocs.io/en/latest/salt_api.html" + "\nand https://salt-sproxy.readthedocs.io/en/latest/events.html " + "for more details." ) return # The code below executes the Runner sequence, but it swaps the function @@ -166,44 +166,44 @@ def run(self): # function requested by the user from the CLI, as an argument. 
# The same goes with the CLI options that are sent as kwargs to the # proxy Runner. - tgt = self.config['tgt'] - fun = self.config['fun'] - args = self.config['arg'] + tgt = self.config["tgt"] + fun = self.config["fun"] + args = self.config["arg"] kwargs = {} - if 'output' not in self.config and fun in ( - 'state.sls', - 'state.apply', - 'state.highstate', + if "output" not in self.config and fun in ( + "state.sls", + "state.apply", + "state.highstate", ): - self.config['output'] = 'highstate' - kwargs['progress'] = self.config.pop('progress', False) + self.config["output"] = "highstate" + kwargs["progress"] = self.config.pop("progress", False) # To be able to reuse the proxy Runner (which is not yet available # natively in Salt), we can override the ``runner_dirs`` configuration # option to tell Salt to load that Runner too. This way, we can also # load other types of modules that may be required or we provide fixes # or backports - for example the Ansible Roster which doesn't work fine # pre Salt 2018.3 (in case anyone would like to use it). - file_roots = self.config.get('file_roots', {saltenv: []}) + file_roots = self.config.get("file_roots", {saltenv: []}) if saltenv not in file_roots: file_roots[saltenv] = [] file_roots[saltenv].append(curpath) - self.config['file_roots'] = file_roots - runner_dirs = self.config.get('runner_dirs', []) - runner_path = os.path.join(curpath, '_runners') + self.config["file_roots"] = file_roots + runner_dirs = self.config.get("runner_dirs", []) + runner_path = os.path.join(curpath, "_runners") runner_dirs.append(runner_path) - self.config['runner_dirs'] = runner_dirs + self.config["runner_dirs"] = runner_dirs runner_client = None - sync_all = self.config.get('sync_all', False) - sync_grains = self.config.get('sync_grains', True) - sync_modules = self.config.get('sync_modules', True) - sync_roster = self.config.get('sync_roster', True) - sync_proxy = self.config.get('sync_proxy', False) - sync_executors = self.config.get('sync_executors', False) + sync_all = self.config.get("sync_all", False) + sync_grains = self.config.get("sync_grains", True) + sync_modules = self.config.get("sync_modules", True) + sync_roster = self.config.get("sync_roster", True) + sync_proxy = self.config.get("sync_proxy", False) + sync_executors = self.config.get("sync_executors", False) kwargs.update( { - 'sync_all': sync_all, - 'sync_roster': sync_roster, - 'sync_modules': sync_modules, + "sync_all": sync_all, + "sync_roster": sync_roster, + "sync_modules": sync_modules, } ) if any( @@ -218,50 +218,50 @@ def run(self): ): runner_client = salt.runner.RunnerClient(self.config) if sync_all: - log.debug('Sync all') + log.debug("Sync all") sync_all_ret = runner_client.cmd( - 'saltutil.sync_all', kwarg={'saltenv': saltenv}, print_event=False + "saltutil.sync_all", kwarg={"saltenv": saltenv}, print_event=False ) log.debug(sync_all_ret) if sync_grains and not sync_all: - log.debug('Syncing grains') + log.debug("Syncing grains") sync_grains_ret = runner_client.cmd( - 'saltutil.sync_grains', + "saltutil.sync_grains", kwarg={ - 'saltenv': saltenv, - 'extmod_whitelist': ','.join( - self.config.get('whitelist_grains', []) + "saltenv": saltenv, + "extmod_whitelist": ",".join( + self.config.get("whitelist_grains", []) ), - 'extmod_blacklist': ','.join(self.config.get('disable_grains', [])), + "extmod_blacklist": ",".join(self.config.get("disable_grains", [])), }, print_event=False, ) log.debug(sync_grains_ret) - if self.config.get('module_dirs_cli'): + if self.config.get("module_dirs_cli"): 
log.debug( - 'Loading execution modules from the dirs provided via --module-dirs' + "Loading execution modules from the dirs provided via --module-dirs" ) - module_dirs = self.config.get('module_dirs', []) - module_dirs.extend(self.config['module_dirs_cli']) - self.config['module_dirs'] = module_dirs + module_dirs = self.config.get("module_dirs", []) + module_dirs.extend(self.config["module_dirs_cli"]) + self.config["module_dirs"] = module_dirs if sync_modules and not sync_all: # Don't sync modules by default - log.debug('Syncing modules') - module_dirs = self.config.get('module_dirs', []) - module_path = os.path.join(curpath, '_modules') + log.debug("Syncing modules") + module_dirs = self.config.get("module_dirs", []) + module_path = os.path.join(curpath, "_modules") module_dirs.append(module_path) - self.config['module_dirs'] = module_dirs + self.config["module_dirs"] = module_dirs # No need to explicitly load the modules here, as during runtime, # Salt is anyway going to load the modules on the fly. sync_modules_ret = runner_client.cmd( - 'saltutil.sync_modules', + "saltutil.sync_modules", kwarg={ - 'saltenv': saltenv, - 'extmod_whitelist': ','.join( - self.config.get('whitelist_modules', []) + "saltenv": saltenv, + "extmod_whitelist": ",".join( + self.config.get("whitelist_modules", []) ), - 'extmod_blacklist': ','.join( - self.config.get('disable_modules', []) + "extmod_blacklist": ",".join( + self.config.get("disable_modules", []) ), }, print_event=False, @@ -269,151 +269,151 @@ def run(self): log.debug(sync_modules_ret) # Resync Roster module to load the ones we have here in the library, and # potentially others provided by the user in their environment - if sync_roster and not sync_all and self.config.get('roster'): + if sync_roster and not sync_all and self.config.get("roster"): # Sync Rosters by default - log.debug('Syncing roster') - roster_dirs = self.config.get('roster_dirs', []) - roster_path = os.path.join(curpath, '_roster') + log.debug("Syncing roster") + roster_dirs = self.config.get("roster_dirs", []) + roster_path = os.path.join(curpath, "_roster") roster_dirs.append(roster_path) - self.config['roster_dirs'] = roster_dirs + self.config["roster_dirs"] = roster_dirs sync_roster_ret = runner_client.cmd( - 'saltutil.sync_roster', - kwarg={'saltenv': saltenv, 'extmod_whitelist': self.config['roster']}, + "saltutil.sync_roster", + kwarg={"saltenv": saltenv, "extmod_whitelist": self.config["roster"]}, print_event=False, ) log.debug(sync_roster_ret) if sync_proxy and not sync_all: - log.debug('Syncing Proxy modules') - proxy_dirs = self.config.get('proxy_dirs', []) - proxy_path = os.path.join(curpath, '_proxy') + log.debug("Syncing Proxy modules") + proxy_dirs = self.config.get("proxy_dirs", []) + proxy_path = os.path.join(curpath, "_proxy") proxy_dirs.append(proxy_path) - self.config['proxy_dirs'] = proxy_dirs + self.config["proxy_dirs"] = proxy_dirs sync_proxy_ret = runner_client.cmd( - 'saltutil.sync_proxymodules', + "saltutil.sync_proxymodules", kwarg={ - 'saltenv': saltenv, - 'extmod_whitelist': ','.join( - self.config.get('whitelist_proxys', []) + "saltenv": saltenv, + "extmod_whitelist": ",".join( + self.config.get("whitelist_proxys", []) ), - 'extmod_blacklist': ','.join(self.config.get('disable_proxys', [])), + "extmod_blacklist": ",".join(self.config.get("disable_proxys", [])), }, print_event=False, ) log.debug(sync_proxy_ret) if sync_executors and not sync_all: - log.debug('Syncing Executors modules') - executor_dirs = self.config.get('executor_dirs', []) - 
executor_path = os.path.join(curpath, '_executors') + log.debug("Syncing Executors modules") + executor_dirs = self.config.get("executor_dirs", []) + executor_path = os.path.join(curpath, "_executors") executor_dirs.append(executor_path) - self.config['executor_dirs'] = executor_dirs + self.config["executor_dirs"] = executor_dirs sync_executors_ret = runner_client.cmd( - 'saltutil.sync_executors', + "saltutil.sync_executors", kwarg={ - 'saltenv': saltenv, - 'extmod_whitelist': ','.join( - self.config.get('whitelist_executors', []) + "saltenv": saltenv, + "extmod_whitelist": ",".join( + self.config.get("whitelist_executors", []) ), - 'extmod_blacklist': ','.join( - self.config.get('disable_executors', []) + "extmod_blacklist": ",".join( + self.config.get("disable_executors", []) ), }, print_event=False, ) log.debug(sync_executors_ret) - if self.config.get('states_dir'): - states_dirs = self.config.get('states_dirs', []) - states_dirs.append(self.config['states_dir']) - self.config['states_dirs'] = states_dirs - self.config['fun'] = 'proxy.execute' + if self.config.get("states_dir"): + states_dirs = self.config.get("states_dirs", []) + states_dirs.append(self.config["states_dir"]) + self.config["states_dirs"] = states_dirs + self.config["fun"] = "proxy.execute" tmp_args = args[:] for index, arg in enumerate(tmp_args): - if isinstance(arg, dict) and '__kwarg__' in arg: + if isinstance(arg, dict) and "__kwarg__" in arg: args.pop(index) kwargs = arg - kwargs['__kwarg__'] = True + kwargs["__kwarg__"] = True tgt_types = ( - 'compound', - 'list', - 'grain', - 'pcre', - 'grain_pcre', - 'pillar', - 'pillar_pcre', - 'pillar_target', - 'nodegroup', + "compound", + "list", + "grain", + "pcre", + "grain_pcre", + "pillar", + "pillar_pcre", + "pillar_target", + "nodegroup", ) - kwargs['tgt_type'] = 'glob' + kwargs["tgt_type"] = "glob" for tgt_type in tgt_types: if hasattr(self.options, tgt_type) and getattr(self.options, tgt_type): - kwargs['tgt_type'] = tgt_type + kwargs["tgt_type"] = tgt_type kwargs_opts = ( - 'preview_target', - 'batch_size', - 'batch_wait', - 'roster', - 'timeout', - 'static', - 'no_connect', - 'failhard', - 'summary', - 'verbose', - 'show_jid', - 'hide_timeout', - 'progress', - 'returner', - 'target_cache', - 'returner_config', - 'returner_kwargs', + "preview_target", + "batch_size", + "batch_wait", + "roster", + "timeout", + "static", + "no_connect", + "failhard", + "summary", + "verbose", + "show_jid", + "hide_timeout", + "progress", + "returner", + "target_cache", + "returner_config", + "returner_kwargs", ) for kwargs_opt in kwargs_opts: if getattr(self.options, kwargs_opt) is not None: kwargs[kwargs_opt] = getattr(self.options, kwargs_opt) reverse_opts = { # option_name: runner_kwarg - 'no_cached_grains': 'use_cached_grains', - 'no_cached_pillar': 'use_cached_pillar', - 'no_grains': 'with_grains', - 'no_pillar': 'with_pillar', - 'dont_cache_grains': 'cache_grains', - 'dont_cache_pillar': 'cache_pillar', + "no_cached_grains": "use_cached_grains", + "no_cached_pillar": "use_cached_pillar", + "no_grains": "with_grains", + "no_pillar": "with_pillar", + "dont_cache_grains": "cache_grains", + "dont_cache_pillar": "cache_pillar", } for opt, kwarg in six.iteritems(reverse_opts): if getattr(self.options, opt): kwargs[kwarg] = False - kwargs['events'] = self.config.get('events', False) - kwargs['use_existing_proxy'] = self.config.get('use_existing_proxy', False) - kwargs['test_ping'] = self.config.get('test_ping', False) - kwargs['target_cache_timeout'] = self.config.get( - 
'target_cache_timeout', 60 + kwargs["events"] = self.config.get("events", False) + kwargs["use_existing_proxy"] = self.config.get("use_existing_proxy", False) + kwargs["test_ping"] = self.config.get("test_ping", False) + kwargs["target_cache_timeout"] = self.config.get( + "target_cache_timeout", 60 ) # seconds - kwargs['args'] = args - kwargs['default_grains'] = self.config.get( - 'sproxy_grains', - self.config.get('default_grains', self.config.get('grains')), + kwargs["args"] = args + kwargs["default_grains"] = self.config.get( + "sproxy_grains", + self.config.get("default_grains", self.config.get("grains")), ) - kwargs['default_pillar'] = self.config.get( - 'sproxy_pillar', - self.config.get('default_pillar', self.config.get('pillar')), + kwargs["default_pillar"] = self.config.get( + "sproxy_pillar", + self.config.get("default_pillar", self.config.get("pillar")), ) - kwargs['preload_targeting'] = self.config.get('preload_targeting', False) - kwargs['invasive_targeting'] = self.config.get('invasive_targeting', False) - kwargs['failhard'] = self.config.get('failhard', False) - self.config['arg'] = [tgt, fun, kwargs] + kwargs["preload_targeting"] = self.config.get("preload_targeting", False) + kwargs["invasive_targeting"] = self.config.get("invasive_targeting", False) + kwargs["failhard"] = self.config.get("failhard", False) + self.config["arg"] = [tgt, fun, kwargs] runner = salt.runner.Runner(self.config) - if self.config.get('doc', True): + if self.config.get("doc", True): # late import as salt.loader adds up some execution time, and we # don't want that, but only when displaying docs. from salt.loader import utils, grains, minion_mods - runner.opts['fun'] = fun - runner.opts['grains'] = grains(runner.opts) + runner.opts["fun"] = fun + runner.opts["grains"] = grains(runner.opts) runner._functions = minion_mods(runner.opts, utils=utils(runner.opts)) # Run this here so SystemExit isn't raised anywhere else when # someone tries to use the runners via the python API try: - if check_user(self.config['user']): + if check_user(self.config["user"]): pr = activate_profile(profiling_enabled) try: ret = runner.run() @@ -423,12 +423,12 @@ def run(self): # runners might still use it. For this reason, we # also check ret['data']['retcode'] if # ret['retcode'] is not available. 
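The retcode handling reformatted just below resolves the exit code in a fixed order: the runner context wins, then `ret["retcode"]`, then `ret["data"]["retcode"]`. Distilled into a sketch (`resolve_exit_code` is a hypothetical helper, not part of the CLI):

    def resolve_exit_code(runner_context, ret, default=0):
        # Order matters: the runner context beats the return payload.
        if "retcode" in runner_context:
            return runner_context["retcode"]
        if isinstance(ret, dict) and "retcode" in ret:
            return ret["retcode"]
        if isinstance(ret, dict) and "retcode" in ret.get("data", {}):
            return ret["data"]["retcode"]
        return default

    assert resolve_exit_code({}, {"data": {"retcode": 2}}) == 2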
- if 'retcode' in runner.context: - self.exit(runner.context['retcode']) - if isinstance(ret, dict) and 'retcode' in ret: - self.exit(ret['retcode']) - elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}): - self.exit(ret['data']['retcode']) + if "retcode" in runner.context: + self.exit(runner.context["retcode"]) + if isinstance(ret, dict) and "retcode" in ret: + self.exit(ret["retcode"]) + elif isinstance(ret, dict) and "retcode" in ret.get("data", {}): + self.exit(ret["data"]["retcode"]) finally: output_profile( pr, stats_path=self.options.profiling_path, stop=True diff --git a/salt_sproxy/parsers.py b/salt_sproxy/parsers.py index 5408972e..29e770a2 100644 --- a/salt_sproxy/parsers.py +++ b/salt_sproxy/parsers.py @@ -2,18 +2,18 @@ from __future__ import absolute_import import sys -import optparse +import optparse # pylint: disable=deprecated-module import multiprocessing import salt_sproxy.version -from salt.ext import six +import six +from six.moves import map # pylint: disable=W8410 +from six.moves import range # pylint: disable=W8410 import salt.version import salt.utils.args import salt.utils.parsers import salt.config as config -from salt.ext.six.moves import map -from salt.ext.six.moves import range try: from jnpr.junos import __version__ as jnpr_version @@ -26,62 +26,62 @@ def salt_information(): - ''' + """ Return version of Salt and salt-sproxy. - ''' - yield 'Salt', salt.version.__version__ - yield 'Salt SProxy', salt_sproxy.version.__version__ + """ + yield "Salt", salt.version.__version__ + yield "Salt SProxy", salt_sproxy.version.__version__ def dependency_information(include_salt_cloud=False): - ''' + """ Report versions of library dependencies. This function has been ported from https://github.com/saltstack/salt/blob/develop/salt/version.py and extended here to collect the version information for several more libraries that may be necessary for various Proxy (or Execution) Modules. 
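Each `(name, import name, attribute)` triple in the `libs` list below is resolved the same way: import the module, read the attribute, call it if callable, and dot-join tuple versions. A rough equivalent of that loop, using `importlib` here purely for illustration:

    import importlib

    def lib_version(imp, attr):
        try:
            module = importlib.import_module(imp)
            version = getattr(module, attr)
            if callable(version):
                version = version()
            if isinstance(version, (tuple, list)):
                version = ".".join(map(str, version))
            return version
        except Exception:
            return None  # reported as not installed / unknown

    print(lib_version("yaml", "__version__"))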
- ''' + """ libs = [ - ('Python', None, sys.version.rsplit('\n')[0].strip()), - ('NAPALM', 'napalm', '__version__'), - ('Netmiko', 'netmiko', '__version__'), - ('junos-eznc', None, jnpr_version), - ('ncclient', 'ncclient', '__version__'), - ('paramiko', 'paramiko', '__version__'), - ('pyeapi', 'pyeapi', '__version__'), - ('textfsm', 'textfsm', '__version__'), - ('jxmlease', 'jxmlease', '__version__'), - ('scp', 'scp', '__version__'), - ('PyNSO', 'pynso', '__version__'), - ('Ansible', 'ansible', '__version__'), - ('PyNetBox', 'pynetbox', '__version__'), - ('Jinja2', 'jinja2', '__version__'), - ('M2Crypto', 'M2Crypto', 'version'), - ('msgpack-python', 'msgpack', 'version'), - ('msgpack-pure', 'msgpack_pure', 'version'), - ('pycrypto', 'Crypto', '__version__'), - ('pycryptodome', 'Cryptodome', 'version_info'), - ('PyYAML', 'yaml', '__version__'), - ('PyZMQ', 'zmq', '__version__'), - ('ZMQ', 'zmq', 'zmq_version'), - ('Mako', 'mako', '__version__'), - ('Tornado', 'tornado', 'version'), - ('timelib', 'timelib', 'version'), - ('dateutil', 'dateutil', '__version__'), - ('pygit2', 'pygit2', '__version__'), - ('libgit2', 'pygit2', 'LIBGIT2_VERSION'), - ('smmap', 'smmap', '__version__'), - ('cffi', 'cffi', '__version__'), - ('pycparser', 'pycparser', '__version__'), - ('gitdb', 'gitdb', '__version__'), - ('gitpython', 'git', '__version__'), - ('python-gnupg', 'gnupg', '__version__'), - ('docker-py', 'docker', '__version__'), + ("Python", None, sys.version.rsplit("\n", maxsplit=1)[0].strip()), + ("NAPALM", "napalm", "__version__"), + ("Netmiko", "netmiko", "__version__"), + ("junos-eznc", None, jnpr_version), + ("ncclient", "ncclient", "__version__"), + ("paramiko", "paramiko", "__version__"), + ("pyeapi", "pyeapi", "__version__"), + ("textfsm", "textfsm", "__version__"), + ("jxmlease", "jxmlease", "__version__"), + ("scp", "scp", "__version__"), + ("PyNSO", "pynso", "__version__"), + ("Ansible", "ansible", "__version__"), + ("PyNetBox", "pynetbox", "__version__"), + ("Jinja2", "jinja2", "__version__"), + ("M2Crypto", "M2Crypto", "version"), + ("msgpack-python", "msgpack", "version"), + ("msgpack-pure", "msgpack_pure", "version"), + ("pycrypto", "Crypto", "__version__"), + ("pycryptodome", "Cryptodome", "version_info"), + ("PyYAML", "yaml", "__version__"), + ("PyZMQ", "zmq", "__version__"), + ("ZMQ", "zmq", "zmq_version"), + ("Mako", "mako", "__version__"), + ("Tornado", "tornado", "version"), + ("timelib", "timelib", "version"), + ("dateutil", "dateutil", "__version__"), + ("pygit2", "pygit2", "__version__"), + ("libgit2", "pygit2", "LIBGIT2_VERSION"), + ("smmap", "smmap", "__version__"), + ("cffi", "cffi", "__version__"), + ("pycparser", "pycparser", "__version__"), + ("gitdb", "gitdb", "__version__"), + ("gitpython", "git", "__version__"), + ("python-gnupg", "gnupg", "__version__"), + ("docker-py", "docker", "__version__"), ] if include_salt_cloud: - libs.append(('Apache Libcloud', 'libcloud', '__version__')) + libs.append(("Apache Libcloud", "libcloud", "__version__")) for name, imp, attr in libs: if imp is None: @@ -93,7 +93,7 @@ def dependency_information(include_salt_cloud=False): if callable(version): version = version() if isinstance(version, (tuple, list)): - version = '.'.join(map(str, version)) + version = ".".join(map(str, version)) yield name, version except Exception: yield name, None @@ -121,437 +121,436 @@ class SaltStandaloneProxyOptionParser( salt.utils.parsers.NoParseMixin, ) ): - default_timeout = 60 description = ( - r''' + r""" ___ _ _ ___ ___ / __| __ _ | | | |_ / __| | _ \ _ _ 
___ __ __ _ _ \__ \ / _` | | | | _| \__ \ | _/ | '_| / _ \ \ \ / | || | |___/ \__,_| |_| \__| |___/ |_| |_| \___/ /_\_\ \_, | |__/ -''' - 'salt-sproxy is a tool to invoke arbitrary Salt functions on a group\n' - 'of (network) devices connecting through a Salt Proxy Minion, without\n' - 'having the Proxy Minion services up and running (or the Master).\n' +""" + "salt-sproxy is a tool to invoke arbitrary Salt functions on a group\n" + "of (network) devices connecting through a Salt Proxy Minion, without\n" + "having the Proxy Minion services up and running (or the Master).\n" ) VERSION = salt_sproxy.version.__version__ - usage = '%prog [options] [arguments]' + usage = "%prog [options] [arguments]" epilog = ( - 'You can find additional help about %prog at ' - 'https://salt-sproxy.readthedocs.io/en/latest/' + "You can find additional help about %prog at " + "https://salt-sproxy.readthedocs.io/en/latest/" ) # ConfigDirMixIn config filename attribute - _config_filename_ = 'master' + _config_filename_ = "master" # LogLevelMixIn attributes - _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level'] - _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file'] + _default_logging_level_ = config.DEFAULT_MASTER_OPTS["log_level"] + _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS["log_file"] def format_description(self, formatter): return self.description def _mixin_setup(self): self.add_option( - '-r', '--roster', default=False, help='The name of the Salt Roster to use.' + "-r", "--roster", default=False, help="The name of the Salt Roster to use." ) self.add_option( - '--roster-file', - dest='roster_file', - help='Absolute path to the Roster file to use.', + "--roster-file", + dest="roster_file", + help="Absolute path to the Roster file to use.", ) self.add_option( - '-s', - '--static', + "-s", + "--static", default=False, - action='store_true', - help=('Return the data from devices as a group after they all return.'), + action="store_true", + help=("Return the data from devices as a group after they all return."), ) self.add_option( "--async", default=False, dest="async", action="store_true", - help=('Run the salt-sproxy command but don\'t wait for a reply.'), + help=("Run the salt-sproxy command but don't wait for a reply."), ) self.add_option( - '--dont-cache-grains', + "--dont-cache-grains", default=False, - action='store_true', - help=('Do not cache the collected Grains for the sproxy devices.'), + action="store_true", + help=("Do not cache the collected Grains for the sproxy devices."), ) self.add_option( - '--dont-cache-pillar', + "--dont-cache-pillar", default=False, - action='store_true', - help=('Do not cache the compiled Pillar for the sproxy devices.'), + action="store_true", + help=("Do not cache the compiled Pillar for the sproxy devices."), ) self.add_option( - '--no-cached-grains', + "--no-cached-grains", default=False, - action='store_true', - help='Do not use the available cached Grains (if any).', + action="store_true", + help="Do not use the available cached Grains (if any).", ) self.add_option( - '--no-cached-pillar', + "--no-cached-pillar", default=False, - action='store_true', - help='Do not use the available cached Pillar (if any)', + action="store_true", + help="Do not use the available cached Pillar (if any)", ) self.add_option( - '--no-grains', + "--no-grains", default=False, - action='store_true', + action="store_true", help=( - 'Do not attempt to collect Grains at all. Use with care, it ' - 'may lead to unexpected results.' 
+                "Do not attempt to collect Grains at all. Use with care, it "
+                "may lead to unexpected results."
             ),
         )
         self.add_option(
-            '-i',
-            '--ignore-host-keys',
+            "-i",
+            "--ignore-host-keys",
             default=False,
-            action='store_true',
-            dest='ignore_host_keys',
+            action="store_true",
+            dest="ignore_host_keys",
             help=(
-                'By default ssh host keys are honored and connections will ask '
-                'for approval. Use this option to disable StrictHostKeyChecking.'
+                "By default ssh host keys are honored and connections will ask "
+                "for approval. Use this option to disable StrictHostKeyChecking."
             ),
         )
         self.add_option(
-            '--no-host-keys',
+            "--no-host-keys",
             default=False,
-            action='store_true',
-            dest='no_host_keys',
+            action="store_true",
+            dest="no_host_keys",
             help=(
-                'Fully ignores ssh host keys which by default are honored and '
-                'connections would ask for approval. Useful if the host key of '
-                'a remote server has changed and would still error with '
-                '--ignore-host-keys.'
+                "Fully ignores ssh host keys which by default are honored and "
+                "connections would ask for approval. Useful if the host key of "
+                "a remote server has changed and would still error with "
+                "--ignore-host-keys."
             ),
         )
         self.add_option(
-            '--identities-only',
+            "--identities-only",
             default=False,
-            action='store_true',
-            dest='identities_only',
+            action="store_true",
+            dest="identities_only",
             help=(
-                'Use the only authentication identity files configured in the '
-                'ssh_config files. See ``IdentitiesOnly`` flag in man ssh_config.'
+                "Use only the authentication identity files configured in the "
+                "ssh_config files. See ``IdentitiesOnly`` flag in man ssh_config."
             ),
         )
         self.add_option(
-            '--priv',
-            dest='priv',
-            help=('Specify the SSH private key file to be used for authentication.'),
+            "--priv",
+            dest="priv",
+            help=("Specify the SSH private key file to be used for authentication."),
         )
         self.add_option(
-            '--priv-passwd',
-            dest='priv_passwd',
-            help=('Specify the SSH private key file\'s passphrase when required.'),
+            "--priv-passwd",
+            dest="priv_passwd",
+            help=("Specify the SSH private key file's passphrase when required."),
         )
         self.add_option(
-            '--preload-targeting',
+            "--preload-targeting",
             default=False,
-            action='store_true',
+            action="store_true",
             help=(
-                'Preload Grains for all the devices before targeting.'
-                'This is useful to match the devices on dynamic Grains that '
-                'do not require the connection with the remote device - e.g., '
-                'Grains collected from an external API, etc.'
+                "Preload Grains for all the devices before targeting. "
+                "This is useful to match the devices on dynamic Grains that "
+                "do not require the connection with the remote device - e.g., "
+                "Grains collected from an external API, etc."
             ),
         )
         self.add_option(
-            '--invasive-targeting',
+            "--invasive-targeting",
             default=False,
-            action='store_true',
+            action="store_true",
             help=(
-                'Collect all the possible data from every device salt-sproxy '
-                'is aware of, before distributing the commands. '
-                'In other words, this option tells salt-sproxy to connect to '
-                'every possible device defined in the Roster of choice, collect '
-                'Grains, compile Pillars, etc., and only then execute the '
-                'command against the devices matching the target expression.'
-                'Use with care, as this may significantly reduce the '
-                'performances, but this is the price paid to be able to target '
-                'using device properties. '
-                'Consider using this option in conjunction with --cache-grains '
-                'and / or --cache-pillar to cache the Grains and the Pillars to '
-                're-use them straight away next time.'
+                "Collect all the possible data from every device salt-sproxy "
+                "is aware of, before distributing the commands. "
+                "In other words, this option tells salt-sproxy to connect to "
+                "every possible device defined in the Roster of choice, collect "
+                "Grains, compile Pillars, etc., and only then execute the "
+                "command against the devices matching the target expression. "
+                "Use with care, as this may significantly reduce the "
+                "performance, but this is the price paid to be able to target "
+                "using device properties. "
+                "Consider using this option in conjunction with --cache-grains "
+                "and / or --cache-pillar to cache the Grains and the Pillars to "
+                "re-use them straight away next time."
             ),
         )
         self.add_option(
-            '--no-pillar',
+            "--no-pillar",
             default=False,
-            action='store_true',
+            action="store_true",
             help=(
-                'Do not compile Pillar at all. Use with care, it may lead to '
-                'unexpected results.'
+                "Do not compile Pillar at all. Use with care, it may lead to "
+                "unexpected results."
             ),
         )
         self.add_option(
-            '-b',
-            '--batch',
-            '--batch-size',
-            dest='batch_size',
+            "-b",
+            "--batch",
+            "--batch-size",
+            dest="batch_size",
             help=(
-                'The number of devices to connect to in parallel. '
-                'Default: {} (number of CPUs on your machine)'.format(CPU_COUNT)
+                "The number of devices to connect to in parallel. "
+                "Default: {} (number of CPUs on your machine)".format(CPU_COUNT)
             ),
         )
         self.add_option(
-            '--preview-target',
-            dest='preview_target',
-            action='store_true',
-            help='Show the devices expected to match the target.',
+            "--preview-target",
+            dest="preview_target",
+            action="store_true",
+            help="Show the devices expected to match the target.",
         )
         self.add_option(
-            '--sync-all',
-            dest='sync_all',
-            action='store_true',
+            "--sync-all",
+            dest="sync_all",
+            action="store_true",
             help=(
-                'Load the all extension modules provided with salt-sproxy, as '
-                'well as the extension modules from your own environment.'
+                "Load all the extension modules provided with salt-sproxy, as "
+                "well as the extension modules from your own environment."
             ),
         )
         self.add_option(
-            '--sync-grains',
-            dest='sync_grains',
-            action='store_true',
+            "--sync-grains",
+            dest="sync_grains",
+            action="store_true",
             help=(
-                'Re-sync the Grains modules. Useful if you have custom Grains '
-                'modules in your own environment.'
+                "Re-sync the Grains modules. Useful if you have custom Grains "
+                "modules in your own environment."
             ),
         )
         self.add_option(
-            '--sync-modules',
-            dest='sync_modules',
-            action='store_true',
-            help=('Load the salt-sproxy Execution modules.'),
+            "--sync-modules",
+            dest="sync_modules",
+            action="store_true",
+            help=("Load the salt-sproxy Execution modules."),
         )
         self.add_option(
-            '--sync-roster',
-            dest='sync_roster',
-            action='store_true',
+            "--sync-roster",
+            dest="sync_roster",
+            action="store_true",
             help=(
-                'Synchronise the Roster modules (both salt-sproxy native '
-                'and provided by the user in their own environment).'
+                "Synchronise the Roster modules (both salt-sproxy native "
+                "and provided by the user in their own environment)."
            ),
         )
         self.add_option(
-            '--sync-proxy',
-            dest='sync_proxy',
-            action='store_true',
-            help=('Load the salt-sproxy Proxy modules.'),
+            "--sync-proxy",
+            dest="sync_proxy",
+            action="store_true",
+            help=("Load the salt-sproxy Proxy modules."),
         )
         self.add_option(
-            '--sync-executors',
-            dest='sync_executors',
-            action='store_true',
-            help=('Load the salt-sproxy Executor modules.'),
+            "--sync-executors",
+            dest="sync_executors",
+            action="store_true",
+            help=("Load the salt-sproxy Executor modules."),
         )
         self.add_option(
-            '--saltenv',
-            dest='saltenv_cli',
-            help='The Salt environment name to load module and files from',
+            "--saltenv",
+            dest="saltenv_cli",
+            help="The Salt environment name to load modules and files from",
         )
         self.add_option(
-            '--events',
-            dest='events',
-            action='store_true',
+            "--events",
+            dest="events",
+            action="store_true",
             help=(
-                'Whether should put the events on the Salt bus (mostly '
-                'useful when having a Master running).'
+                "Whether to put the events on the Salt bus (mostly "
+                "useful when having a Master running)."
             ),
         )
         self.add_option(
-            '--use-proxy',
-            '--use-existing-proxy',
-            '--use-existing-minion',
-            dest='use_existing_proxy',
-            action='store_true',
+            "--use-proxy",
+            "--use-existing-proxy",
+            "--use-existing-minion",
+            dest="use_existing_proxy",
+            action="store_true",
             help=(
-                'Use the existing Proxy Minions to execute the commands, '
-                'whenever available.'
+                "Use the existing Proxy Minions to execute the commands, "
+                "whenever available."
             ),
         )
         self.add_option(
-            '--pillar-root',
+            "--pillar-root",
             default=None,
-            help='Set this directory as the base pillar root.',
+            help="Set this directory as the base pillar root.",
         )
         self.add_option(
-            '--file-root',
+            "--file-root",
             default=None,
-            help='Set this directory as the base file root.',
+            help="Set this directory as the base file root.",
         )
         self.add_option(
-            '--states-dir',
+            "--states-dir",
             default=None,
-            help='Set this directory to search for additional states.',
+            help="Set this directory to search for additional states.",
         )
         self.add_option(
-            '-m',
-            '--module-dirs',
-            dest='module_dirs_cli',
+            "-m",
+            "--module-dirs",
+            dest="module_dirs_cli",
             default=[],
-            action='append',
+            action="append",
             help=(
-                'Specify an additional directory to pull modules from. '
-                'Multiple directories can be provided by passing '
-                '`-m/--module-dirs` multiple times.'
+                "Specify an additional directory to pull modules from. "
+                "Multiple directories can be provided by passing "
+                "`-m/--module-dirs` multiple times."
             ),
         )
         self.add_option(
-            '--installation-path',
-            dest='installation_path',
-            action='store_true',
-            help=('Display the absolute path to where salt-sproxy is installed.'),
+            "--installation-path",
+            dest="installation_path",
+            action="store_true",
+            help=("Display the absolute path to where salt-sproxy is installed."),
         )
         self.add_option(
-            '--display-file-roots',
-            dest='display_file_roots',
-            action='store_true',
+            "--display-file-roots",
+            dest="display_file_roots",
+            action="store_true",
             help=(
-                'Display the file_roots option you would need to configure '
-                'in order to use the salt-sproxy extension modules directly, '
-                'and, implicitly, leverage the event-driven methodology and '
-                'the Salt REST API.'
+                "Display the file_roots option you would need to configure "
+                "in order to use the salt-sproxy extension modules directly, "
+                "and, implicitly, leverage the event-driven methodology and "
+                "the Salt REST API."
            ),
         )
         self.add_option(
-            '--save-file-roots',
-            dest='save_file_roots',
-            action='store_true',
+            "--save-file-roots",
+            dest="save_file_roots",
+            action="store_true",
             help=(
-                'Saves the file_roots configuration so you can start '
-                'leveraging the event-driven automation and the Salt REST API.'
+                "Saves the file_roots configuration so you can start "
+                "leveraging the event-driven automation and the Salt REST API."
             ),
         )
         self.add_option(
-            '--config-dump',
-            dest='config_dump',
+            "--config-dump",
+            dest="config_dump",
             default=False,
-            action='store_true',
-            help='Dump the salt-sproxy configuration values',
+            action="store_true",
+            help="Dump the salt-sproxy configuration values",
         )
         self.add_option(
-            '--no-connect',
-            dest='no_connect',
-            action='store_true',
+            "--no-connect",
+            dest="no_connect",
+            action="store_true",
             default=False,
             help=(
-                'Do not initiate the connection with the device, only use '
-                'cached data to compile data and execute Salt functions that '
-                'do not require the actual connection with the device.'
+                "Do not initiate the connection with the device, only use "
+                "cached data to compile data and execute Salt functions that "
+                "do not require the actual connection with the device."
             ),
         )
         self.add_option(
-            '--test-ping',
-            dest='test_ping',
-            action='store_true',
+            "--test-ping",
+            dest="test_ping",
+            action="store_true",
             default=False,
             help=(
-                'When using together with --use-existing-proxy, this option can'
-                ' help to ensure the existing Proxy Minion is responsive (not '
-                'only up and running, by executing a ping test.'
+                "When used together with --use-existing-proxy, this option can"
+                " help to ensure the existing Proxy Minion is responsive (not "
+                "only up and running), by executing a ping test."
             ),
         )
         self.add_option(
-            '--failhard',
-            dest='failhard',
-            action='store_true',
+            "--failhard",
+            dest="failhard",
+            action="store_true",
             default=False,
-            help='Stop execution at the first execution error.',
+            help="Stop execution at the first execution error.",
         )
         self.add_option(
-            '--target-cache',
-            dest='target_cache',
-            action='store_true',
+            "--target-cache",
+            dest="target_cache",
+            action="store_true",
             default=False,
-            help=('Cache the list of devices matched by your target expression.'),
+            help=("Cache the list of devices matched by your target expression."),
         )
         group = self.output_options_group = optparse.OptionGroup(
-            self, 'Output Options', 'Configure your preferred output format.'
+            self, "Output Options", "Configure your preferred output format."
         )
         self.add_option_group(group)
         group.add_option(
-            '-q',
-            '--quiet',
+            "-q",
+            "--quiet",
             default=False,
-            action='store_true',
-            help='Do not display the results of the run.',
+            action="store_true",
+            help="Do not display the results of the run.",
         )
         self.add_option(
-            '--summary',
+            "--summary",
             default=False,
-            action='store_true',
-            help='Display salt execution summary information.',
+            action="store_true",
+            help="Display salt execution summary information.",
         )
         self.add_option(
-            '-v',
-            '--verbose',
+            "-v",
+            "--verbose",
             default=False,
-            action='store_true',
-            help='Turn on command verbosity, display jid and detailed summary.',
+            action="store_true",
+            help="Turn on command verbosity, display jid and detailed summary.",
         )
         self.add_option(
-            '--show-jid',
+            "--show-jid",
             default=False,
-            action='store_true',
-            help='Display jid without the additional output of --verbose.',
+            action="store_true",
+            help="Display jid without the additional output of --verbose.",
         )
         self.add_option(
-            '--hide-timeout',
+            "--hide-timeout",
             default=False,
-            action='store_true',
-            help='Hide devices that timeout.',
+            action="store_true",
+            help="Hide devices that timeout.",
         )
         self.add_option(
-            '--batch-wait',
+            "--batch-wait",
             default=0,
             type=float,
             help=(
-                'Wait the specified time in seconds after each batch is done'
-                'before executing the next one.'
+                "Wait the specified time in seconds after each batch is done "
+                "before executing the next one."
             ),
         )
         self.add_option(
-            '-p',
-            '--progress',
+            "-p",
+            "--progress",
             default=False,
-            action='store_true',
-            help='Display a progress graph.',
+            action="store_true",
+            help="Display a progress graph.",
         )
         self.add_option(
-            '--return',
-            dest='returner',
-            default='',
-            metavar='RETURNER',
+            "--return",
+            dest="returner",
+            default="",
+            metavar="RETURNER",
             help=(
-                'The name of the Returner module to use for sending data to '
-                'various external systems.'
+                "The name of the Returner module to use for sending data to "
+                "various external systems."
             ),
         )
         self.add_option(
-            '--return-config',
-            dest='returner_config',
-            default='',
-            metavar='RETURNER_CONF',
-            help='Specify an alternative Returner config.',
+            "--return-config",
+            dest="returner_config",
+            default="",
+            metavar="RETURNER_CONF",
+            help="Specify an alternative Returner config.",
         )
         self.add_option(
-            '--return-kwargs',
-            dest='returner_kwargs',
+            "--return-kwargs",
+            dest="returner_kwargs",
             default={},
-            metavar='RETURNER_KWARGS',
-            help='Set Returner options at the command line.',
+            metavar="RETURNER_KWARGS",
+            help="Set Returner options at the command line.",
         )
         self.add_option(
             "-d",
@@ -561,8 +560,8 @@ def _mixin_setup(self):
             default=False,
             action="store_true",
             help=(
-                'Return the documentation for the specified module or for '
-                'all modules if none are specified.'
+                "Return the documentation for the specified module or for "
+                "all modules if none are specified."
             ),
         )
 
@@ -576,51 +575,51 @@ def _mixin_after_parsed(self):
             or self.options.config_dump
         ):
             # Insert dummy arg when displaying the file_roots
-            self.args.append('not_a_valid_target')
-            self.args.append('not_a_valid_command')
+            self.args.append("not_a_valid_target")
+            self.args.append("not_a_valid_command")
         elif self.options.doc:
             if len(self.args) == 1:
-                self.args.insert(0, 'not_a_valid_target')
+                self.args.insert(0, "not_a_valid_target")
             elif len(self.args) == 0:
-                self.args.append('not_a_valid_target')
-                self.args.append('*')
+                self.args.append("not_a_valid_target")
+                self.args.append("*")
 
         if self.options.list:
             try:
-                if ',' in self.args[0]:
-                    self.config['tgt'] = self.args[0].replace(' ', '').split(',')
+                if "," in self.args[0]:
+                    self.config["tgt"] = self.args[0].replace(" ", "").split(",")
                 else:
-                    self.config['tgt'] = self.args[0].split()
+                    self.config["tgt"] = self.args[0].split()
             except IndexError:
-                self.exit(42, '\nCannot execute command without defining a target.\n\n')
+                self.exit(42, "\nCannot execute command without defining a target.\n\n")
         else:
            try:
-                self.config['tgt'] = self.args[0]
+                self.config["tgt"] = self.args[0]
            except IndexError:
-                self.exit(42, '\nCannot execute command without defining a target.\n\n')
+                self.exit(42, "\nCannot execute command without defining a target.\n\n")
 
         if self.options.preview_target:
             # Insert dummy arg which won't be used
-            self.args.append('not_a_valid_command')
 
+            self.args.append("not_a_valid_command")
         # Detect compound command and set up the data for it
         if self.args:
             try:
-                if ',' in self.args[1]:
-                    self.config['fun'] = self.args[1].split(',')
-                    self.config['arg'] = [[]]
+                if "," in self.args[1]:
+                    self.config["fun"] = self.args[1].split(",")
+                    self.config["arg"] = [[]]
                     cmd_index = 0
                     if (
                         self.args[2:].count(self.options.args_separator)
-                        == len(self.config['fun']) - 1
+                        == len(self.config["fun"]) - 1
                     ):
                         # new style parsing: standalone argument separator
                         for arg in self.args[2:]:
                             if arg == self.options.args_separator:
                                 cmd_index += 1
-                                self.config['arg'].append([])
+                                self.config["arg"].append([])
                             else:
-                                self.config['arg'][cmd_index].append(arg)
+                                self.config["arg"][cmd_index].append(arg)
                     else:
                         # old style parsing: argument separator can be inside args
                         for arg in self.args[2:]:
@@ -628,42 +627,42 @@ def _mixin_after_parsed(self):
                                 sub_args = arg.split(self.options.args_separator)
                                 for sub_arg_index, sub_arg in enumerate(sub_args):
                                     if sub_arg:
-                                        self.config['arg'][cmd_index].append(sub_arg)
+                                        self.config["arg"][cmd_index].append(sub_arg)
                                     if sub_arg_index != len(sub_args) - 1:
                                         cmd_index += 1
-                                        self.config['arg'].append([])
+                                        self.config["arg"].append([])
                             else:
-                                self.config['arg'][cmd_index].append(arg)
-                if len(self.config['fun']) > len(self.config['arg']):
+                                self.config["arg"][cmd_index].append(arg)
+                if len(self.config["fun"]) > len(self.config["arg"]):
                     self.exit(
                         42,
-                        'Cannot execute compound command without '
-                        'defining all arguments.\n',
+                        "Cannot execute compound command without "
+                        "defining all arguments.\n",
                     )
-                elif len(self.config['fun']) < len(self.config['arg']):
+                elif len(self.config["fun"]) < len(self.config["arg"]):
                     self.exit(
                         42,
-                        'Cannot execute compound command with more '
-                        'arguments than commands.\n',
+                        "Cannot execute compound command with more "
+                        "arguments than commands.\n",
                     )
                 # parse the args and kwargs before sending to the publish
                 # interface
-                for i in range(len(self.config['arg'])):
-                    self.config['arg'][i] = salt.utils.args.parse_input(
-                        self.config['arg'][i], no_parse=self.options.no_parse
+                for i in range(len(self.config["arg"])):
+                    self.config["arg"][i] = salt.utils.args.parse_input(
+                        self.config["arg"][i], no_parse=self.options.no_parse
                     )
             else:
-                self.config['fun'] = self.args[1]
-                self.config['arg'] = self.args[2:]
+                self.config["fun"] = self.args[1]
+                self.config["arg"] = self.args[2:]
                 # parse the args and kwargs before sending to the publish
                 # interface
-                self.config['arg'] = salt.utils.args.parse_input(
-                    self.config['arg'], no_parse=self.options.no_parse
+                self.config["arg"] = salt.utils.args.parse_input(
+                    self.config["arg"], no_parse=self.options.no_parse
                 )
         except IndexError:
-            self.exit(42, '\nIncomplete options passed.\n\n')
+            self.exit(42, "\nIncomplete options passed.\n\n")
 
     def setup_config(self):
         defaults = config.DEFAULT_MASTER_OPTS.copy()
-        defaults['timeout'] = 60
+        defaults["timeout"] = 60
         return config.client_config(self.get_config_file_path(), defaults=defaults)
diff --git a/salt_sproxy/scripts.py b/salt_sproxy/scripts.py
index a722851a..95da5f58 100644
--- a/salt_sproxy/scripts.py
+++ b/salt_sproxy/scripts.py
@@ -5,9 +5,9 @@
 import inspect
 import logging
 
+import six
 import salt.netapi
 import salt.scripts
-from salt.ext import six
 import salt.utils.parsers
 from salt.scripts import _install_signal_handlers
 from salt_sproxy._runners.proxy import execute as sproxy_execute
@@ -16,11 +16,11 @@
 
 
 def _prep_kwargs(kwargs, opts):
-    '''
+    """
     Gather the sproxy execute argument from the Master opts when available.
-    '''
+    """
     execute_args = set(inspect.getfullargspec(sproxy_execute)[0])
-    sapi_args = execute_args - {'tgt', 'salt_function', 'tgt_type', 'static', 'timeout'}
+    sapi_args = execute_args - {"tgt", "salt_function", "tgt_type", "static", "timeout"}
     for arg in sapi_args:
         if arg not in kwargs and arg in opts:
             kwargs[arg] = opts[arg]
@@ -28,36 +28,36 @@ def _prep_kwargs(kwargs, opts):
 
 
 def sapi_sproxy(
-    self, tgt, fun, tgt_type='glob', timeout=None, full_return=False, **kwargs
+    self, tgt, fun, tgt_type="glob", timeout=None, full_return=False, **kwargs
 ):
-    '''
+    """
     Shortcut to invoke an arbitrary Salt function via sproxy.
-    '''
+    """
     kwargs.update(
-        {'salt_function': fun, 'tgt': tgt, 'tgt_type': tgt_type, 'static': True}
+        {"salt_function": fun, "tgt": tgt, "tgt_type": tgt_type, "static": True}
     )
     kwargs = _prep_kwargs(kwargs, self.opts)
-    log.debug('New kwargs:')
+    log.debug("New kwargs:")
     log.debug(kwargs)
     return salt.netapi.NetapiClient.runner(
-        self, 'proxy.execute', timeout=timeout, full_return=full_return, **kwargs
+        self, "proxy.execute", timeout=timeout, full_return=full_return, **kwargs
     )
 
 
 def sapi_sproxy_async(
-    self, tgt, fun, tgt_type='glob', timeout=None, full_return=False, **kwargs
+    self, tgt, fun, tgt_type="glob", timeout=None, full_return=False, **kwargs
 ):
-    '''
+    """
     Shortcut to invoke an arbitrary Salt function via sproxy, asynchronously.
- ''' + """ kwargs.update( - {'salt_function': fun, 'tgt': tgt, 'tgt_type': tgt_type, 'static': True} + {"salt_function": fun, "tgt": tgt, "tgt_type": tgt_type, "static": True} ) kwargs = _prep_kwargs(kwargs, self.opts) - log.debug('New kwargs:') + log.debug("New kwargs:") log.debug(kwargs) return salt.netapi.NetapiClient.runner_async( - self, 'proxy.execute', timeout=timeout, full_return=full_return, **kwargs + self, "proxy.execute", timeout=timeout, full_return=full_return, **kwargs ) @@ -68,39 +68,39 @@ def sapi_sproxy_async( for name, _ in inspect.getmembers( salt.netapi.NetapiClient, predicate=inspect.ismethod if six.PY2 else None ) - if not (name == 'run' or name.startswith('_')) + if not (name == "run" or name.startswith("_")) ] salt.utils.parsers.SaltAPIParser.description = ( - 'salt-sapi is an enhanced Salt API system that provides additional ' - 'sproxy and sproxy_async clients, to simplify the usage of salt-sproxy ' - 'through the Salt REST API' + "salt-sapi is an enhanced Salt API system that provides additional " + "sproxy and sproxy_async clients, to simplify the usage of salt-sproxy " + "through the Salt REST API" ) salt.utils.parsers.SaltAPIParser.epilog = ( 'You can find additional help about %prog issuing "man %prog" ' - 'or on https://salt-sproxy.readthedocs.io/ and ' - 'https://docs.saltstack.com/en/latest/ref/cli/salt-api.html.' + "or on https://salt-sproxy.readthedocs.io/ and " + "https://docs.saltstack.com/en/latest/ref/cli/salt-api.html." ) def salt_sapi(): - ''' + """ The main function for salt-sapi. - ''' + """ salt.scripts.salt_api() def salt_sproxy(): - ''' + """ Execute a salt convenience routine. - ''' + """ import salt_sproxy.cli - if '' in sys.path: - sys.path.remove('') + if "" in sys.path: + sys.path.remove("") client = salt_sproxy.cli.SaltStandaloneProxy() _install_signal_handlers(client) client.run() -if __name__ == '__main__': +if __name__ == "__main__": salt_sproxy() diff --git a/salt_sproxy/version.py b/salt_sproxy/version.py index 58fc0fac..bb352f1d 100644 --- a/salt_sproxy/version.py +++ b/salt_sproxy/version.py @@ -4,10 +4,10 @@ import pkg_resources try: - __version__ = pkg_resources.get_distribution('salt_sproxy').version + __version__ = pkg_resources.get_distribution("salt_sproxy").version except pkg_resources.DistributionNotFound: - __version__ = 'Not installed' + __version__ = "Not installed" -__version_info__ = tuple(__version__.split('.')) +__version_info__ = tuple(__version__.split(".")) -all = ['__version__', '__version_info__'] +all = ["__version__", "__version_info__"] diff --git a/setup.py b/setup.py index bd584f6d..9baa59ce 100644 --- a/setup.py +++ b/setup.py @@ -1,92 +1,91 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -''' +""" The setup script for Salt SProxy -''' +""" import codecs from setuptools import setup, find_packages -__author__ = 'Mircea Ulinic ' +__author__ = "Mircea Ulinic " -with codecs.open('pypi.rst', 'r', encoding='utf8') as file: +with codecs.open("pypi.rst", "r", encoding="utf8") as file: long_description = file.read() with open("requirements.txt", "r") as fs: reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith("#"))] -name = 'salt-sproxy' -repo_slug = 'mirceaulinic/{}'.format(name) -repo_url = 'https://github.com/{}'.format(repo_slug) +name = "salt-sproxy" +repo_slug = "mirceaulinic/{}".format(name) +repo_url = "https://github.com/{}".format(repo_slug) setup( name=name, - version='2022.10.0', - namespace_packages=['salt_sproxy'], + version="2023.8.0", + 
namespace_packages=["salt_sproxy"], packages=find_packages(), - author='Mircea Ulinic', - author_email='ping@mirceaulinic.net', - description='Salt plugin for managing devices and applications, without running (Proxy) Minions', + author="Mircea Ulinic", + author_email="ping@mirceaulinic.net", + description="Salt plugin for managing devices and applications, without running (Proxy) Minions", long_description=long_description, classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Environment :: Plugins', - 'License :: OSI Approved :: Apache Software License', - 'Topic :: Utilities', - 'Topic :: System :: Networking', - 'Topic :: System :: Clustering', - 'Topic :: System :: Operating System', - 'Topic :: System :: Distributed Computing', - 'Topic :: System :: Systems Administration', - 'Programming Language :: Python', - 'Programming Language :: Cython', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS', - 'Intended Audience :: Developers', - 'Intended Audience :: Telecommunications Industry', - 'Intended Audience :: System Administrators', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research', + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Environment :: Plugins", + "License :: OSI Approved :: Apache Software License", + "Topic :: Utilities", + "Topic :: System :: Networking", + "Topic :: System :: Clustering", + "Topic :: System :: Operating System", + "Topic :: System :: Distributed Computing", + "Topic :: System :: Systems Administration", + "Programming Language :: Python", + "Programming Language :: Cython", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: POSIX", + "Operating System :: Microsoft :: Windows", + "Operating System :: MacOS", + "Intended Audience :: Developers", + "Intended Audience :: Telecommunications Industry", + "Intended Audience :: System Administrators", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", ], - url='https://github.com/mirceaulinic/salt-sproxy', + url="https://github.com/mirceaulinic/salt-sproxy", license="Apache License 2.0", keywords=( - 'salt', - 'network', - 'automation', - 'cli', - 'proxy', - 'minion', - 'salt-extension', + "salt", + "network", + "automation", + "cli", + "proxy", + "minion", + "salt-extension", ), project_urls={ - 'CI: GitHub Actions': '{}/actions'.format(repo_url), - 'Docs: RTD': 'https://salt-sproxy.readthedocs.io/', - 'GitHub: issues': '{}/issues'.format(repo_url), - 'GitHub: repo': repo_url, + "CI: GitHub Actions": "{}/actions".format(repo_url), + "Docs: RTD": "https://salt-sproxy.readthedocs.io/", + "GitHub: issues": "{}/issues".format(repo_url), + "GitHub: repo": repo_url, }, include_package_data=True, install_requires=reqs, entry_points={ - 'console_scripts': [ - 'salt-sapi=salt_sproxy.scripts:salt_sapi', - 'salt-sproxy=salt_sproxy.scripts:salt_sproxy', + "console_scripts": [ + 
"salt-sapi=salt_sproxy.scripts:salt_sapi", + "salt-sproxy=salt_sproxy.scripts:salt_sproxy", ] }, extras_require={ - 'api': ['cherrypy'], - 'netbox': ['pynetbox'], - 'napalm': ['napalm'], - 'junos': ['junos-eznc'], - 'netmiko': ['netmiko'], + "api": ["cherrypy>=18.8.0"], + "netbox": ["pynetbox"], + "napalm": ["napalm"], + "junos": ["junos-eznc"], + "netmiko": ["netmiko"], }, - data_files=[('man/man1', ['docs/man/salt-sproxy.1', 'docs/man/salt-sapi.1'])], + data_files=[("man/man1", ["docs/man/salt-sproxy.1", "docs/man/salt-sapi.1"])], ) diff --git a/tests/run/master b/tests/run/master index 29cbb052..ad1199c5 100644 --- a/tests/run/master +++ b/tests/run/master @@ -50,3 +50,16 @@ grains: disable_grains: # disabling ESXI Grain module load due to https://github.com/saltstack/salt/issues/57811 - esxi + +netapi_enable_clients: + - local + - local_async + - local_batch + - local_subset + - runner + - runner_async + - ssh + - wheel + - wheel_async + - sproxy + - sproxy_async diff --git a/tox.ini b/tox.ini index 8cf26437..d7480836 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py3{6,7,8},cli,black,pylint,sphinx +envlist = py3{8,9,10,11},cli,black,pylint,sphinx skip_missing_interpreters = true [testenv] @@ -20,15 +20,15 @@ commands = deps = -rrequirements-dev.txt -basepython = python3.6 +basepython = python3.9 commands = - black --check --skip-string-normalization . + black --check . [testenv:pylint] deps = -rrequirements-dev.txt -basepython = python3.6 +basepython = python3.9 commands = pylint --rcfile=.pylintrc salt_sproxy/ @@ -36,12 +36,12 @@ commands = deps = -rdocs/requirements.txt -basepython = python3.6 +basepython = python3.9 changedir = docs/ commands = make doctest -whitelist_externals = +allowlist_externals = make