From ba9ec3b7848cbf1cba66f6a0a697a214d2ed6e33 Mon Sep 17 00:00:00 2001 From: Riccardo Pittau Date: Thu, 21 Aug 2025 09:34:44 +0200 Subject: [PATCH] [WIP] Convert 01_install_requirements to ansible Do not use this in production, yet! This patch converts the 01_install_requirements.sh file from pure bash to use a mix of few bash commands and extensive ansible playbooks. The initial conversion has been obtained using Cursor prompt using claude-4-sonnet model. Lots of manual changes have been applied to be able to get a successful outcome, and more testing is needed to verify the entire worflow work as intended. --- 01_install_requirements.sh | 177 ++---------------- README_ansible.md | 99 ++++++++++ install_requirements.yml | 94 ++++++++++ inventory.ini | 2 + ocp_cleanup.sh | 6 +- run_example.sh | 20 ++ tasks/configure_dnf.yml | 17 ++ tasks/early_deploy_validation.yml | 22 +++ tasks/handle_docker_distribution.yml | 16 ++ tasks/install_dev_packages.yml | 17 ++ tasks/install_oc_tools.yml | 30 +++ tasks/install_optional_packages.yml | 19 ++ tasks/install_passlib.yml | 28 +++ tasks/install_python_packages.yml | 8 + tasks/install_yq.yml | 18 ++ tasks/run_metal3_playbook.yml | 40 ++++ tasks/setup_metal3_repo.yml | 26 +++ tasks/setup_rhel8_repos.yml | 64 +++++++ tasks/setup_rhel9_repos.yml | 48 +++++ tasks/upgrade_packages.yml | 23 +++ vm-setup/vm-setup/firewall.yml | 8 + .../vm-setup/install-package-playbook.yml | 12 ++ vm-setup/vm-setup/inventory.ini | 2 + vm-setup/vm-setup/library/generate_macs.py | 92 +++++++++ vm-setup/vm-setup/requirements.yml | 12 ++ .../vm-setup/roles/common/defaults/main.yml | 116 ++++++++++++ .../common/tasks/extra_networks_tasks.yml | 55 ++++++ .../roles/common/tasks/generate_node_mac.yml | 7 + vm-setup/vm-setup/roles/common/tasks/main.yml | 44 +++++ .../roles/common/tasks/vm_nodes_tasks.yml | 18 ++ .../common/tasks/write_ironic_nodes_tasks.yml | 7 + .../common/templates/ironic_nodes.json.j2 | 75 ++++++++ .../vm-setup/roles/firewall/defaults/main.yml | 46 +++++ .../roles/firewall/tasks/firewalld.yaml | 59 ++++++ .../roles/firewall/tasks/iptables.yaml | 144 ++++++++++++++ .../vm-setup/roles/firewall/tasks/main.yml | 15 ++ .../vm-setup/roles/libvirt/defaults/main.yml | 41 ++++ .../roles/libvirt/files/get-domain-ip.sh | 25 +++ vm-setup/vm-setup/roles/libvirt/meta/main.yml | 2 + .../libvirt/tasks/install_setup_tasks.yml | 46 +++++ .../vm-setup/roles/libvirt/tasks/main.yml | 14 ++ .../libvirt/tasks/network_setup_tasks.yml | 146 +++++++++++++++ .../libvirt/tasks/network_teardown_tasks.yml | 33 ++++ .../roles/libvirt/tasks/vm_setup_tasks.yml | 154 +++++++++++++++ .../roles/libvirt/tasks/vm_teardown_tasks.yml | 102 ++++++++++ .../libvirt/templates/baremetalvm.xml.j2 | 90 +++++++++ .../libvirt/templates/libvirt_hook.sh.j2 | 6 + .../roles/libvirt/templates/network.xml.j2 | 143 ++++++++++++++ .../libvirt/templates/volume_pool.xml.j2 | 11 ++ vm-setup/vm-setup/roles/ovs/defaults/main.yml | 7 + vm-setup/vm-setup/roles/ovs/tasks/main.yml | 14 ++ .../packages_installation/defaults/main.yml | 106 +++++++++++ .../packages_installation/files/daemon.json | 5 + .../tasks/centos_required_packages.yml | 35 ++++ .../packages_installation/tasks/main.yml | 81 ++++++++ .../tasks/ubuntu_required_packages.yml | 153 +++++++++++++++ .../vm-setup/roles/virtbmc/defaults/main.yml | 9 + vm-setup/vm-setup/roles/virtbmc/meta/main.yml | 3 + .../vm-setup/roles/virtbmc/tasks/main.yml | 4 + .../roles/virtbmc/tasks/setup_tasks.yml | 167 +++++++++++++++++ .../roles/virtbmc/tasks/teardown_tasks.yml | 20 ++ .../virtbmc/templates/fake_nodes.json.j2 | 23 +++ vm-setup/vm-setup/setup-playbook.yml | 13 ++ vm-setup/vm-setup/teardown-playbook.yml | 17 ++ 64 files changed, 2791 insertions(+), 165 deletions(-) create mode 100644 README_ansible.md create mode 100644 install_requirements.yml create mode 100644 inventory.ini create mode 100755 run_example.sh create mode 100644 tasks/configure_dnf.yml create mode 100644 tasks/early_deploy_validation.yml create mode 100644 tasks/handle_docker_distribution.yml create mode 100644 tasks/install_dev_packages.yml create mode 100644 tasks/install_oc_tools.yml create mode 100644 tasks/install_optional_packages.yml create mode 100644 tasks/install_passlib.yml create mode 100644 tasks/install_python_packages.yml create mode 100644 tasks/install_yq.yml create mode 100644 tasks/run_metal3_playbook.yml create mode 100644 tasks/setup_metal3_repo.yml create mode 100644 tasks/setup_rhel8_repos.yml create mode 100644 tasks/setup_rhel9_repos.yml create mode 100644 tasks/upgrade_packages.yml create mode 100644 vm-setup/vm-setup/firewall.yml create mode 100644 vm-setup/vm-setup/install-package-playbook.yml create mode 100644 vm-setup/vm-setup/inventory.ini create mode 100644 vm-setup/vm-setup/library/generate_macs.py create mode 100644 vm-setup/vm-setup/requirements.yml create mode 100644 vm-setup/vm-setup/roles/common/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/common/tasks/extra_networks_tasks.yml create mode 100644 vm-setup/vm-setup/roles/common/tasks/generate_node_mac.yml create mode 100644 vm-setup/vm-setup/roles/common/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/common/tasks/vm_nodes_tasks.yml create mode 100644 vm-setup/vm-setup/roles/common/tasks/write_ironic_nodes_tasks.yml create mode 100644 vm-setup/vm-setup/roles/common/templates/ironic_nodes.json.j2 create mode 100644 vm-setup/vm-setup/roles/firewall/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/firewall/tasks/firewalld.yaml create mode 100644 vm-setup/vm-setup/roles/firewall/tasks/iptables.yaml create mode 100644 vm-setup/vm-setup/roles/firewall/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/files/get-domain-ip.sh create mode 100644 vm-setup/vm-setup/roles/libvirt/meta/main.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml create mode 100644 vm-setup/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 create mode 100644 vm-setup/vm-setup/roles/libvirt/templates/libvirt_hook.sh.j2 create mode 100644 vm-setup/vm-setup/roles/libvirt/templates/network.xml.j2 create mode 100644 vm-setup/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 create mode 100644 vm-setup/vm-setup/roles/ovs/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/ovs/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/packages_installation/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/packages_installation/files/daemon.json create mode 100644 vm-setup/vm-setup/roles/packages_installation/tasks/centos_required_packages.yml create mode 100644 vm-setup/vm-setup/roles/packages_installation/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/packages_installation/tasks/ubuntu_required_packages.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/defaults/main.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/meta/main.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/tasks/main.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/tasks/setup_tasks.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml create mode 100644 vm-setup/vm-setup/roles/virtbmc/templates/fake_nodes.json.j2 create mode 100644 vm-setup/vm-setup/setup-playbook.yml create mode 100644 vm-setup/vm-setup/teardown-playbook.yml diff --git a/01_install_requirements.sh b/01_install_requirements.sh index 8fe354c05..31108c9fb 100755 --- a/01_install_requirements.sh +++ b/01_install_requirements.sh @@ -1,179 +1,30 @@ #!/usr/bin/env bash -set -ex +# Wrapper script to run the Ansible playbook +# This replaces the original 01_install_requirements.sh + +set -ex +# Source the original environment setup source logging.sh source common.sh source sanitychecks.sh source utils.sh source validation.sh -early_deploy_validation true - -if [ -z "${METAL3_DEV_ENV}" ]; then - export REPO_PATH=${WORKING_DIR} - sync_repo_and_patch metal3-dev-env https://github.com/metal3-io/metal3-dev-env.git - pushd ${METAL3_DEV_ENV_PATH} - # Pin to a specific metal3-dev-env commit to ensure we catch breaking - # changes before they're used by everyone and CI. - # TODO -- come up with a plan for continuously updating this - # Note we only do this in the case where METAL3_DEV_ENV is - # unset, to enable developer testing of local checkouts - git reset c60510f410173ddf01a29b87c889249b0f263c9f --hard - - popd -fi - -# This must be aligned with the metal3-dev-env pinned version above, see -# https://github.com/metal3-io/metal3-dev-env/blob/master/lib/common.sh +# Export environment variables that the playbook expects +export WORKING_DIR=${WORKING_DIR:-$(pwd)} +export METAL3_DEV_ENV_PATH=${METAL3_DEV_ENV_PATH:-"${WORKING_DIR}/metal3-dev-env"} export ANSIBLE_VERSION=${ANSIBLE_VERSION:-"8.0.0"} - -# Speed up dnf downloads -sudo sh -c "echo 'fastestmirror=1' >> /etc/dnf/dnf.conf" -sudo sh -c "echo 'max_parallel_downloads=8' >> /etc/dnf/dnf.conf" - -# Refresh dnf data -# We could also use --refresh to just force metadata update -# in the upgrade command,but this is more explicit and complete -sudo dnf -y clean all - -old_version=$(sudo dnf info NetworkManager | grep Version | cut -d ':' -f 2) - -# Update to latest packages first -sudo dnf -y upgrade --nobest - -new_version=$(sudo dnf info NetworkManager | grep Version | cut -d ':' -f 2) -# If NetworkManager was upgraded it needs to be restarted -if [ "$old_version" != "$new_version" ]; then - sudo systemctl restart NetworkManager -fi - -# Install additional repos as needed for each OS version -# shellcheck disable=SC1091 -source /etc/os-release - -# NOTE(elfosardo): Hacks required for legacy and missing things due to bump in -#metal3-dev-env commit hash. -# All of those are needed because we're still behind for OS support. -# passlib needs to be installed as system dependency -if [[ -x "/usr/libexec/platform-python" ]]; then - sudo /usr/libexec/platform-python -m pip install passlib || sudo dnf -y install python3-pip && sudo /usr/libexec/platform-python -m pip install passlib -fi - -# Install ansible, other packages are installed via -# vm-setup/install-package-playbook.yml -case $DISTRO in - "centos8"|"rhel8"|"almalinux8"|"rocky8") - # install network-scripts package to be able to use legacy network commands - sudo dnf install -y network-scripts - if [[ $DISTRO == "centos8" ]] && [[ "$NAME" != *"Stream"* ]]; then - echo "CentOS is not supported, please switch to CentOS Stream / RHEL / Rocky / Alma" - exit 1 - fi - if [[ $DISTRO == "centos8" || $DISTRO == "almalinux8" || $DISTRO == "rocky8" ]]; then - sudo dnf -y install epel-release dnf --enablerepo=extras - elif [[ $DISTRO == "rhel8" ]]; then - # Enable EPEL for python3-passlib and python3-bcrypt required by metal3-dev-env - sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - if sudo subscription-manager repos --list-enabled 2>&1 | grep "ansible-2-for-rhel-8-$(uname -m)-rpms"; then - # The packaged 2.x ansible is too old for compatibility with metal3-dev-env - sudo dnf erase -y ansible - sudo subscription-manager repos --disable=ansible-2-for-rhel-8-$(uname -m)-rpms - fi - fi - # Note recent ansible needs python >= 3.8 so we install 3.9 here - sudo dnf -y install python39 - sudo alternatives --set python /usr/bin/python3.9 - sudo alternatives --set python3 /usr/bin/python3.9 - sudo update-alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.9 1 - PYTHON_DEVEL="python39-devel" - ;; - "centos9"|"rhel9"|"almalinux9"|"rocky9") - sudo dnf -y install python3-pip - if [[ $DISTRO == "centos9" || $DISTRO == "almalinux9" || $DISTRO == "rocky9" ]] ; then - sudo dnf config-manager --set-enabled crb - sudo dnf -y install epel-release - elif [[ $DISTRO == "rhel9" ]]; then - # NOTE(raukadah): If a system is subscribed to RHEL subscription then - # sudo subscription-manager identity will return exit 0 else 1. - if sudo subscription-manager identity > /dev/null 2>&1; then - # NOTE(elfosardo): a valid RHEL subscription is needed to be able to - # enable the CRB repository - sudo subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms - fi - sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm - fi - sudo ln -s /usr/bin/python3 /usr/bin/python || true - PYTHON_DEVEL="python3-devel" - ;; - *) - echo -n "CentOS or RHEL version not supported" - exit 1 - ;; -esac - -# We use yq in a few places for processing YAML but it isn't packaged -# for CentOS/RHEL so we have to install from pip. We do not want to -# overwrite an existing installation of the golang version, though, -# so check if we have a yq before installing. -if ! which yq 2>&1 >/dev/null; then - sudo python -m pip install 'yq>=3,<4' -else - echo "Using yq from $(which yq)" -fi - -GO_VERSION=${GO_VERSION:-1.22.3} - -GOARCH=$(uname -m) -if [[ $GOARCH == "aarch64" ]]; then - GOARCH="arm64" - sudo dnf -y install $PYTHON_DEVEL libxml2-devel libxslt-devel -elif [[ $GOARCH == "x86_64" ]]; then - GOARCH="amd64" -fi +export GO_VERSION=${GO_VERSION:-"1.22.3"} # Also need the 3.9 version of netaddr for ansible.netcommon # and lxml for the pyxpath script -sudo python -m pip install netaddr lxml +sudo python -m pip install netaddr lxml ansible=="${ANSIBLE_VERSION}" -sudo python -m pip install ansible=="${ANSIBLE_VERSION}" - -pushd ${METAL3_DEV_ENV_PATH} -ansible-galaxy install -r vm-setup/requirements.yml -# Let's temporarily pin these collections to the latest compatible with ansible-2.15 -#ansible-galaxy collection install --upgrade ansible.netcommon ansible.posix ansible.utils community.general -ansible-galaxy collection install 'ansible.netcommon<8.0.0' ansible.posix 'ansible.utils<6.0.0' community.general +# Run the Ansible playbook ANSIBLE_FORCE_COLOR=true ansible-playbook \ - -e "working_dir=$WORKING_DIR" \ - -e "virthost=$HOSTNAME" \ - -e "go_version=$GO_VERSION" \ - -e "GOARCH=$GOARCH" \ - $ALMA_PYTHON_OVERRIDE \ - -i vm-setup/inventory.ini \ - -b -vvv vm-setup/install-package-playbook.yml -popd - -if [ -n "${KNI_INSTALL_FROM_GIT}" ]; then - # zip is required for building the installer from source - sudo dnf -y install zip -fi - -# Install nfs for persistent volumes -if [ "${PERSISTENT_IMAGEREG}" == true ] ; then - sudo dnf -y install nfs-utils -fi - -if [[ "${NODES_PLATFORM}" == "baremetal" ]] ; then - sudo dnf -y install ipmitool -fi - -# needed if we are using locally built images -# We stop any systemd service so we can run in a container, since -# there's no RPM/systemd version available for RHEL8 -if sudo systemctl is-active docker-distribution.service; then - sudo systemctl disable --now docker-distribution.service -fi + -i inventory.ini \ + -b -vvv install_requirements.yml -retry_with_timeout 5 60 "curl -L $OPENSHIFT_CLIENT_TOOLS_URL | sudo tar -U -C /usr/local/bin -xzf -" -sudo chmod +x /usr/local/bin/oc -oc version --client -o json +echo "Installation completed successfully!" diff --git a/README_ansible.md b/README_ansible.md new file mode 100644 index 000000000..7a71da075 --- /dev/null +++ b/README_ansible.md @@ -0,0 +1,99 @@ +# Metal3 Requirements Installation - Ansible Version + +This directory contains the Ansible playbook version of the original `01_install_requirements.sh` script. The playbook provides the same functionality but with better organization, error handling, and idempotency. + +## Files + +- `install_requirements.yml` - Main Ansible playbook +- `tasks/` - Directory containing individual task files +- `inventory.ini` - Simple localhost inventory +- `run_install_requirements.sh` - Wrapper script (optional) +- `README_ansible.md` - This documentation + +## Requirements + +- Ansible installed on the system +- Root/sudo access +- RHEL/CentOS 8 or 9 (or compatible distributions like AlmaLinux, Rocky Linux) + +## Usage + +### Option 1: Direct Ansible Playbook + +```bash +ansible-playbook -i inventory.ini install_requirements.yml -v +``` + +### Option 2: Using the Wrapper Script + +```bash +./run_install_requirements.sh +``` + +## Environment Variables + +The playbook respects the same environment variables as the original script: + +- `WORKING_DIR` - Working directory (default: current directory) +- `METAL3_DEV_ENV_PATH` - Path to metal3-dev-env repository +- `METAL3_DEV_ENV` - If set, skips repo cloning +- `ANSIBLE_VERSION` - Ansible version to install (default: 8.0.0) +- `GO_VERSION` - Go version for the metal3 playbook (default: 1.22.3) +- `OPENSHIFT_CLIENT_TOOLS_URL` - URL for OpenShift client tools +- `KNI_INSTALL_FROM_GIT` - Install additional packages for git-based installation +- `PERSISTENT_IMAGEREG` - Install NFS utilities for persistent image registry +- `NODES_PLATFORM` - Install platform-specific tools (e.g., 'baremetal' installs ipmitool) +- `ALMA_PYTHON_OVERRIDE` - Python override for AlmaLinux + +## Task Organization + +The playbook is organized into the following tasks: + +1. **early_deploy_validation.yml** - Basic validation checks +2. **setup_metal3_repo.yml** - Clone and setup metal3-dev-env repository +3. **configure_dnf.yml** - Configure DNF for faster downloads +4. **upgrade_packages.yml** - Upgrade system packages +5. **install_passlib.yml** - Install passlib with platform-python +6. **setup_rhel8_repos.yml** - Configure repositories for RHEL/CentOS 8 +7. **setup_rhel9_repos.yml** - Configure repositories for RHEL/CentOS 9 +8. **install_yq.yml** - Install yq YAML processor +9. **install_dev_packages.yml** - Install development packages +10. **install_python_packages.yml** - Install Python dependencies +11. **install_ansible.yml** - Install Ansible and galaxy requirements +12. **run_metal3_playbook.yml** - Run the metal3 installation playbook +13. **install_optional_packages.yml** - Install optional packages based on environment +14. **handle_docker_distribution.yml** - Handle docker-distribution service +15. **install_oc_tools.yml** - Install OpenShift client tools + +## Features + +- **Idempotent**: Can be run multiple times safely +- **Better Error Handling**: Ansible provides better error reporting +- **Modular**: Tasks are organized in separate files for maintainability +- **OS Detection**: Automatically detects RHEL/CentOS version and architecture +- **Conditional Logic**: Only runs tasks relevant to the current environment + +## Differences from Original Script + +1. **Structured Tasks**: Logic is organized into reusable task files +2. **Fact Gathering**: Uses Ansible facts instead of shell commands where possible +3. **Package Management**: Uses Ansible's dnf module instead of shell commands +4. **Service Management**: Uses Ansible's systemd module for service operations +5. **File Operations**: Uses Ansible modules for file and directory operations + +## Troubleshooting + +- Ensure you have sudo privileges +- Check that your OS is supported (RHEL/CentOS 8 or 9) +- Verify network connectivity for package downloads +- Check Ansible version compatibility + +## Migration from Bash Script + +To migrate from the bash script: + +1. Ensure all environment variables are set as needed +2. Run the Ansible playbook instead of the bash script +3. The playbook will perform the same operations with better error handling + +For any issues, compare the task outputs with the original script behavior. diff --git a/install_requirements.yml b/install_requirements.yml new file mode 100644 index 000000000..f4e6b934e --- /dev/null +++ b/install_requirements.yml @@ -0,0 +1,94 @@ +--- +- name: Install Metal3 Development Environment Requirements + hosts: localhost + become: yes + environment: + PATH: "/usr/local/bin:{{ ansible_env.PATH }}" + gather_facts: yes + vars: + ansible_version: "{{ ansible_version_override | default('8.0.0') }}" + go_version: "{{ go_version_override | default('1.22.3') }}" + metal3_dev_env_commit: "c60510f410173ddf01a29b87c889249b0f263c9f" + metal3_dev_env_repo: "https://github.com/metal3-io/metal3-dev-env.git" + working_dir: "{{ lookup('env', 'WORKING_DIR') | default(ansible_env.HOME + '/metal3') }}" + metal3_dev_env_path: "{{ lookup('env', 'METAL3_DEV_ENV_PATH') | default(working_dir + '/metal3-dev-env') }}" + metal3_dev_env: "{{ lookup('env', 'METAL3_DEV_ENV') }}" + openshift_client_tools_url: "{{ lookup('env', 'OPENSHIFT_CLIENT_TOOLS_URL') }}" + kni_install_from_git: "{{ lookup('env', 'KNI_INSTALL_FROM_GIT') }}" + persistent_imagereg: "{{ lookup('env', 'PERSISTENT_IMAGEREG') | bool }}" + nodes_platform: "{{ lookup('env', 'NODES_PLATFORM') | default('libvirt') }}" + alma_python_override: "{{ lookup('env', 'ALMA_PYTHON_OVERRIDE') }}" + container_runtime: "{{ lookup('env', 'CONTAINER_RUNTIME') | default('podman') }}" + + pre_tasks: + - name: Load OS information + setup: + gather_subset: distribution + + - name: Set distribution facts + set_fact: + distro: "{{ ansible_distribution | lower }}{{ ansible_distribution_major_version }}" + arch: "{{ ansible_architecture }}" + + - name: Set Go architecture + set_fact: + goarch: "{{ 'arm64' if arch == 'aarch64' else 'amd64' }}" + + - name: Fail if unsupported OS + fail: + msg: "CentOS or RHEL version not supported" + when: distro not in ['centos8', 'rhel8', 'almalinux8', 'rocky8', 'centos9', 'rhel9', 'almalinux9', 'rocky9'] + + - name: Check for CentOS 8 (non-Stream) and fail + fail: + msg: "CentOS is not supported, please switch to CentOS Stream / RHEL / Rocky / Alma" + when: + - distro == "centos8" + - '"Stream" not in ansible_distribution_version' + + tasks: + - name: Run early deployment validation + include_tasks: tasks/early_deploy_validation.yml + when: lookup('env', 'SKIP_VALIDATION') != 'true' + + - name: Setup Metal3 development environment repository + include_tasks: tasks/setup_metal3_repo.yml + when: metal3_dev_env == "" + + - name: Configure DNF for faster downloads + include_tasks: tasks/configure_dnf.yml + + - name: Upgrade system packages and handle NetworkManager restart + include_tasks: tasks/upgrade_packages.yml + + - name: Install passlib with platform-python if available + include_tasks: tasks/install_passlib.yml + + - name: Configure repositories and Python for RHEL/CentOS 8 + include_tasks: tasks/setup_rhel8_repos.yml + when: distro in ['centos8', 'rhel8', 'almalinux8', 'rocky8'] + + - name: Configure repositories and Python for RHEL/CentOS 9 + include_tasks: tasks/setup_rhel9_repos.yml + when: distro in ['centos9', 'rhel9', 'almalinux9', 'rocky9'] + + - name: Install yq if not present + include_tasks: tasks/install_yq.yml + + - name: Install architecture-specific development packages + include_tasks: tasks/install_dev_packages.yml + + - name: Install Python packages + include_tasks: tasks/install_python_packages.yml + + - name: Run Metal3 package installation playbook + include_tasks: tasks/run_metal3_playbook.yml + + - name: Install optional packages based on environment + include_tasks: tasks/install_optional_packages.yml + + - name: Handle docker-distribution service + include_tasks: tasks/handle_docker_distribution.yml + + - name: Install OpenShift client tools + include_tasks: tasks/install_oc_tools.yml diff --git a/inventory.ini b/inventory.ini new file mode 100644 index 000000000..13cfabe9b --- /dev/null +++ b/inventory.ini @@ -0,0 +1,2 @@ +[local] +localhost ansible_connection=local diff --git a/ocp_cleanup.sh b/ocp_cleanup.sh index a7498858f..e052551f1 100755 --- a/ocp_cleanup.sh +++ b/ocp_cleanup.sh @@ -74,5 +74,7 @@ fi sudo sed -ie '/^allow /d' /etc/chrony.conf # Restore file after workaround -cd ${METAL3_DEV_ENV_PATH} -git checkout vm-setup/roles/packages_installation/tasks/centos_required_packages.yml +if [ -d ${METAL3_DEV_ENV_PATH} ]; then + cd ${METAL3_DEV_ENV_PATH} + git checkout vm-setup/roles/packages_installation/tasks/centos_required_packages.yml +fi diff --git a/run_example.sh b/run_example.sh new file mode 100755 index 000000000..161479ea2 --- /dev/null +++ b/run_example.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Example script showing how to run the Ansible playbook with environment variables +# This demonstrates the same environment setup as the original bash script + +export WORKING_DIR="/home/metalhead/metal3" +export METAL3_DEV_ENV_PATH="${WORKING_DIR}/metal3-dev-env" +export ANSIBLE_VERSION="8.0.0" +export GO_VERSION="1.22.3" +export OPENSHIFT_CLIENT_TOOLS_URL="https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux.tar.gz" + +# Optional environment variables +# export KNI_INSTALL_FROM_GIT="true" +# export PERSISTENT_IMAGEREG="true" +# export NODES_PLATFORM="baremetal" + +# Run the playbook +ansible-playbook -i inventory.ini install_requirements.yml -v + +echo "Installation completed!" diff --git a/tasks/configure_dnf.yml b/tasks/configure_dnf.yml new file mode 100644 index 000000000..6c8483eda --- /dev/null +++ b/tasks/configure_dnf.yml @@ -0,0 +1,17 @@ +--- +# Configure DNF for faster downloads +- name: Configure DNF for fastest mirror + lineinfile: + path: /etc/dnf/dnf.conf + line: "fastestmirror=1" + create: yes + +- name: Configure DNF for parallel downloads + lineinfile: + path: /etc/dnf/dnf.conf + line: "max_parallel_downloads=8" + create: yes + +- name: Clean DNF cache + shell: dnf clean all + changed_when: true diff --git a/tasks/early_deploy_validation.yml b/tasks/early_deploy_validation.yml new file mode 100644 index 000000000..eb5c70216 --- /dev/null +++ b/tasks/early_deploy_validation.yml @@ -0,0 +1,22 @@ +--- +# Early deployment validation task +# This replaces the early_deploy_validation function call from the bash script +- name: Run early deployment validation + shell: | + # This would call the validation functions from the sourced scripts + # For now, just check basic requirements + if ! command -v git &> /dev/null; then + echo "Git is required but not installed" + exit 1 + fi + if ! command -v curl &> /dev/null; then + echo "Curl is required but not installed" + exit 1 + fi + register: validation_result + changed_when: false + +- name: Display validation result + debug: + msg: "Early deployment validation completed successfully" + when: validation_result.rc == 0 diff --git a/tasks/handle_docker_distribution.yml b/tasks/handle_docker_distribution.yml new file mode 100644 index 000000000..caedda3d9 --- /dev/null +++ b/tasks/handle_docker_distribution.yml @@ -0,0 +1,16 @@ +--- +# Handle docker-distribution service +- name: Check if docker-distribution service is active + systemd: + name: docker-distribution.service + register: docker_dist_service + failed_when: false + +- name: Disable and stop docker-distribution service if active + systemd: + name: docker-distribution.service + enabled: no + state: stopped + when: + - docker_dist_service.status is defined + - docker_dist_service.status.ActiveState == "active" diff --git a/tasks/install_dev_packages.yml b/tasks/install_dev_packages.yml new file mode 100644 index 000000000..7e159af7f --- /dev/null +++ b/tasks/install_dev_packages.yml @@ -0,0 +1,17 @@ +--- +# Install architecture-specific development packages +- name: Install development packages for ARM64 + dnf: + name: + - "{{ python_devel }}" + - libxml2-devel + - libxslt-devel + state: present + when: arch == "aarch64" + +- name: Install development packages for x86_64 + dnf: + name: + - "{{ python_devel | default('python3-devel') }}" + state: present + when: arch == "x86_64" diff --git a/tasks/install_oc_tools.yml b/tasks/install_oc_tools.yml new file mode 100644 index 000000000..d4c86c437 --- /dev/null +++ b/tasks/install_oc_tools.yml @@ -0,0 +1,30 @@ +--- +# Install OpenShift client tools +- name: Download and install OpenShift client tools + shell: | + for i in {1..5}; do + if timeout 60 curl -L "{{ openshift_client_tools_url }}" | tar -U -C /usr/local/bin -xzf -; then + break + fi + echo "Attempt $i failed, retrying..." + sleep 10 + done + when: openshift_client_tools_url != "" + +- name: Make oc executable + file: + path: /usr/local/bin/oc + mode: '0755' + when: openshift_client_tools_url != "" + +- name: Verify oc installation + shell: oc version --client -o json + register: oc_version + when: openshift_client_tools_url != "" + +- name: Display oc version + debug: + var: oc_version.stdout + when: + - openshift_client_tools_url != "" + - oc_version is succeeded diff --git a/tasks/install_optional_packages.yml b/tasks/install_optional_packages.yml new file mode 100644 index 000000000..224c6f3fe --- /dev/null +++ b/tasks/install_optional_packages.yml @@ -0,0 +1,19 @@ +--- +# Install optional packages based on environment +- name: Install zip for KNI installation from git + dnf: + name: zip + state: present + when: kni_install_from_git != "" + +- name: Install nfs-utils for persistent image registry + dnf: + name: nfs-utils + state: present + when: persistent_imagereg | bool + +- name: Install ipmitool for baremetal platform + dnf: + name: ipmitool + state: present + when: nodes_platform == "baremetal" diff --git a/tasks/install_passlib.yml b/tasks/install_passlib.yml new file mode 100644 index 000000000..aa1c7a4da --- /dev/null +++ b/tasks/install_passlib.yml @@ -0,0 +1,28 @@ +--- +# Install passlib with platform-python if available +- name: Check if platform-python exists + stat: + path: /usr/libexec/platform-python + register: platform_python_stat + +- name: Install passlib with platform-python + shell: /usr/libexec/platform-python -m pip install passlib + register: passlib_install + failed_when: false + when: platform_python_stat.stat.exists and platform_python_stat.stat.executable + +- name: Install python3-pip if passlib installation failed + dnf: + name: python3-pip + state: present + when: + - platform_python_stat.stat.exists + - platform_python_stat.stat.executable + - passlib_install.rc != 0 + +- name: Retry passlib installation after installing pip + shell: /usr/libexec/platform-python -m pip install passlib + when: + - platform_python_stat.stat.exists + - platform_python_stat.stat.executable + - passlib_install.rc != 0 diff --git a/tasks/install_python_packages.yml b/tasks/install_python_packages.yml new file mode 100644 index 000000000..17192982f --- /dev/null +++ b/tasks/install_python_packages.yml @@ -0,0 +1,8 @@ +--- +# Install Python packages +- name: Install netaddr and lxml for ansible.netcommon and pyxpath + pip: + name: + - netaddr + - lxml + executable: pip3 diff --git a/tasks/install_yq.yml b/tasks/install_yq.yml new file mode 100644 index 000000000..664c5a2e6 --- /dev/null +++ b/tasks/install_yq.yml @@ -0,0 +1,18 @@ +--- +# Install yq if not present +- name: Check if yq is already installed + shell: which yq + register: yq_check + failed_when: false + changed_when: false + +- name: Install yq from pip if not present + pip: + name: "yq>=3,<4" + executable: pip3 + when: yq_check.rc != 0 + +- name: Display yq location if already installed + debug: + msg: "Using yq from {{ yq_check.stdout }}" + when: yq_check.rc == 0 diff --git a/tasks/run_metal3_playbook.yml b/tasks/run_metal3_playbook.yml new file mode 100644 index 000000000..b3261a7e7 --- /dev/null +++ b/tasks/run_metal3_playbook.yml @@ -0,0 +1,40 @@ +--- +- name: Install dependencies + community.general.ansible_galaxy_install: + type: both + requirements_file: "{{ metal3_dev_env_path }}/vm-setup/requirements.yml" + +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: 'ansible.netcommon<8.0.0' + +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: ansible.posix + +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: 'ansible.utils<6.0.0' + +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.general + +# Run Metal3 package installation playbook +- name: Run install-package-playbook.yml + shell: | + ansible-playbook \ + -e "working_dir={{ working_dir }}" \ + -e "virthost={{ ansible_hostname }}" \ + -e "go_version={{ go_version }}" \ + -e "GOARCH={{ goarch }}" \ + {{ alma_python_override if alma_python_override else '' }} \ + -i {{ metal3_dev_env_path }}/vm-setup/inventory.ini \ + -b -vvv {{ metal3_dev_env_path }}/vm-setup/install-package-playbook.yml + environment: + ANSIBLE_FORCE_COLOR: "true" + CONTAINER_RUNTIME: "{{ container_runtime }}" diff --git a/tasks/setup_metal3_repo.yml b/tasks/setup_metal3_repo.yml new file mode 100644 index 000000000..d17466a9f --- /dev/null +++ b/tasks/setup_metal3_repo.yml @@ -0,0 +1,26 @@ +--- +# Setup Metal3 development environment repository +- name: Ensure working directory exists + file: + path: "{{ working_dir }}" + state: directory + mode: '0755' + +- name: Clone metal3-dev-env repository + git: + repo: "{{ metal3_dev_env_repo }}" + dest: "{{ metal3_dev_env_path }}" + force: yes + register: git_clone_result + +- name: Pin to specific metal3-dev-env commit + git: + repo: "{{ metal3_dev_env_repo }}" + dest: "{{ metal3_dev_env_path }}" + version: "{{ metal3_dev_env_commit }}" + force: yes + when: git_clone_result is succeeded + +- name: Set REPO_PATH environment variable + set_fact: + repo_path: "{{ working_dir }}" diff --git a/tasks/setup_rhel8_repos.yml b/tasks/setup_rhel8_repos.yml new file mode 100644 index 000000000..7a82811a1 --- /dev/null +++ b/tasks/setup_rhel8_repos.yml @@ -0,0 +1,64 @@ +--- +# Configure repositories and Python for RHEL/CentOS 8 +- name: Install network-scripts package for legacy network commands + dnf: + name: network-scripts + state: present + +- name: Install EPEL repository for CentOS/AlmaLinux/Rocky 8 + dnf: + name: + - epel-release + - dnf + enablerepo: extras + state: present + when: distro in ['centos8', 'almalinux8', 'rocky8'] + +- name: Install EPEL repository for RHEL 8 + block: + - name: Install EPEL release RPM for RHEL 8 + dnf: + name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + state: present + + - name: Check if ansible-2 repository is enabled + shell: subscription-manager repos --list-enabled 2>&1 | grep "ansible-2-for-rhel-8-$(uname -m)-rpms" + register: ansible2_repo_check + failed_when: false + changed_when: false + + - name: Remove old ansible package if ansible-2 repo is enabled + dnf: + name: ansible + state: absent + when: ansible2_repo_check.rc == 0 + + - name: Disable ansible-2 repository + shell: subscription-manager repos --disable=ansible-2-for-rhel-8-$(uname -m)-rpms + when: ansible2_repo_check.rc == 0 + + when: distro == 'rhel8' + +- name: Install Python 3.9 for RHEL/CentOS 8 + dnf: + name: python39 + state: present + +- name: Set Python 3.9 as default python + alternatives: + name: python + path: /usr/bin/python3.9 + link: /usr/bin/python + +- name: Set Python 3.9 as default python3 + alternatives: + name: python3 + path: /usr/bin/python3.9 + link: /usr/bin/python3 + +- name: Set pip3.9 as default pip3 + shell: update-alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.9 1 + +- name: Set Python development package variable + set_fact: + python_devel: "python39-devel" diff --git a/tasks/setup_rhel9_repos.yml b/tasks/setup_rhel9_repos.yml new file mode 100644 index 000000000..431ed4672 --- /dev/null +++ b/tasks/setup_rhel9_repos.yml @@ -0,0 +1,48 @@ +--- +# Configure repositories and Python for RHEL/CentOS 9 +- name: Install python3-pip for RHEL/CentOS 9 + dnf: + name: python3-pip + state: present + +- name: Configure repositories for CentOS/AlmaLinux/Rocky 9 + block: + - name: Enable CRB repository + shell: dnf config-manager --set-enabled crb + + - name: Install EPEL repository + dnf: + name: epel-release + state: present + + when: distro in ['centos9', 'almalinux9', 'rocky9'] + +- name: Configure repositories for RHEL 9 + block: + - name: Check if system has valid RHEL subscription + shell: subscription-manager identity > /dev/null 2>&1 + register: rhel_subscription_check + failed_when: false + changed_when: false + + - name: Enable CodeReady Builder repository if subscribed + shell: subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms + when: rhel_subscription_check.rc == 0 + + - name: Install EPEL repository for RHEL 9 + dnf: + name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + state: present + + when: distro == 'rhel9' + +- name: Create python symlink + file: + src: /usr/bin/python3 + dest: /usr/bin/python + state: link + ignore_errors: yes + +- name: Set Python development package variable + set_fact: + python_devel: "python3-devel" diff --git a/tasks/upgrade_packages.yml b/tasks/upgrade_packages.yml new file mode 100644 index 000000000..b43e504a1 --- /dev/null +++ b/tasks/upgrade_packages.yml @@ -0,0 +1,23 @@ +--- +# Upgrade system packages and handle NetworkManager restart +- name: Get current NetworkManager version + shell: dnf info NetworkManager | grep Version | cut -d ':' -f 2 + register: old_networkmanager_version + changed_when: false + +- name: Upgrade all packages + dnf: + name: "*" + state: latest + nobest: yes + +- name: Get new NetworkManager version + shell: dnf info NetworkManager | grep Version | cut -d ':' -f 2 + register: new_networkmanager_version + changed_when: false + +- name: Restart NetworkManager if upgraded + systemd: + name: NetworkManager + state: restarted + when: old_networkmanager_version.stdout != new_networkmanager_version.stdout diff --git a/vm-setup/vm-setup/firewall.yml b/vm-setup/vm-setup/firewall.yml new file mode 100644 index 000000000..4be020cfe --- /dev/null +++ b/vm-setup/vm-setup/firewall.yml @@ -0,0 +1,8 @@ +--- +- name: Setup dummy baremetal VMs + hosts: virthost + connection: local + gather_facts: true + tasks: + - import_role: + name: firewall diff --git a/vm-setup/vm-setup/install-package-playbook.yml b/vm-setup/vm-setup/install-package-playbook.yml new file mode 100644 index 000000000..1aed44850 --- /dev/null +++ b/vm-setup/vm-setup/install-package-playbook.yml @@ -0,0 +1,12 @@ +- name: Install packages needed for the Dev-env + hosts: virthost + connection: local + gather_facts: true + tasks: + - import_role: + name: packages_installation + - import_role: + name: fubarhouse.golang + vars: + go_version: 1.24.3 + go_install_clean: true diff --git a/vm-setup/vm-setup/inventory.ini b/vm-setup/vm-setup/inventory.ini new file mode 100644 index 000000000..dcf6d92da --- /dev/null +++ b/vm-setup/vm-setup/inventory.ini @@ -0,0 +1,2 @@ +[virthost] +localhost ansible_python_interpreter="auto" diff --git a/vm-setup/vm-setup/library/generate_macs.py b/vm-setup/vm-setup/library/generate_macs.py new file mode 100644 index 000000000..52e2dcca7 --- /dev/null +++ b/vm-setup/vm-setup/library/generate_macs.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# generate_vm_interface_macs method ripped from +# openstack/tripleo-incubator/scripts/configure-vm + +import random + +DOCUMENTATION = ''' +--- +module: generate_macs +version_added: "2.0" +short_description: Generate a list of Ethernet MAC addresses +description: + - Generate a list of Ethernet MAC addresses suitable for external testing. +''' + +MAX_NUM_MACS = 256 + + +def generate_vm_interface_macs(nodes, networks): + """Generate an Ethernet MAC address suitable for VM testing.""" + # NOTE(dprince): We generate our own bare metal MAC address's here + # instead of relying on libvirt so that we can ensure the + # locally administered bit is set low. (The libvirt default is + # to set the 2nd MSB high.) This effectively allows our + # fake baremetal VMs to more accurately behave like real hardware + # and fixes issues with bridge/DHCP configurations which rely + # on the fact that bridges assume the MAC address of the lowest + # attached NIC. + # MACs generated for a given machine will also be in sequential + # order, which matches how most BM machines are laid out as well. + + macs = [] + count = len(nodes) * len(networks) + + if count > MAX_NUM_MACS: + raise ValueError("The MAX num of MACS supported is %i " + "(you specified %i)." % (MAX_NUM_MACS, count)) + + base_nums = [0x00, + random.randint(0x00, 0xff), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + base_mac = ':'.join(["%02x" % x for x in base_nums]) + + start = random.randint(0x00, 0xff) + if (start + count) > 0xff: + # leave room to generate macs in sequence + start = 0xff + 1 - count + for num in range(0, count, 1): + mac = start + num + macs.append(base_mac + ":" + ("%02x" % mac)) + + result = {} + for node in nodes: + result[node['name']] = {} + for network in networks: + result[node['name']][network['name']] = macs.pop(0) + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + nodes=dict(required=True, type='list'), + networks=dict(required=True, type='list') + ) + ) + result = generate_vm_interface_macs(module.params["nodes"], + module.params["networks"]) + module.exit_json(**result) + +# see http://docs.ansible.com/developing_modules.html#common-module-boilerplate +from ansible.module_utils.basic import AnsibleModule # noqa + + +if __name__ == '__main__': + main() diff --git a/vm-setup/vm-setup/requirements.yml b/vm-setup/vm-setup/requirements.yml new file mode 100644 index 000000000..7b08054d4 --- /dev/null +++ b/vm-setup/vm-setup/requirements.yml @@ -0,0 +1,12 @@ +roles: + - src: https://github.com/fubarhouse/ansible-role-golang.git + version: master + name: fubarhouse.golang + +collections: + - name: kubernetes.core + version: 6.0.0 + - name: ansible.netcommon + version: 7.2.0 + - name: ansible.utils + version: 5.1.2 diff --git a/vm-setup/vm-setup/roles/common/defaults/main.yml b/vm-setup/vm-setup/roles/common/defaults/main.yml new file mode 100644 index 000000000..c6b822c58 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/defaults/main.yml @@ -0,0 +1,116 @@ +non_root_user: "{{ lookup('env', 'USER') }}" + +# base domain to use +cluster_domain: "{{ lookup('env', 'CLUSTER_DOMAIN') | default('ostest.test.metalkube.org', true) }}" + +# allow the nic model to be overridden +libvirt_nic_model: virtio + +# These defaults are used if there are no flavor-specific +# overrides configured. +default_disk: 50 +default_memory: 4096 +default_vcpu: 2 +num_nodes: 2 +extradisks: false +tpm_emulator: "{{ lookup('env', 'VM_TPM_EMULATOR')|default(false, true) }}" +virtualbmc_base_port: 6230 +flavors: + node: + memory: '{{node_memory|default(default_memory)}}' + disk: '{{node_disk|default(default_disk)}}' + vcpu: '{{node_vcpu|default(default_vcpu)}}' + extradisks: '{{extradisks|bool}}' + +# An optional prefix for node names +ironic_prefix: "" + +external_network_cidr_v4: "{{ lookup('env', 'EXTERNAL_SUBNET_V4')|default('', true) }}" +external_network_cidr_v6: "{{ lookup('env', 'EXTERNAL_SUBNET_V6')|default('', true) }}" +external_dhcp_v4_start: "{{ lookup('env', 'EXTERNAL_DHCP_V4_START')|default('', true) }}" +external_dhcp_v4_end: "{{ lookup('env', 'EXTERNAL_DHCP_V4_END')|default('', true) }}" +external_dhcp_v6_start: "{{ lookup('env', 'EXTERNAL_DHCP_V6_START')|default('', true) }}" +external_dhcp_v6_end: "{{ lookup('env', 'EXTERNAL_DHCP_V6_END')|default('', true) }}" + +provisioning_network_cidr_v4: "{{ lookup('env', 'PROVISIONING_SUBNET_V4')|default('', true) }}" +provisioning_network_cidr_v6: "{{ lookup('env', 'PROVISIONING_SUBNET_V6')|default('', true) }}" +provisioning_dhcp_v4_start: "{{ lookup('env', 'PROVISIONING_DHCP_V4_START')|default('', true) }}" +provisioning_dhcp_v4_end: "{{ lookup('env', 'PROVISIONING_DHCP_V4_END')|default('', true) }}" +provisioning_dhcp_v6_start: "{{ lookup('env', 'PROVISIONING_DHCP_V6_START')|default('', true) }}" +provisioning_dhcp_v6_end: "{{ lookup('env', 'PROVISIONING_DHCP_V6_END')|default('', true) }}" + +# Set this to `false` if you don't want your vms +# to have a VNC console available. +enable_vnc_console: true + +# Path for volume storage +libvirt_volume_path: "{{ working_dir }}/pool" + +# These ensure we're using privileged virt, so VMs persist over reboot +libvirt_uri: qemu:///system +ssh_user: root + +# Describe our virtual networks. These networks will be attached to +# the vm nodes in the order in which they are defined with the following caveats: +# * The first bridge network defined will be used for pxe booting +manage_external: 'y' +provisioning_network_nat: + - name: provisioning + bridge: provisioning + forward_mode: nat + address_v4: "{{ provisioning_network_cidr_v4|ansible.utils.nthhost(1)|default('', true) }}" + netmask_v4: "{{ provisioning_network_cidr_v4|ansible.utils.ipaddr('netmask') }}" + dhcp_range_v4: + - "{{ provisioning_dhcp_v4_start }}" + - "{{ provisioning_dhcp_v4_end }}" + # libvirt defaults to minutes as the unit + lease_expiry: 60 + nat_port_range: + - 1024 + - 65535 + domain: "{{ cluster_domain }}" + dns: + hosts: "{{dns_extrahosts | default([])}}" + forwarders: + - domain: "apps.{{ cluster_domain }}" + addr: "127.0.0.1" +provisioning_network_bridge: + - name: provisioning + bridge: provisioning + forward_mode: bridge +external_network: + - name: external + bridge: external + forward_mode: "{% if manage_external == 'y' %}nat{% else %}bridge{% endif %}" + address_v4: "{{ external_network_cidr_v4|ansible.utils.nthhost(1)|default('', true) }}" + netmask_v4: "{{ external_network_cidr_v4|ansible.utils.ipaddr('netmask') }}" + dhcp_range_v4: + - "{{ external_dhcp_v4_start }}" + - "{{ external_dhcp_v4_end }}" + address_v6: "{{ external_network_cidr_v6|ansible.utils.nthhost(1)|default('', true) }}" + prefix_v6: "{{ external_network_cidr_v6|ansible.utils.ipaddr('prefix') }}" + dhcp_range_v6: + - "{{ external_dhcp_v6_start }}" + - "{{ external_dhcp_v6_end }}" + # libvirt defaults to minutes as the unit + lease_expiry: 60 + nat_port_range: + - 1024 + - 65535 + domain: "{{ cluster_domain }}" + dns: + hosts: "{{dns_extrahosts | default([])}}" + forwarders: + # Use 127.0.0.1 unless only IPv6 is enabled + - domain: "apps.{{ cluster_domain }}" + addr: "{% if external_network_cidr_v4|ansible.utils.ipv4 != False %}127.0.0.1{% else %}::1{% endif %}" + srvs: "{{dns_externalsrvs | default([])}}" + +# Provisioning network is bridged and external network is nated +networks_mixed: "{{ provisioning_network_bridge + external_network }}" +# Both networks are nated +networks_nated: "{{ provisioning_network_nat + external_network }}" +# Enable only nated networks +networks_nat_only: "{{ lookup('env', 'ENABLE_NATED_PROVISIONING_NETWORK') | bool }}" +# Placeholder for the eventually selected network composition +networks: "{{ networks_nated if networks_nat_only else networks_mixed }}" diff --git a/vm-setup/vm-setup/roles/common/tasks/extra_networks_tasks.yml b/vm-setup/vm-setup/roles/common/tasks/extra_networks_tasks.yml new file mode 100644 index 000000000..02006edb4 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/tasks/extra_networks_tasks.yml @@ -0,0 +1,55 @@ +--- + +- name: "Processing extra network {{network_name}}" + debug: + var: network_name + +# Get the CIDR for ipv4 and/or ipv6 +- name: "Set {{network_name}}_cidr_v4 from {{network_name.upper()}}_NETWORK_SUBNET_V4" + set_fact: + "{{network_name}}_cidr_v4": "{{lookup('env', network_name.upper()+'_NETWORK_SUBNET_V4')}}" + +- name: "Set {{network_name}}_cidr_v6 from {{network_name.upper()}}_NETWORK_SUBNET_V6" + set_fact: + "{{network_name}}_cidr_v6": "{{lookup('env', network_name.upper()+'_NETWORK_SUBNET_V6')}}" + +# Validate that we have at least one CIDR, and that the format is correct +- fail: + msg: "Must specify at least one of {{network_name.upper()}}_NETWORK_SUBNET_V4 or {{network_name.upper()}}_NETWORK_SUBNET_V6" + when: "{{ lookup('vars', network_name + '_cidr_v4') == '' and lookup('vars', network_name + '_cidr_v6') == '' }}" + +- name: Calculate v4 extra_network data + block: + - set_fact: + extra_network_v4: "{{ [{ + 'address_v4': cidr|ansible.utils.nthhost(1), + 'netmask_v4': cidr|ansible.utils.ipaddr('netmask'), + 'dhcp_range_v4': [ cidr|ansible.utils.nthhost(20), cidr|ansible.utils.nthhost(60)], + }]}}" + when: "{{ lookup('vars', network_name + '_cidr_v4') != '' }}" + vars: + cidr: "{{lookup('vars', network_name + '_cidr_v4')}}" + +- name: Calculate v6 extra_network data + block: + - set_fact: + extra_network_v6: "{{ [{ + 'address_v6': cidr|ansible.utils.nthhost(1), + 'prefix_v6': cidr|ansible.utils.ipaddr('prefix'), + 'dhcp_range_v6': [ cidr|ansible.utils.nthhost(20), cidr|ansible.utils.nthhost(60)], + }]}}" + when: "{{ lookup('vars', network_name + '_cidr_v6') != '' }}" + vars: + cidr: "{{lookup('vars', network_name + '_cidr_v6')}}" + +- name: "Add extra network {{network_name}} to extra_networks" + set_fact: + extra_networks: "{{ extra_networks|default([]) + [ + { + 'name': network_name, + 'bridge': network_name, + 'forward_mode': 'nat', + 'nat_port_range': ['1024', '65535'], + 'lease_expiry': '60', + } | combine(extra_network_v4|default({}), extra_network_v6|default({})) + ]}}" diff --git a/vm-setup/vm-setup/roles/common/tasks/generate_node_mac.yml b/vm-setup/vm-setup/roles/common/tasks/generate_node_mac.yml new file mode 100644 index 000000000..0831743d7 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/tasks/generate_node_mac.yml @@ -0,0 +1,7 @@ +--- +- name: get a list of MACs to use + generate_macs: + nodes: "{{ vm_nodes }}" + networks: "{{ networks }}" + register: node_mac_map + when: vm_nodes | length > 0 diff --git a/vm-setup/vm-setup/roles/common/tasks/main.yml b/vm-setup/vm-setup/roles/common/tasks/main.yml new file mode 100644 index 000000000..c283a1826 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/tasks/main.yml @@ -0,0 +1,44 @@ +--- + +- set_fact: + generate_vm_nodes: "{{vm_nodes is not defined}}" + +- name: "Show network_mode data for debugging (common role)" + debug: + var: networks_nat_only + +- name: Set an empty default for vm_nodes if not already defined + set_fact: + vm_nodes: [] + when: generate_vm_nodes + +- name: Populate vm_nodes if not already defined + when: generate_vm_nodes + include_tasks: vm_nodes_tasks.yml + loop: "{{flavors|dict2items}}" + loop_control: + loop_var: flavor + +- debug: + var: vm_nodes + when: generate_vm_nodes + +- name: "Check if EXTRA_NETWORK_NAMES is configured" + set_fact: + generate_extra_networks: "{{lookup('env', 'EXTRA_NETWORK_NAMES') and extra_networks is not defined}}" + +- name: Calculate extra_networks if EXTRA_NETWORK_NAMES is defined + include_tasks: extra_networks_tasks.yml + loop: "{{lookup('env', 'EXTRA_NETWORK_NAMES')|split()}}" + loop_control: + loop_var: network_name + when: generate_extra_networks + +- name: "Append extra networks when EXTRA_NETWORK_NAMES is configured" + set_fact: + networks: "{{ networks + extra_networks }}" + when: generate_extra_networks + +- name: "Show networks data for debugging (common role)" + debug: + var: networks diff --git a/vm-setup/vm-setup/roles/common/tasks/vm_nodes_tasks.yml b/vm-setup/vm-setup/roles/common/tasks/vm_nodes_tasks.yml new file mode 100644 index 000000000..139b99f6c --- /dev/null +++ b/vm-setup/vm-setup/roles/common/tasks/vm_nodes_tasks.yml @@ -0,0 +1,18 @@ +--- +# We maintain a persistent index over the nested loop iterations +# of the tasks via set_fact, ansible doesn't appear to provide +# a facility like j2 namespaces to enable this. +- set_fact: + vm_nodes_index: "{{vm_nodes_index|default(0)|int}}" +- set_fact: + vm_nodes: "{{vm_nodes + [ + {'name': ironic_prefix + '%s_%s'|format(flavor.key, item), + 'flavor': flavor.key, + 'virtualbmc_port': virtualbmc_base_port|int+vm_nodes_index|int+item} ]}}" + loop: "{{ range(0, lookup('vars', 'num_' + flavor.key + 's')|int)|list }}" +- set_fact: + vm_nodes_index: "{{vm_nodes_index|int + lookup('vars', 'num_' + flavor.key + 's')|int }}" +- set_fact: + host_os: "{{ lookup('ansible.builtin.env', 'OS') }}" +- set_fact: + host_distro: "{{ lookup('ansible.builtin.env', 'DISTRO') }}" diff --git a/vm-setup/vm-setup/roles/common/tasks/write_ironic_nodes_tasks.yml b/vm-setup/vm-setup/roles/common/tasks/write_ironic_nodes_tasks.yml new file mode 100644 index 000000000..54ac55fc8 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/tasks/write_ironic_nodes_tasks.yml @@ -0,0 +1,7 @@ +--- +# Generate the ironic node inventory files. +- name: Write ironic node json files + template: + src: ../templates/ironic_nodes.json.j2 + dest: "{{ nodes_file }}" + force: no diff --git a/vm-setup/vm-setup/roles/common/templates/ironic_nodes.json.j2 b/vm-setup/vm-setup/roles/common/templates/ironic_nodes.json.j2 new file mode 100644 index 000000000..9c6ceb8d8 --- /dev/null +++ b/vm-setup/vm-setup/roles/common/templates/ironic_nodes.json.j2 @@ -0,0 +1,75 @@ +{% set lvars = { 'host_ip' : 'NOTSET', 'pxe_network' : False} %} +{% for network in networks %} + {% if (not (network.forward_mode is defined and network.forward_mode == 'nat') and lvars['pxe_network'] == False) %} + {% if lvars.update({'pxe_network' : network.name}) %}{% endif %} + {% endif %} + {% if network.address_v4 is defined and network.address_v4 != '' and lvars['host_ip'] == 'NOTSET' %} + {% if lvars.update({'host_ip' : network.address_v4}) %}{% endif %} + {% endif %} + {% if network.address_v6 is defined and network.address_v6 != '' and lvars['host_ip'] == 'NOTSET' %} + {% if lvars.update({'host_ip' : network.address_v6}) %}{% endif %} + {% endif %} +{% endfor %} +{% if lvars['pxe_network'] == False %} + {% if lvars.update({'pxe_network': networks[0].name}) %}{% endif %} +{% endif %} +{ + "nodes": [ + {% for node in vm_nodes %} + + {% set vm_driver_tmp = vm_driver -%} + {# If vm_driver == mixed we alternate between ipmi, refish and refish-virtualmedia to test both #} + {% if vm_driver == 'mixed' -%} + {# For extraworker always use redfish #} + {% if 'extraworker' in node.name -%} + {% set vm_driver_tmp = 'redfish' -%} + {% elif loop.index % 3 == 0 -%} + {% set vm_driver_tmp = 'redfish-virtualmedia' -%} + {% elif loop.index % 3 == 1 -%} + {% set vm_driver_tmp = 'ipmi' -%} + {% else -%} + {% set vm_driver_tmp = 'redfish' -%} + {% endif -%} + {% endif -%} + + { + "name": "{{ node.name|replace('_', '-') }}", + "driver": "{{ vm_driver_tmp }}", + "resource_class": "baremetal", + "driver_info": { + "username": "{{ vbmc_username }}", + "password": "{{ vbmc_password }}", + {% if vm_driver_tmp =='redfish' -%} + "port": "8000", + "address": "{{vm_driver_tmp}}+https://{{ lvars['host_ip'] | ansible.utils.ipwrap }}:8000/redfish/v1/Systems/{{vm_id[node.name]}}", + "redfish_verify_ca": "False", + {% elif vm_driver_tmp == 'redfish-virtualmedia' -%} + "port": "8000", + "address": "{{vm_driver_tmp}}+https://{{ lvars['host_ip'] | ansible.utils.ipwrap }}:8000/redfish/v1/Systems/{{vm_id[node.name]}}", + "redfish_verify_ca": "False", + {% elif vm_driver_tmp == 'redfish-uefihttp' -%} + "port": "8000", + "address": "{{vm_driver_tmp}}+https://{{ lvars['host_ip'] | ansible.utils.ipwrap }}:8000/redfish/v1/Systems/{{vm_id[node.name]}}", + "redfish_verify_ca": "False", + {% else -%} + "port": "{{ node.virtualbmc_port }}", + "address": "{{vm_driver_tmp}}://{{lvars['host_ip'] | ansible.utils.ipwrap }}:{{node.virtualbmc_port}}", + {% endif -%} + "deploy_kernel": "http://{{ provisioning_url_host }}/images/ironic-python-agent.kernel", + "deploy_ramdisk": "http://{{ provisioning_url_host }}/images/ironic-python-agent.initramfs" + }, + "ports": [{ + "address": "{{ node_mac_map.get(node.name).get(lvars['pxe_network']) }}", + "pxe_enabled": true + }], + "properties": { + "local_gb": "{{ flavors[node.flavor].disk }}", + "cpu_arch": "{{ libvirt_arch }}" + } + } + {% if not loop.last %} + , + {% endif %} + {% endfor %} + ] +} diff --git a/vm-setup/vm-setup/roles/firewall/defaults/main.yml b/vm-setup/vm-setup/roles/firewall/defaults/main.yml new file mode 100644 index 000000000..11282d54a --- /dev/null +++ b/vm-setup/vm-setup/roles/firewall/defaults/main.yml @@ -0,0 +1,46 @@ +use_firewalld: "{{ lookup('env', 'USE_FIREWALLD') | default(true, true) }}" +external_interface: external +provisioning_interface: provisioning +bare_metal_provisioner_interface: "{{ lookup('env', 'BARE_METAL_PROVISIONER_INTERFACE') | default('ironicendpoint', true) }}" +external_subnet_v4: "{{ lookup('env', 'EXTERNAL_SUBNET_V4') | default('192.168.111.0/24', true) }}" +bare_metal_provisioner_subnet_v4: "{{ lookup('env', 'BARE_METAL_PROVISIONER_NETWORK') | default('172.22.0.0/24', true) }}" +kind_subnet: '172.18.0.0/24' +registry_port: "{{ lookup('env', 'REGISTRY_PORT') | default('5000', true) }}" +http_port: "{{ lookup('env', 'HTTP_PORT') | default('6180', true) }}" +ironic_inspector_port: "{{ lookup('env', 'IRONIC_INSPECTOR_PORT') | default('5050', true) }}" +ironic_api_port: "{{ lookup('env', 'IRONIC_API_PORT') | default('6385', true) }}" +vbmc_port_range: "6230:6235" +sushy_port: 8000 +cluster_api_port: "{{ lookup('env', 'CLUSTER_APIENDPOINT_PORT') | default('6443', true) }}" +firewall_rule_state: present +ironic_ports: + - "{{ http_port }}" + - "443" + - "{{ ironic_inspector_port }}" + - "{{ ironic_api_port }}" + - "9999" + - "80" +vm_host_ports: + # Caching HTTP Server + - "80" + # Container image registry + - "{{ registry_port }}" + # DNS for registry naming resolution + - "53" + - "443" +pxe_udp_ports: + # Multicast DNS + - "5353" + # DHCP + - "67" + - "68" + # DHCPv6 + - "546" + - "547" + # TFTP + - "69" +ironic_keepalived_proto: + - "112" + - "icmp" + +EPHEMERAL_CLUSTER: "{{ lookup('env', 'EPHEMERAL_CLUSTER') | default('kind', true)}}" diff --git a/vm-setup/vm-setup/roles/firewall/tasks/firewalld.yaml b/vm-setup/vm-setup/roles/firewall/tasks/firewalld.yaml new file mode 100644 index 000000000..23708ebfb --- /dev/null +++ b/vm-setup/vm-setup/roles/firewall/tasks/firewalld.yaml @@ -0,0 +1,59 @@ +- name: "firewalld: Firewalld service" + service: + name: firewalld + state: started + enabled: yes + +- ansible.posix.firewalld: + zone: libvirt + interface: "{{ item }}" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes + loop: + - "{{ provisioning_interface }}" + - "{{ bare_metal_provisioner_interface }}" + - "{{ external_interface }}" + +- name: "firewalld: Provisioning host ports" + ansible.posix.firewalld: + zone: libvirt + port: "{{ item }}/tcp" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes + loop: "{{ vm_host_ports }}" + +- name: "firewalld: Ironic Ports" + ansible.posix.firewalld: + zone: libvirt + port: "{{ item }}/tcp" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes + loop: "{{ ironic_ports }}" + +- name: "firewalld: PXE Ports" + ansible.posix.firewalld: + zone: libvirt + port: "{{ item }}/udp" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes + loop: "{{ pxe_udp_ports }}" + +- name: "firewalld: VBMC Ports" + ansible.posix.firewalld: + zone: libvirt + port: "{{ vbmc_port_range | regex_replace(':', '-') }}/udp" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes + +- name: "firewalld: sushy Port" + ansible.posix.firewalld: + zone: libvirt + port: "{{ sushy_port }}/tcp" + permanent: yes + state: "{{ firewalld_rule_state }}" + immediate: yes diff --git a/vm-setup/vm-setup/roles/firewall/tasks/iptables.yaml b/vm-setup/vm-setup/roles/firewall/tasks/iptables.yaml new file mode 100644 index 000000000..9e3289471 --- /dev/null +++ b/vm-setup/vm-setup/roles/firewall/tasks/iptables.yaml @@ -0,0 +1,144 @@ +- name: "iptables: Firewalld service stopped" + service: + name: firewalld + state: stopped + enabled: no + ignore_errors: True + +- name: "iptables: VBMC Ports" + iptables: + chain: INPUT + action: insert + in_interface: "{{ external_interface }}" + protocol: udp + match: udp + destination_port: "{{ vbmc_port_range }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + +- name: "iptables: sushy Port" + iptables: + chain: INPUT + action: insert + in_interface: "{{ external_interface }}" + protocol: tcp + match: tcp + destination_port: "{{ sushy_port }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + +- name: "iptables: Established and related" + iptables: + chain: FORWARD + in_interface: "{{ provisioning_interface }}" + match: conntrack + ctstate: ESTABLISHED,RELATED + jump: ACCEPT + state: "{{ firewall_rule_state }}" + +- name: "iptables: Ironic Ports" + iptables: + chain: "{{ item[0].chain }}" + action: insert + in_interface: "{{ item[0].interface }}" + protocol: tcp + match: tcp + destination_port: "{{ item[1] }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + vars: + interfaces: + - chain: INPUT + interface: "{{ provisioning_interface }}" + - chain: INPUT + interface: "{{ bare_metal_provisioner_interface }}" + - chain: FORWARD + interface: "{{ provisioning_interface }}" + loop: "{{ interfaces | product(ironic_ports) | list }}" + +- name: "iptables: Provisioning host ports" + iptables: + chain: INPUT + action: insert + in_interface: "{{ external_interface}}" + protocol: tcp + match: tcp + destination_port: "{{ item }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + loop: "{{ vm_host_ports }}" + +- name: "iptables: PXE Ports" + iptables: + chain: "{{ item[0].chain }}" + action: insert + in_interface: "{{ item[0].interface }}" + protocol: udp + match: udp + destination_port: "{{ item[1] }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + vars: + interfaces: + - chain: INPUT + interface: "{{ provisioning_interface }}" + - chain: INPUT + interface: "{{ bare_metal_provisioner_interface }}" + - chain: FORWARD + interface: "{{ provisioning_interface }}" + loop: "{{ interfaces | product(pxe_udp_ports) | list }}" + +- name: "iptables: Ironic Endpoint Keepalived" + iptables: + chain: FORWARD + in_interface: "{{ provisioning_interface }}" + protocol: "{{ item }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + loop: "{{ ironic_keepalived_proto }}" + +- block: + - name: "iptables: Allow access to external network from kind" + iptables: + chain: INPUT + action: insert + protocol: tcp + match: tcp + destination: "{{ external_subnet_v4 }}" + destination_port: "{{ item }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + loop: "{{ vm_host_ports }}" + + - name: "iptables: Allow access to bare metal provisioner network from kind" + iptables: + chain: INPUT + action: insert + protocol: tcp + match: tcp + destination: "{{ bare_metal_provisioner_subnet_v4 }}" + destination_port: "{{ item }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + loop: "{{ ironic_ports }}" + + - name: "iptables: Allow access to Kubernetes API from kind" + iptables: + chain: INPUT + action: insert + protocol: tcp + match: tcp + destination: "{{ kind_subnet }}" + destination_port: "{{ cluster_api_port }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + + - name: "iptables: Allow forwarding to baremetal network from kind" + iptables: + chain: FORWARD + action: insert + out_interface: "{{ external_interface }}" + destination: "{{ external_subnet_v4 }}" + jump: ACCEPT + state: "{{ firewall_rule_state }}" + when: (EPHEMERAL_CLUSTER == "kind") diff --git a/vm-setup/vm-setup/roles/firewall/tasks/main.yml b/vm-setup/vm-setup/roles/firewall/tasks/main.yml new file mode 100644 index 000000000..7a305f757 --- /dev/null +++ b/vm-setup/vm-setup/roles/firewall/tasks/main.yml @@ -0,0 +1,15 @@ +- name: "firewalld" + include_tasks: firewalld.yaml + args: + apply: + become: true + vars: + firewalld_rule_state: "{{ 'enabled' if firewall_rule_state == 'present' else 'disabled' }}" + when: use_firewalld | bool + +- name: "iptables" + include_tasks: iptables.yaml + args: + apply: + become: true + when: not use_firewalld | bool diff --git a/vm-setup/vm-setup/roles/libvirt/defaults/main.yml b/vm-setup/vm-setup/roles/libvirt/defaults/main.yml new file mode 100644 index 000000000..0080fb97b --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/defaults/main.yml @@ -0,0 +1,41 @@ +# When libvirt_action==teardown we destroy the existing configuration +libvirt_action: setup + +# For some external testing we set this to "external" so that only the +# libvirt networking is configured, not the nodes +vm_platform: libvirt + +# Which libvirt session should we use? Using `qemu://session` does +# not require privileged access (but does require the setup performed by the +# `environment/setup` role). +libvirt_volume_pool: oooq_pool +libvirt_diskdev: sda +libvirt_cdromdev: sdb +libvirt_diskbus: scsi +libvirt_cdrombus: sata +libvirt_arch: x86_64 +libvirt_cpu_mode: host-model +libvirt_firmware: uefi +libvirt_secure_boot: false + +# Where to log serial console output +libvirt_log_path: "/var/log/libvirt/qemu" + +# how many disks should be created when using extradisks +extradisks_list: + - vdb + +# size of the disks to create when using extradisks +extradisks_size: 8G + +# The name of the libvirt service. +libvirtd_service: libvirtd + +# The host that has images for provisioning, this should be in the +# format of a URL host, e.g. with IPv6, it should be surrounded +# by brackets +provisioning_url_host: "{{ lookup('env', 'BARE_METAL_PROVISIONER_NETWORK') | default('172.22.0.1', true) }}" + +# BMC credentials +vbmc_username: "admin" +vbmc_password: "password" diff --git a/vm-setup/vm-setup/roles/libvirt/files/get-domain-ip.sh b/vm-setup/vm-setup/roles/libvirt/files/get-domain-ip.sh new file mode 100644 index 000000000..0b27080d9 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/files/get-domain-ip.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +# This script will attempt to get the ip address of the a given libvirt guest. + +set -eu + +PATH=$PATH:/usr/sbin:/sbin + +VMNAME=$1 + +# Get the MAC address of the first interface by looking for looking for the +# `&2 + exit 1 +fi + +echo "$ip" diff --git a/vm-setup/vm-setup/roles/libvirt/meta/main.yml b/vm-setup/vm-setup/roles/libvirt/meta/main.yml new file mode 100644 index 000000000..9711b3309 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: common diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml b/vm-setup/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml new file mode 100644 index 000000000..be0d06d7a --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml @@ -0,0 +1,46 @@ +--- +- name: Start and enable libvirtd service + when: + - not (ansible_facts['distribution'] in ['CentOS', 'RedHat'] and + ansible_facts['distribution_major_version'] >= '9') + service: + name: "{{ libvirtd_service }}" + state: started + enabled: true + become: true + +- name: Ensure socket services are enabled on newer distros + when: + - ansible_facts['distribution'] in ['CentOS', 'RedHat'] + - ansible_facts['distribution_major_version'] >= '9' + become: true + vars: + _services: + - qemu + - network + - nodedev + - nwfilter + - secret + - storage + - interface + block: + - name: Ensure libvirt modular sockets are enabled and started + ansible.builtin.service: + name: "virt{{ item }}d.socket" + state: started + enabled: true + loop: "{{ _services }}" + + - name: Ensure libvirt modular ro sockets are enabled and started + ansible.builtin.service: + name: "virt{{ item }}d-ro.socket" + state: started + enabled: true + loop: "{{ _services }}" + + - name: Ensure libvirt modular admin sockets are enabled and started + ansible.builtin.service: + name: "virt{{ item }}d-admin.socket" + state: started + enabled: true + loop: "{{ _services }}" diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/main.yml b/vm-setup/vm-setup/roles/libvirt/tasks/main.yml new file mode 100644 index 000000000..6033088a3 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/main.yml @@ -0,0 +1,14 @@ +- name: libvirt role setup tasks + block: + - include_tasks: install_setup_tasks.yml + - include_tasks: network_setup_tasks.yml + - include_tasks: vm_setup_tasks.yml + when: vm_platform == "libvirt" + when: libvirt_action == "setup" + +- name: libvirt role teardown tasks + block: + - include_tasks: network_teardown_tasks.yml + - include_tasks: vm_teardown_tasks.yml + when: vm_platform == "libvirt" + when: libvirt_action == "teardown" diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml b/vm-setup/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml new file mode 100644 index 000000000..d4f426b10 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml @@ -0,0 +1,146 @@ +# If virtualport_type is defined for any networks, include OVS dependencies +- when: networks|selectattr('virtualport_type', 'defined')|map(attribute='name')|list|length > 0 + block: + + # Install OVS dependencies + - name: Install OVS dependencies + include_role: + name: 'ovs' + + # Create any OVS Bridges that have been defined + - name: Create OVS Bridges + openvswitch_bridge: + bridge: "{{ item.bridge }}" + state: present + when: item.virtualport_type is defined and item.virtualport_type == "openvswitch" + with_items: "{{ networks }}" + become: true + +# TODO(apuimedo) drop this back to vm tasks once we have proper DNS +- name: get a list of MACs to use + include_tasks: ../../common/tasks/generate_node_mac.yml + +# Create config dir + hooks to disable dad from IPv6 addresses +- name: Create libvirt configuration directory + ansible.builtin.file: + path: "/etc/libvirt/hooks/network.d" + state: directory + +- name: Create libvirt network hooks + ansible.builtin.template: + src: ../templates/libvirt_hook.sh.j2 + dest: "/etc/libvirt/hooks/network.d/{{ item.name }}_{{ loop_index}}.sh" + owner: root + mode: '0755' + with_items: "{{ networks }}" + when: item.address_v6 is defined and item.address_v6 != '' and item.forward_mode != 'bridge' + loop_control: + index_var: loop_index + +# Restart to ensure the hooks are enabled +- name: Restart libvirt + systemd: + name: libvirtd + state: restarted + +# Create the global, root-managed libvirt networks to which we will +# attach the undercoud and vm virtual machines. +- name: Create libvirt networks + virt_net: + command: define + state: present + name: "{{ item.name }}" + xml: '{{ lookup("template", "network.xml.j2") }}' + with_items: "{{ networks }}" + become: true + +- name: Start libvirt networks + virt_net: + command: start + name: "{{ item.name }}" + state: active + with_items: "{{ networks }}" + become: true + +- name: Mark libvirt networks as autostarted + virt_net: + name: "{{ item.name }}" + autostart: "yes" + with_items: "{{ networks }}" + become: true + register: net_autostart + ignore_errors: true + +# https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676 +# There is a bug w/ virt_net and RHEL where the network xml +# file is not written to /etc/libvirt/qemu/networks/ This causes +# network to be considered transient. +- when: not net_autostart.changed + block: + + - name: Check if "virsh net-autostart" was successful + debug: msg="Some libvirt networks were not set to autostart. Please see + https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676" + + # get the network xml from the running network + - name: Get libvirt networks xml + virt_net: + command: get_xml + name: "{{ item.name }}" + with_items: "{{ networks }}" + register: net_xml + become: true + + # copy the xml to a file + - name: copy network-xml to file + copy: content={{ item.get_xml }} dest=/tmp/network-{{ item.item.name }}.xml + with_items: "{{ net_xml.results }}" + become: true + + # redefine the network w/ virsh, this will write the xml file to + # /etc/libvirt/qemu/networks/ and it will no longer be transient + - name: redefine the libvirt networks so the config is written to /etc/libvirt + command: virsh net-define /tmp/network-{{ item.name }}.xml + with_items: "{{ networks }}" + become: true + + # Now we're ready to mark the network autostart + - name: Mark libvirt networks as autostarted + virt_net: + name: "{{ item.name }}" + autostart: "yes" + with_items: "{{ networks }}" + become: true + +# Whitelist the bridges associated with these networks for +# access using qemu [helper networking][helper]. Later on we +# create virtual machines use an unprivileged `qemu://session` +# connection, and we connect to the networks using the bridge names. +# +# [helper]: http://wiki.qemu.org/Features-Done/HelperNetworking +- name: Whitelist bridges for unprivileged access on CentOS + lineinfile: + dest: '/etc/qemu-kvm/bridge.conf' # Needs to be /etc/qemu/bridge.conf if supporting Fedora + line: "allow {{ item.bridge }}" + with_items: "{{ networks }}" + when: + - ansible_os_family == "RedHat" + become: true + +- name: Whitelist bridges for unprivileged access on Ubuntu or Fedora + lineinfile: + dest: '/etc/qemu/bridge.conf' + line: "allow {{ item.bridge }}" + create: yes + with_items: "{{ networks }}" + when: + - ansible_facts['distribution'] == "Ubuntu" + become: true + +# We're going to want to store things in `working_dir` so ensure it +# exists first. `working_dir` is a directory on the target host. +- name: Ensure remote working dir exists + file: + path: "{{ working_dir }}" + state: directory + become: true diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml b/vm-setup/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml new file mode 100644 index 000000000..bf7d1ce2c --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml @@ -0,0 +1,33 @@ +- name: Stop libvirt networks + virt_net: + command: destroy + name: "{{ item.name }}" + state: inactive + with_items: "{{ networks }}" + become: true + +- name: Delete libvirt networks + virt_net: + command: undefine + state: absent + name: "{{ item.name }}" + with_items: "{{ networks }}" + become: true + +- name: Delete libvirt network hooks + ansible.builtin.file: + path: "/etc/libvirt/hooks/network.d/" + state: absent + +# TODO: Replace with ansible community.general.nmcli? +- name: Delete bridges and veth interfaces on Ubuntu + shell: | + sudo ip link set external down + sudo ip link set provisioning down + sudo ip link set ironicendpoint down + brctl delbr external || true + brctl delbr provisioning || true + sudo ip link del ironicendpoint || true + when: + - ansible_distribution == 'Ubuntu' + become: yes diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml b/vm-setup/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml new file mode 100644 index 000000000..dfa09adfa --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml @@ -0,0 +1,154 @@ +# Create a libvirt volume pool. This is where we'll be creating +# images for the VMs +# Note: the virt_pool module is not working properly on rhel-7.2 +# https://bugs.launchpad.net/tripleo-quickstart/+bug/1597905 +- name: ensure libvirt volume path exists + become: true + file: + path: "{{ libvirt_volume_path }}" + state: directory + mode: 0755 + +- block: + - name: Detect virtualization if libvirt_domain_type is not provided + command: systemd-detect-virt + ignore_errors: true + become: true + register: virt_result + + - name: Default to qemu if inside a VM + set_fact: + libvirt_domain_type: qemu + when: virt_result is succeeded + + - name: Default to kvm if a VM is not detected + set_fact: + libvirt_domain_type: kvm + when: virt_result is failed + when: libvirt_domain_type is undefined + +- name: Check volume pool + command: > + virsh pool-uuid "{{ libvirt_volume_pool }}" + register: pool_check + ignore_errors: true + changed_when: false + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + +- name: create the volume pool xml file + template: + src: volume_pool.xml.j2 + dest: "{{ working_dir }}/volume_pool.xml" + when: pool_check is failed + +- name: Define volume pool + command: "virsh pool-define {{ working_dir }}/volume_pool.xml" + when: pool_check is failed + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + +- name: Start volume pool + virt_pool: + command: start + state: active + name: "{{ libvirt_volume_pool }}" + uri: "{{ libvirt_uri }}" + +# In some cases the pool_check can pass and the pool xml config is absent +# In this case it is required to dump the xml and redefine the pool. +- name: ensure tripleo-quickstart volume pool is defined + shell: > + virsh pool-dumpxml {{ libvirt_volume_pool }} | + virsh pool-define /dev/stdin + changed_when: true + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + +- name: Mark volume pool for autostart + virt_pool: + name: "{{ libvirt_volume_pool }}" + autostart: "yes" + uri: "{{ libvirt_uri }}" + +- when: vm_nodes | length > 0 + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + block: + + # Create libvirt volumes for the vm hosts. + - name: Check if vm volumes exist + command: > + virsh vol-info --pool '{{ libvirt_volume_pool }}' '{{ item.name }}.qcow2' + register: vm_vol_check + ignore_errors: true + with_items: "{{ vm_nodes }}" + + - name: Create vm vm storage + command: > + virsh vol-create-as '{{ libvirt_volume_pool }}' + '{{ item.item.name }}'.qcow2 '{{ flavors[item.item.flavor].disk }}'G + --format qcow2 + when: + - item is failed + with_items: "{{ vm_vol_check.results }}" + + # Define (but do not start) the vm nodes. These will be + # booted later by ironic during the provisioning process. + - name: Define vm vms + virt: + name: "{{ item.name }}" + command: define + xml: "{{ lookup('template', 'baremetalvm.xml.j2') }}" + uri: "{{ libvirt_uri }}" + with_items: "{{ vm_nodes }}" + + # Create additional blockdevices for each objectstorage flavor node + # These are sparse files, not using space if unused + - name: Create additional blockdevice for objectstorage nodes + command: > + dd if=/dev/zero of={{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img bs=1 count=0 seek={{ extradisks_size }} + when: flavors[item[0].flavor].extradisks|default(false)|bool + with_nested: + - "{{ vm_nodes }}" + - "{{ extradisks_list }}" + + - name: Check if additional blockdevices are attached + command: > + virsh domblkinfo {{ item[0].name }} {{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img + when: flavors[item[0].flavor].extradisks|default(false)|bool + changed_when: false + ignore_errors: true + register: vm_extradisks_check + with_nested: + - "{{ vm_nodes }}" + - "{{ extradisks_list }}" + + - name: Attach additional blockdevices to vm objectstorage VMs + command: > + virsh attach-disk --config {{ item.item[0].name }} {{ libvirt_volume_path }}/{{ item.item[0].name }}_{{ item.item[1] }}.img {{ item.item[1] }} + when: item is failed + with_items: "{{ vm_extradisks_check.results }}" + + # Get the uuid of VMs for setting system-id in redfish URL + - name: Get vm uuid + command: > + virsh domuuid "{{ item.name }}" + register: vm_uuid + with_items: "{{ vm_nodes }}" + + - name: set_fact + set_fact: + vm_id: "{{ vm_id|default({}) | combine ( {item.item.name: item.stdout} ) }}" + with_items: "{{ vm_uuid.results }}" + + - name: set_fact BMC Driver + set_fact: + vm_driver: "{{ lookup('env', 'BMC_DRIVER') | default('mixed', true) }}" + + # Generate the ironic node inventory files. Note that this + # task *must* occur after the above vm tasks, because if + # `vm_nodes` is defined the template depends on the + # `node_mac_map` variable. + - name: Write ironic node json files + include_tasks: ../../common/tasks/write_ironic_nodes_tasks.yml diff --git a/vm-setup/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml b/vm-setup/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml new file mode 100644 index 000000000..ee5db38d5 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml @@ -0,0 +1,102 @@ +# NB: We use `virsh` here instead of the `virt` module because +# these tasks may be called before the dependencies of the `virt` +# module are satisfied. + +- name: Check if libvirt is available + command: > + virsh uri + ignore_errors: true + changed_when: false + register: libvirt_check + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + +# If libvirt isn't available we can skip everything else. +- when: libvirt_check is success + environment: + LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}" + block: + + - when: vm_nodes | length > 0 + block: + + # Check if the vm nodes exist. + - name: Check vm vms + command: > + virsh domid "{{ item.name }}" + with_items: "{{ vm_nodes }}" + ignore_errors: true + register: vm_check + + # Destroy and undefine the vm nodes. + - name: Destroy vm vms + command: + virsh destroy "{{ item.item.name }}" + when: item is success + with_items: "{{ vm_check.results }}" + ignore_errors: true + + - name: Undefine vm vms + command: + virsh undefine --nvram "{{ item.item.name }}" + when: item is success + with_items: "{{ vm_check.results }}" + + # The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to + # [1293804]. + # + # [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804 + - name: Delete baremetal vm storage + shell: | + virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \ + '{{ item.name }}'.qcow2 2>&1 > /dev/null + virsh vol-delete --pool '{{ libvirt_volume_pool }}' \ + '{{ item.name }}'.qcow2 + with_items: "{{ vm_nodes }}" + ignore_errors: true + + - name: Check volume pool + command: > + virsh pool-uuid "{{ libvirt_volume_pool }}" + register: pool_check + ignore_errors: true + + # See https://www.redhat.com/archives/libvirt-users/2016-March/msg00123.html + # TL;DR: ensure that the pool really exists if the previous + # task says it does. + - name: Work around libvirt bug + shell: | + virsh pool-dumpxml "{{ libvirt_volume_pool }}" | + virsh pool-define /dev/stdin + when: pool_check is success + + - name: Destroy volume pool + command: > + virsh pool-destroy "{{ libvirt_volume_pool }}" + when: pool_check is success + ignore_errors: true + + - name: Undefine volume pool + command: > + virsh pool-undefine "{{ libvirt_volume_pool }}" + when: pool_check is success + + - name: Get UID of pool user + command: id -u "{{ ansible_user_id }}" + register: pool_uid + changed_when: false + when: pool_check is success + + - name: Destroy pool definition file + file: + path: "/run/user/{{ pool_uid.stdout }}/libvirt/storage/run/{{ libvirt_volume_pool }}.xml" + state: absent + when: pool_check is success + + - name: Remove ironic_nodes.json + file: + path: "{{ nodes_file }}" + state: absent + + - name: Remove any leftover ISO images + command: find /var/lib/libvirt/images/ -type f -name 'boot-*-iso-*' -delete diff --git a/vm-setup/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 b/vm-setup/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 new file mode 100644 index 000000000..008c4881e --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 @@ -0,0 +1,90 @@ + + {{ item.name }} + {{ flavors[item.flavor].memory }} + {{ flavors[item.flavor].vcpu }} + + {{baremetal_vm_xml|default('')}} + +{% if libvirt_arch != 'aarch64' %} + + hvm +{% if libvirt_firmware == 'uefi' %} +{% if libvirt_secure_boot|bool %} + /usr/share/OVMF/OVMF_CODE.secboot.fd +{% elif host_os == 'centos' or host_os == 'rhel' %} + /usr/share/edk2/ovmf/OVMF_CODE.fd +{% elif host_distro == 'ubuntu24' %} + /usr/share/OVMF/OVMF_CODE_4M.fd +{% else %} + /usr/share/OVMF/OVMF_CODE.fd +{% endif %} +{% endif %} + + + + +{% else %} + + hvm + +{% endif %} + + + + + + +{% if libvirt_domain_type == 'qemu' %} + +{% else %} + +{% endif %} + + destroy + restart + restart + + + + + + + + + + +{% if libvirt_diskbus == 'scsi' %} + +{% endif %} +{% if tpm_emulator|bool %} + + + +{% endif %} +{% for network in networks %} + + + + +{% if network.virtualport_type is defined %} + +{% endif %} + +{% endfor %} + + + + + +{% if enable_vnc_console|bool %} + + + +{% endif %} + + {{baremetal_vm_device_xml|default('')}} + + + diff --git a/vm-setup/vm-setup/roles/libvirt/templates/libvirt_hook.sh.j2 b/vm-setup/vm-setup/roles/libvirt/templates/libvirt_hook.sh.j2 new file mode 100644 index 000000000..928dc36b2 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/templates/libvirt_hook.sh.j2 @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +if [[ "$1" = "{{ item.name }}" ]] && [[ "$2" = "started" ]] && [[ "$3" = "begin" ]]; then + ip -6 addr del {{ item.address_v6 }}/{{ item.prefix_v6 }} dev {{ item.name }} + ip -6 addr add {{ item.address_v6 }}/{{ item.prefix_v6 }} dev {{ item.name }} nodad +fi diff --git a/vm-setup/vm-setup/roles/libvirt/templates/network.xml.j2 b/vm-setup/vm-setup/roles/libvirt/templates/network.xml.j2 new file mode 100644 index 000000000..3ab093b35 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/templates/network.xml.j2 @@ -0,0 +1,143 @@ +{% set nat_port_range = item.nat_port_range|default([1024, 65535]) %} +{% set netmask_v4 = item.netmask_v4|default("") %} +{% set prefix_v6 = item.prefix_v6|default("") %} + + + + + + {% if item.dns.options is defined %} + + {% endif %} + + + {{ item.name }} + + +{% if item.forward_mode is defined %} + + {% if item.forward_mode == 'nat' %} + + + + {% endif %} + +{% endif %} + +{% if item.virtualport_type is defined %} + +{% endif %} + +{# IPv4 Configuration #} +{% if item.address_v4 is defined and item.address_v4 != '' and item.forward_mode != 'bridge' %} + + {% if item.dhcp_range_v4 is defined and item.dhcp_range_v4|length != 0 %} + + + {% set ns = namespace(index=0) %} + {% for flavor in flavors %} + {% set numflavor = lookup('vars', 'num_' + flavor + 's')|default(0)|int %} + {% for num in range(0, numflavor) %} + {% set ironic_name = ironic_prefix + flavor + "_" + num|string %} + {% set hostname_format = lookup('vars', flavor + '_hostname_format', default=flavor + '-%d') %} + {% set hostname = hostname_format % num %} + + + + {% set ns.index = ns.index + 1 %} + {% endfor %} + {% endfor %} + + {% endif %} + + {% if item.domain is defined %} + + {% endif %} + {% if item.dns is defined %} + + {% for host in item.dns.hosts %} + + {% for name in host.hostnames %} + {{ name }} + {% endfor %} + + {% endfor %} + {% if item.dns.srvs is defined %} + {% for srv in item.dns.srvs %} + + {% endfor %} + {% endif %} + {% if item.dns.forwarders is defined %} + {% for forwarder in item.dns.forwarders %} + + {% endfor %} + {% endif %} + + {% endif %} +{% endif %} +{# End IPv4 Configuration #} + +{# IPv6 Configuration #} +{% if item.address_v6 is defined and item.address_v6 != '' and item.forward_mode != 'bridge' %} + + {% if item.dhcp_range_v6 is defined and item.dhcp_range_v6|length != 0 %} + + + {% set ns = namespace(index=0) %} + {% for flavor in flavors %} + {% set numflavor = lookup('vars', 'num_' + flavor + 's')|default(0)|int %} + {% for num in range(0, numflavor) %} + {% set ironic_name = ironic_prefix + flavor + "_" + num|string %} + {% set hostname_format = lookup('vars', flavor + '_hostname_format', default=flavor + '-%d') %} + {% set hostname = hostname_format % num %} + + + + {% set ns.index = ns.index + 1 %} + {% endfor %} + {% endfor %} + + {% endif %} + + {% if item.domain is defined %} + + {% endif %} + {% if item.dns is defined %} + + {% for host in item.dns.hosts %} + + {% for name in host.hostnames %} + {{ name }} + {% endfor %} + + {% endfor %} + {% if item.dns.srvs is defined %} + {% for srv in item.dns.srvs %} + + {% endfor %} + {% endif %} + {% if item.dns.forwarders is defined %} + {% for forwarder in item.dns.forwarders %} + + {% endfor %} + {% endif %} + + {% endif %} +{% endif %} +{# End IPv6 Configuration #} + +{% if item.portgroup is defined %} + {% for portgroup in item.portgroup %} + + {% if portgroup.vlan is defined %} + + {% for vlan in portgroup.vlan %} + + {% endfor %} + + {% endif %} + + {% endfor %} +{% endif %} + + diff --git a/vm-setup/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 b/vm-setup/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 new file mode 100644 index 000000000..eb1981086 --- /dev/null +++ b/vm-setup/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 @@ -0,0 +1,11 @@ + + {{ libvirt_volume_pool }} + + {{ libvirt_volume_path }} + + 0755 + -1 + -1 + + + diff --git a/vm-setup/vm-setup/roles/ovs/defaults/main.yml b/vm-setup/vm-setup/roles/ovs/defaults/main.yml new file mode 100644 index 000000000..11c7262dd --- /dev/null +++ b/vm-setup/vm-setup/roles/ovs/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# The package name for openvswitch +ovs_package: openvswitch + +# The name of the openvswitch service. +ovs_service: openvswitch + diff --git a/vm-setup/vm-setup/roles/ovs/tasks/main.yml b/vm-setup/vm-setup/roles/ovs/tasks/main.yml new file mode 100644 index 000000000..b9c7b504a --- /dev/null +++ b/vm-setup/vm-setup/roles/ovs/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Install Openvswitch package + package: + name: "{{ ovs_package }}" + state: present + become: true + +- name: Start Openvswitch + service: + name: "{{ ovs_service }}" + state: started + enabled: true + become: true + diff --git a/vm-setup/vm-setup/roles/packages_installation/defaults/main.yml b/vm-setup/vm-setup/roles/packages_installation/defaults/main.yml new file mode 100644 index 000000000..e35061469 --- /dev/null +++ b/vm-setup/vm-setup/roles/packages_installation/defaults/main.yml @@ -0,0 +1,106 @@ +packages: + ubuntu: + common: + packages: + - python3-setuptools + - zlib1g-dev + - openssh-server + - wget + - curl + - dnsmasq + - nmap + - ovmf + - patch + - psmisc + - libvirt-clients + - libvirt-dev + - bridge-utils + - jq + - unzip + - genisoimage + - qemu-kvm + - libguestfs-tools + - gir1.2-polkit-1.0 + - libpolkit-agent-1-0 + - libpolkit-gobject-1-0 + - apache2-utils + podman: + packages: + - apt-transport-https + - ca-certificates + - gnupg-agent + - software-properties-common + focal_jammy: + packages: + - apparmor + - apparmor-profiles-extra + - apparmor-utils + - libvirt-daemon + - libvirt-daemon-system + - libssl-dev + - netcat + noble: + packages: + - apparmor + - apparmor-profiles-extra + - apparmor-utils + - libvirt-daemon + - libvirt-daemon-system + - libssl-dev + - netcat-traditional + pip3: + - python-apt + - kubernetes==25.3.0 + - pyYAML + - virtualbmc + - lxml + - netaddr + - libvirt-python + - six + - docker-py + - jmespath + - passlib + centos: + common: + packages: + - bind-utils + - curl + - dnsmasq + - edk2-ovmf + - firewalld + - genisoimage + - httpd-tools + - jq + - libguestfs-tools + - libguestfs-tools + - libvirt + - libvirt-daemon-kvm + - libvirt-devel + - NetworkManager + - nmap + - patch + - podman + - polkit-pkla-compat + - psmisc + - python3-bcrypt + - python3-libvirt + - python3-lxml + - python3-netaddr + - python3-requests-oauthlib + - python3-six + - qemu-img + - qemu-kvm + - unzip + - vim-enhanced + - virt-install + - wget + pip3: + - flask_oauthlib==0.9.6 + - jmespath + - kubernetes==25.3.0 + - passlib +DAEMON_JSON_PATH: "{{ metal3_dir }}/vm-setup/roles/packages_installation/files" +CONTAINER_RUNTIME: "{{ lookup('env', 'CONTAINER_RUNTIME') }}" +OS_VERSION_ID: "{{ lookup('env', 'OS_VERSION_ID') }}" +REGISTRY: "{{ lookup('env', 'REGISTRY') }}" +DOCKER_IPV6_SUPPORT: "{{ lookup('env', 'DOCKER_USE_IPV6_INTERNALLY') | default('false', true) }}" diff --git a/vm-setup/vm-setup/roles/packages_installation/files/daemon.json b/vm-setup/vm-setup/roles/packages_installation/files/daemon.json new file mode 100644 index 000000000..d13754bbd --- /dev/null +++ b/vm-setup/vm-setup/roles/packages_installation/files/daemon.json @@ -0,0 +1,5 @@ +{ + "ipv6": {{ DOCKER_IPV6_SUPPORT }}, + "fixed-cidr-v6": "fd00::/80", + "insecure-registries" : ["{{ REGISTRY }}"] +} diff --git a/vm-setup/vm-setup/roles/packages_installation/tasks/centos_required_packages.yml b/vm-setup/vm-setup/roles/packages_installation/tasks/centos_required_packages.yml new file mode 100644 index 000000000..81a1693ae --- /dev/null +++ b/vm-setup/vm-setup/roles/packages_installation/tasks/centos_required_packages.yml @@ -0,0 +1,35 @@ +# Perform CentOS/RHEL related configurations +- name: Fail if CONTAINER_RUNTIME is not set to podman + fail: + msg: Only Podman is supported in CentOS/RHEL + when: CONTAINER_RUNTIME != "podman" + +- name: Enable SELinux + ansible.posix.selinux: + policy: targeted + state: permissive + become: yes + when: ansible_selinux.status == "enabled" + +# Remove any previous tripleo-repos to avoid version conflicts +# (see FIXME re oniguruma below) +- name: Remove any previous tripleo-repos to avoid version conflicts + dnf: + name: python*-tripleo-repos + state: absent + become: yes + +- name: Upgrade all packages + dnf: + name: "*" + state: latest + nobest: true + become: yes + +- name: Install podman + dnf: + name: podman + state: present + become: yes + when: CONTAINER_RUNTIME == "podman" + diff --git a/vm-setup/vm-setup/roles/packages_installation/tasks/main.yml b/vm-setup/vm-setup/roles/packages_installation/tasks/main.yml new file mode 100644 index 000000000..bf1e4a2e3 --- /dev/null +++ b/vm-setup/vm-setup/roles/packages_installation/tasks/main.yml @@ -0,0 +1,81 @@ +- name: Install packages on Ubuntu + block: + - name: Install required packages for Ubuntu + include_tasks: ubuntu_required_packages.yml + - name: Install common packages using standard package manager for Ubuntu + package: + name: "{{ packages.ubuntu.common.packages}}" + state: present + - name: Install packages using standard package manager for Ubuntu 20.04 or Ubuntu 22.04 + when: ansible_distribution_version == "20.04" or ansible_distribution_version == "22.04" + package: + name: "{{ packages.ubuntu.focal_jammy.packages}}" + state: present + become: yes + - name: Install packages using standard package manager for Ubuntu 24.04 + when: ansible_distribution_version == "24.04" + package: + name: "{{ packages.ubuntu.noble.packages}}" + state: present + become: yes + - name: Install packages specific to Podman + package: + name: "{{ packages.ubuntu.podman.packages}}" + state: present + when: CONTAINER_RUNTIME == "podman" + become: yes + - name: Install packages using pip3 + pip: + executable: "{{ ANSIBLE_VENV | default('/usr') }}/bin/pip" + name: "{{ packages.ubuntu.pip3 }}" + state: present + when: ansible_distribution_version != "24.04" + - name: Install packages using pip3 on Ubuntu Noble + pip: + break_system_packages: true + executable: "{{ ANSIBLE_VENV | default('/usr') }}/bin/pip" + name: "{{ packages.ubuntu.pip3 }}" + state: present + when: ansible_distribution_version == "24.04" + - name: Add TPM emulator + block: + - name: Add TPM emulator PPA + apt_repository: + repo: 'ppa:smoser/swtpm' + state: present + become: yes + - name: Install TPM emulator packages + package: + name: + - swtpm + - libtpms + state: present + become: yes + when: tpm_emulator|default(false)|bool + become: yes + when: ansible_facts['distribution'] == "Ubuntu" + +- name: Install packages on CentOS/RHEL + block: + - name: Install packages on CentOS/RHEL + package: + name: "{{ packages.centos.common.packages }}" + state: present + nobest: true + - name: Install packages using pip3 + pip: + executable: "{{ ANSIBLE_VENV | default('/usr') }}/bin/pip" + name: "{{ packages.centos.pip3 }}" + state: present + - name: Perform CentOS/RHEL required configurations + include_tasks: centos_required_packages.yml + - name: Install TPM emulator packages + when: tpm_emulator|default(false)|bool + package: + name: + - swtpm + - swtpm-tools + state: present + become: yes + become: yes + when: ansible_os_family == "RedHat" diff --git a/vm-setup/vm-setup/roles/packages_installation/tasks/ubuntu_required_packages.yml b/vm-setup/vm-setup/roles/packages_installation/tasks/ubuntu_required_packages.yml new file mode 100644 index 000000000..dcd569a79 --- /dev/null +++ b/vm-setup/vm-setup/roles/packages_installation/tasks/ubuntu_required_packages.yml @@ -0,0 +1,153 @@ +--- +- name: Install required Ubuntu packages + block: + - name: Update all packages to their latest version + apt: + name: "*" + state: latest + + # TODO: (Sunnatillo) Remove this task after fully removing apt-key + - name: Remove OS old repository (without gpg key file location) + apt_repository: + repo: "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_{{ OS_VERSION_ID }}/ /" + state: absent + + - name: Remove Ubuntu Noble old repository (without gpg key file location) + apt_repository: + repo: "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_{{ OS_VERSION_ID }}/ /" + state: absent + when: ansible_distribution_release == "noble" + + - name: Fetch OS release key + get_url: + url: "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_{{ OS_VERSION_ID }}/Release.key" + dest: /usr/share/keyrings/libcontainers-archive-keyring.asc + mode: '0644' + force: true + when: ansible_distribution_release != "noble" + + - name: Fetch Ubuntu Noble release key + get_url: + url: "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_{{ OS_VERSION_ID }}/Release.key" + dest: /usr/share/keyrings/libcontainers-archive-keyring.asc + mode: '0644' + force: true + when: ansible_distribution_release == "noble" + + - name: Dearmor Release key + shell: | + cat /usr/share/keyrings/libcontainers-archive-keyring.asc | sudo gpg --dearmor -o /usr/share/keyrings/libcontainers-archive-keyring.gpg --yes + + - name: Add OS repository + lineinfile: + path: /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list + line: "deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_{{ OS_VERSION_ID }}/ /" + create: yes + owner: root + group: root + mode: '0644' + when: ansible_distribution_release != "noble" + + - name: Add Ubuntu Noble repository + lineinfile: + path: /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list + line: "deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_{{ OS_VERSION_ID }}/ /" + create: yes + owner: root + group: root + mode: '0644' + when: ansible_distribution_release == "noble" + + - name: Update all packages to their latest version + apt: + name: "*" + state: latest + + - name: Podman + block: + - name: Install podman + apt: + name: podman + state: present + + - name: Registries configuration for podman + blockinfile: + path: /etc/containers/registries.conf + block: | + [registries.insecure] + registries = ['{{ REGISTRY }}'] + become: yes + when: CONTAINER_RUNTIME == "podman" + + - name: Install docker + block: + - name: Create /etc/apt/keyrings folder + file: + path: /etc/apt/keyrings + state: directory + + - name: Add Docker’s GPG key + get_url: + url: https://download.docker.com/linux/ubuntu/gpg + dest: /etc/apt/keyrings/docker.asc + mode: '0644' + force: true + + - name: Dearmor GPG key + shell: | + cat /etc/apt/keyrings/docker.asc | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes + + # TODO: (Sunnatillo) Remove this task after fully removing apt-key + - name: Remove Docker old repository (without gpg key file location) + apt_repository: + repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: absent + + # TODO: (Sunnatillo) Remove this task after fully removing apt-key + - name: Remove Docker old repository (without gpg key file location) + apt_repository: + repo: "deb https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: absent + + - name: Add Docker Repository + apt_repository: + repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + + - name: Update all packages to their latest version + apt: + name: "*" + state: latest + + - name: Install docker + apt: name={{ item }} state=latest update_cache=yes + loop: [ 'docker-ce', 'docker-ce-cli', 'containerd.io' ] + + - name: Create docker configuration dir + file: + path: /etc/docker + state: directory + owner: root + group: root + + - name: Template daemon.json to /etc/docker/daemon.json + template: + src: "{{ DAEMON_JSON_PATH }}/daemon.json" + dest: /etc/docker/daemon.json + owner: root + group: root + + - name: Restart docker systemd service + service: + name: docker + state: restarted + daemon_reload: yes + + - name: Add current user to the docker group + user: + name: "{{ lookup('env','USER') }}" + groups: docker + append: yes + when: CONTAINER_RUNTIME == "docker" + become: yes + become: yes diff --git a/vm-setup/vm-setup/roles/virtbmc/defaults/main.yml b/vm-setup/vm-setup/roles/virtbmc/defaults/main.yml new file mode 100644 index 000000000..f974a4a98 --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/defaults/main.yml @@ -0,0 +1,9 @@ +# Can be set to "teardown" to destroy a previous configuration +virtbmc_action: setup +sushy_ignore_boot_device: False +# This matches how real hardware behaves +sushy_vmedia_verify_ssl: False + +# BMC credentials +vbmc_username: "admin" +vbmc_password: "password" diff --git a/vm-setup/vm-setup/roles/virtbmc/meta/main.yml b/vm-setup/vm-setup/roles/virtbmc/meta/main.yml new file mode 100644 index 000000000..2083f0e12 --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - common diff --git a/vm-setup/vm-setup/roles/virtbmc/tasks/main.yml b/vm-setup/vm-setup/roles/virtbmc/tasks/main.yml new file mode 100644 index 000000000..3e3578213 --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/tasks/main.yml @@ -0,0 +1,4 @@ +- include_tasks: setup_tasks.yml + when: virtbmc_action == "setup" +- include_tasks: teardown_tasks.yml + when: virtbmc_action == "teardown" diff --git a/vm-setup/vm-setup/roles/virtbmc/tasks/setup_tasks.yml b/vm-setup/vm-setup/roles/virtbmc/tasks/setup_tasks.yml new file mode 100644 index 000000000..19ffbd1c8 --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/tasks/setup_tasks.yml @@ -0,0 +1,167 @@ +--- + +- name: Create VirtualBMC directories + file: + path: "{{ item }}" + state: directory + mode: 0750 + owner: root + group: root + with_items: + - "{{ working_dir }}/virtualbmc" + - "{{ working_dir }}/virtualbmc/vbmc" + - "{{ working_dir }}/virtualbmc/vbmc/conf" + - "{{ working_dir }}/virtualbmc/vbmc/log" + - "{{ working_dir }}/virtualbmc/sushy-tools" + become: true + +- name: Create VirtualBMC configuration file + copy: + mode: 0750 + dest: "{{ working_dir }}/virtualbmc/vbmc/virtualbmc.conf" + content: | + [default] + config_dir=/root/.vbmc/conf/ + [log] + logfile=/root/.vbmc/log/virtualbmc.log + debug=True + [ipmi] + session_timout=20 + become: true + +- name: get virthost non_root_user userid + command: id -u {{ non_root_user }} + register: non_root_user_uid + +- name: set fact on non_root_user_uid + set_fact: + non_root_user_uid: "{{ non_root_user_uid.stdout }}" + +# The first network defined with an address will be used for vbmc access. +- name: set vbmc address (v4) if there is a (nat) network defined with an address + set_fact: + vbmc_address_v4: "{{ networks|selectattr('address_v4', 'defined')|map(attribute='address_v4')|list|first }}" + when: networks|selectattr('address_v4', 'defined')|map(attribute='name')|list|length > 0 + +- name: set vbmc address (v6) if there is a (nat) network defined with an address + set_fact: + vbmc_address_v6: "{{ networks|selectattr('address_v6', 'defined')|map(attribute='address_v6')|list|first }}" + when: networks|selectattr('address_v6', 'defined')|map(attribute='name')|list|length > 0 + +- name: set vbmc address from IPv4 networks if possible, otherwise IPv6 + set_fact: + vbmc_address: "{% if vbmc_address_v4|ansible.utils.ipv4 != False %}{{ vbmc_address_v4 }}{% else %}{{ vbmc_address_v6 }}{% endif %}" + +# The connection uri is slightly different when using qemu:///system +# and requires the root user. +- name: set qemu uri for qemu:///system usage + set_fact: + vbmc_libvirt_uri: "qemu+ssh://root@{{ vbmc_address | ansible.utils.ipwrap }}/system?&keyfile=/root/ssh/id_rsa_virt_power&no_verify=1&no_tty=1" + when: libvirt_uri == "qemu:///system" + +- name: set qemu uri for qemu:///session usage + set_fact: + vbmc_libvirt_uri: "qemu+ssh://{{ non_root_user }}@{{ vbmc_address | ansible.utils.ipwrap }}/session?socket=/run/user/{{ non_root_user_uid }}/libvirt/libvirt-sock&keyfile=/root/ssh/id_rsa_virt_power&no_verify=1&no_tty=1" + when: vbmc_libvirt_uri is not defined + +- name: Create VirtualBMC directories + when: vm_platform|default("libvirt") != "fake" + file: + path: "{{ working_dir }}/virtualbmc/vbmc/conf/{{ item.name }}" + state: directory + mode: 0750 + owner: root + group: root + with_items: "{{ vm_nodes }}" + become: true + +- name: Create the Virtual BMCs + when: vm_platform|default("libvirt") != "fake" + copy: + mode: 0750 + dest: "{{ working_dir }}/virtualbmc/vbmc/conf/{{ item.name }}/config" + content: | + [VirtualBMC] + username = {{ vbmc_username }} + password = {{ vbmc_password }} + domain_name = {{ item.name }} + libvirt_uri = {{ vbmc_libvirt_uri }} + address = {{ vbmc_address }} + active = True + port = {{ item.virtualbmc_port }} + with_items: "{{ vm_nodes }}" + become: true + +- name: Create a password file for Redfish Virtual BMCs + htpasswd: + path: "{{ working_dir }}/virtualbmc/sushy-tools/htpasswd" + crypt_scheme: bcrypt + name: "{{ vbmc_username }}" + password: "{{ vbmc_password }}" + +- name: Create private key (RSA, 4096 bits) for redfish TLS + community.crypto.openssl_privatekey: + path: "{{ working_dir }}/virtualbmc/sushy-tools/key.pem" + +- name: Create self-signed certificate for redfish TLS + community.crypto.x509_certificate: + path: "{{ working_dir }}/virtualbmc/sushy-tools/cert.pem" + privatekey_path: "{{ working_dir }}/virtualbmc/sushy-tools/key.pem" + provider: selfsigned + +- name: Create the Redfish Virtual BMCs + copy: + mode: 0750 + dest: "{{ working_dir }}/virtualbmc/sushy-tools/conf.py" + content: | + SUSHY_EMULATOR_LIBVIRT_URI = "{{ vbmc_libvirt_uri }}" + SUSHY_EMULATOR_IGNORE_BOOT_DEVICE = {{ sushy_ignore_boot_device }} + SUSHY_EMULATOR_VMEDIA_VERIFY_SSL = {{ sushy_vmedia_verify_ssl }} + SUSHY_EMULATOR_AUTH_FILE = "/root/sushy/htpasswd" + SUSHY_EMULATOR_SSL_KEY = "/root/sushy/key.pem" + SUSHY_EMULATOR_SSL_CERT = "/root/sushy/cert.pem" + become: true + when: vm_platform|default("libvirt") != "fake" + +- name: get a list of MACs to use + when: vm_platform|default("libvirt") == "fake" + include_tasks: ../../common/tasks/generate_node_mac.yml + +- name: Set the uuid for fake VMs + when: vm_platform|default("libvirt") == "fake" + set_fact: + vm_id: "{{ vm_id|default({}) | combine ( {item.name: item.name | to_uuid()} ) }}" + with_items: "{{ vm_nodes }}" + +# Define the fake vm nodes. These will be +# used by sushy-tools. +- name: Define fake vms + when: vm_platform|default("libvirt") == "fake" + template: + src: ../templates/fake_nodes.json.j2 + dest: "{{ fake_nodes_file }}" + +- name: set_fact BMC Driver + when: vm_platform|default("libvirt") == "fake" + set_fact: + vm_driver: "{{ lookup('env', 'BMC_DRIVER') | default('redfish', true) }}" + +- name: Write ironic node json files + when: vm_platform|default("libvirt") == "fake" + include_tasks: ../../common/tasks/write_ironic_nodes_tasks.yml + +# if FakeIPA enabled then set required sushy-tools config +- name: Create the Redfish Virtual BMCs for FakeIPA + copy: + mode: 0750 + dest: "{{ working_dir }}/virtualbmc/sushy-tools/conf.py" + content: | + SUSHY_EMULATOR_LIBVIRT_URI = "{{ vbmc_libvirt_uri }}" + SUSHY_EMULATOR_IGNORE_BOOT_DEVICE = {{ sushy_ignore_boot_device }} + SUSHY_EMULATOR_VMEDIA_VERIFY_SSL = {{ sushy_vmedia_verify_ssl }} + SUSHY_EMULATOR_AUTH_FILE = "/root/sushy/htpasswd" + SUSHY_EMULATOR_FAKE_DRIVER = True + SUSHY_EMULATOR_FAKE_IPA = True + SUSHY_EMULATOR_FAKE_SYSTEMS = {{ lookup('ansible.builtin.file', fake_nodes_file ) }} + become: true + when: vm_platform|default("libvirt") == "fake" diff --git a/vm-setup/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml b/vm-setup/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml new file mode 100644 index 000000000..394b62d77 --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml @@ -0,0 +1,20 @@ +--- + +- name: Remove virtualbmc directories + file: + path: "{{ item }}" + state: absent + with_items: + # From old systemd virtualbmc service + - "/etc/virtualbmc" + - "/var/log/virtualbmc" + - "/root/.vbmc/" + # Containerized version working dir + - "{{ working_dir }}/virtualbmc" + become: true + +- name: Stop/disable the Virtual BMCs (virtualbmc >= 1.4.0+) on Ubuntu + when: + - ansible_distribution == 'Ubuntu' + shell: pkill vbmcd || true + become: true diff --git a/vm-setup/vm-setup/roles/virtbmc/templates/fake_nodes.json.j2 b/vm-setup/vm-setup/roles/virtbmc/templates/fake_nodes.json.j2 new file mode 100644 index 000000000..06b4d9c3b --- /dev/null +++ b/vm-setup/vm-setup/roles/virtbmc/templates/fake_nodes.json.j2 @@ -0,0 +1,23 @@ +[ + {% for node in vm_nodes %} + { + 'uuid': "{{ vm_id[node.name] }}", + "name": "{{ node.name|replace('_', '-') }}", + 'power_state': 'Off', + 'external_notifier': True, + 'nics': [ + { + 'mac': "{{ node_mac_map.get(node.name).get(networks[0].name) }}", + 'ip': '172.22.0.100' + }, + { + 'mac': "{{ node_mac_map.get(node.name).get(networks[1].name) }}", + 'ip': '172.22.0.110' + } + ] + } + {% if not loop.last %} + , + {% endif %} + {% endfor %} +] diff --git a/vm-setup/vm-setup/setup-playbook.yml b/vm-setup/vm-setup/setup-playbook.yml new file mode 100644 index 000000000..3b8901ede --- /dev/null +++ b/vm-setup/vm-setup/setup-playbook.yml @@ -0,0 +1,13 @@ +--- +- name: Setup dummy baremetal VMs + hosts: virthost + connection: local + gather_facts: true + tasks: + - import_role: + name: common + - import_role: + name: libvirt + - import_role: + name: virtbmc + when: vm_platform|default("libvirt") in ["libvirt", "fake"] diff --git a/vm-setup/vm-setup/teardown-playbook.yml b/vm-setup/vm-setup/teardown-playbook.yml new file mode 100644 index 000000000..571df126b --- /dev/null +++ b/vm-setup/vm-setup/teardown-playbook.yml @@ -0,0 +1,17 @@ +--- +- name: Teardown previous libvirt setup + hosts: virthost + connection: local + gather_facts: true + tasks: + - import_role: + name: common + - import_role: + name: libvirt + vars: + libvirt_action: "teardown" + - import_role: + name: virtbmc + vars: + virtbmc_action: "teardown" +