From 26cdbcc78cacad0ad027adb6cfcb8aecb469dd8d Mon Sep 17 00:00:00 2001 From: Takeshi Nakatani Date: Mon, 22 Mar 2021 11:16:11 +0900 Subject: [PATCH] Initial commit --- .github/ISSUE_TEMPLATE.md | 17 + .github/PULL_REQUEST_TEMPLATE.md | 6 + .github/workflows/build_helper.sh | 603 ++++ .github/workflows/ci.yml | 146 + .github/workflows/ostypevars.sh | 216 ++ .gitignore | 96 + AUTHORS | 7 + COPYING | 21 + ChangeLog | 5 + INSTALL | 0 Makefile.am | 101 + NEWS | 0 README | 29 + README.md | 75 + autogen.sh | 118 + buildutils/Makefile.am | 35 + buildutils/control.in | 24 + buildutils/copyright.in | 8 + buildutils/debian_build.sh | 385 +++ buildutils/k2hdkc-dbaas-cli.spec.in | 102 + buildutils/make_release_version_file.sh | 257 ++ buildutils/make_rpm_changelog.sh | 140 + buildutils/make_variables.sh | 205 ++ buildutils/rpm_build.sh | 194 ++ buildutils/setup_k2hr3_cli_component.sh | 306 ++ configure.ac | 90 + src/Makefile.am | 30 + src/libexec/Makefile.am | 30 + src/libexec/database/CREDIT | 16 + src/libexec/database/Makefile.am | 41 + src/libexec/database/command.sh | 1122 +++++++ src/libexec/database/functions.sh | 1480 +++++++++ src/libexec/database/help.sh | 296 ++ .../database/k2hdkc_dbaas_create_host.templ | 57 + .../database/k2hdkc_dbaas_resource.templ | 132 + .../k2hdkc_dbaas_resource_keys.config | 60 + src/libexec/database/openstack.sh | 2707 +++++++++++++++++ src/libexec/database/options.sh | 654 ++++ src/libexec/database/summary.sh | 46 + src/libexec/database/variables.sh | 323 ++ test/Makefile.am | 38 + test/snapshots/Makefile.am | 29 + test/snapshots/test_database.snapshot | 117 + test/test.sh | 212 ++ test/test_database.sh | 386 +++ test/util_dbaas_request.sh | 628 ++++ 46 files changed, 11590 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100755 .github/workflows/build_helper.sh create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/ostypevars.sh create mode 100644 .gitignore create mode 100644 AUTHORS create mode 100644 COPYING create mode 100644 ChangeLog create mode 100644 INSTALL create mode 100644 Makefile.am create mode 100644 NEWS create mode 100644 README create mode 100644 README.md create mode 100755 autogen.sh create mode 100644 buildutils/Makefile.am create mode 100644 buildutils/control.in create mode 100644 buildutils/copyright.in create mode 100755 buildutils/debian_build.sh create mode 100644 buildutils/k2hdkc-dbaas-cli.spec.in create mode 100755 buildutils/make_release_version_file.sh create mode 100755 buildutils/make_rpm_changelog.sh create mode 100755 buildutils/make_variables.sh create mode 100755 buildutils/rpm_build.sh create mode 100755 buildutils/setup_k2hr3_cli_component.sh create mode 100644 configure.ac create mode 100644 src/Makefile.am create mode 100644 src/libexec/Makefile.am create mode 100644 src/libexec/database/CREDIT create mode 100644 src/libexec/database/Makefile.am create mode 100644 src/libexec/database/command.sh create mode 100644 src/libexec/database/functions.sh create mode 100644 src/libexec/database/help.sh create mode 100644 src/libexec/database/k2hdkc_dbaas_create_host.templ create mode 100644 src/libexec/database/k2hdkc_dbaas_resource.templ create mode 100644 src/libexec/database/k2hdkc_dbaas_resource_keys.config create mode 100644 src/libexec/database/openstack.sh create mode 100644 src/libexec/database/options.sh create mode 100644 src/libexec/database/summary.sh create mode 100644 src/libexec/database/variables.sh create mode 100644 
test/Makefile.am create mode 100644 test/snapshots/Makefile.am create mode 100644 test/snapshots/test_database.snapshot create mode 100755 test/test.sh create mode 100755 test/test_database.sh create mode 100644 test/util_dbaas_request.sh diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..cc28f32 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,17 @@ +## Additional Information +(The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all.) + +- Version of K2HDKC DBaaS Command Line Interface(CLI) being used + ``` + ``` + +- System information (uname -a) + ``` + ``` + +- Distro (cat /etc/issue) + ``` + ``` + +## Details about issue + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..27fa119 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,6 @@ +## Relevant Issue (if applicable) +(If there are Issues related to this PullRequest, please list it.) + +## Details +(Please describe the details of PullRequest.) + diff --git a/.github/workflows/build_helper.sh b/.github/workflows/build_helper.sh new file mode 100755 index 0000000..906a8f1 --- /dev/null +++ b/.github/workflows/build_helper.sh @@ -0,0 +1,603 @@ +#!/bin/sh +# +# Utility helper tools for Github Actions by AntPickax +# +# Copyright 2020 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon, Nov 16 2020 +# REVISION: 1.1 +# + +#--------------------------------------------------------------------- +# Helper for container on Github Actions +#--------------------------------------------------------------------- +func_usage() +{ + echo "" + echo "Usage: $1 [options...]" + echo "" + echo " Required option:" + echo " --help(-h) print help" + echo " --ostype(-os) specify os and version as like \"ubuntu:trusty\"" + echo "" + echo " Option:" + echo " --ostype-vars-file(-f) specify the file that describes the package list to be installed before build(default is ostypevars.sh)" + echo " --force-publish(-p) force the release package to be uploaded. normally the package is uploaded only when it is tagged(determined from GITHUB_REF/GITHUB_EVENT_NAME)." + echo " --not-publish(-np) do not force publish the release package." 
+ echo " --build-number(-num) build number for packaging(default 1)" + echo " --developer-fullname(-devname) specify developer name for debian and ubuntu packaging(default is null, it is specified in configure.ac)" + echo " --developer-email(-devmail) specify developer e-mail for debian and ubuntu packaging(default is null, it is specified in configure.ac)" + echo "" + echo " Option for packagecloud.io:" + echo " --use-packagecloudio-repo(-usepc) use packagecloud.io repository(default), exclusive -notpc option" + echo " --not-use-packagecloudio-repo(-notpc) not use packagecloud.io repository, exclusive -usepc option" + echo " --packagecloudio-token(-pctoken) packagecloud.io token for uploading(specify when uploading)" + echo " --packagecloudio-owner(-pcowner) owner name of uploading destination to packagecloud.io, this is part of the repository path(default is antpickax)" + echo " --packagecloudio-publish-repo(-pcprepo) repository name of uploading destination to packagecloud.io, this is part of the repository path(default is current)" + echo " --packagecloudio-download-repo(-pcdlrepo) repository name of installing packages in packagecloud.io, this is part of the repository path(default is stable)" + echo "" + echo " Note:" + echo " This program uses the GITHUB_REF and GITHUB_EVENT_NAME environment variable internally." + echo "" +} + +# +# Utility functions +# +prn_cmd() +{ + echo "" + echo "$ $*" +} + +run_cmd() +{ + echo "" + echo "$ $*" + # shellcheck disable=SC2068 + $@ + if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : \"$*\"" + exit 1 + fi +} + +#--------------------------------------------------------------------- +# Common Variables +#--------------------------------------------------------------------- +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +MYSCRIPTDIR=$(cd "${MYSCRIPTDIR}" || exit 1; pwd) +SRCTOP=$(cd "${MYSCRIPTDIR}/../.." || exit 1; pwd) + +#--------------------------------------------------------------------- +# Parse Options +#--------------------------------------------------------------------- +echo "[INFO] ${PRGNAME} : Start the parsing of options." + +OPT_OSTYPE= +OPT_OSTYPEVARS_FILE= +OPT_IS_FORCE_PUBLISH= +OPT_BUILD_NUMBER= +OPT_DEBEMAIL= +OPT_DEBFULLNAME= +OPT_USE_PC_REPO= +OPT_PC_TOKEN= +OPT_PC_OWNER= +OPT_PC_PUBLISH_REPO= +OPT_PC_DOWNLOAD_REPO= + +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + break + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-H" ] || [ "X$1" = "X--help" ] || [ "X$1" = "X--HELP" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X-os" ] || [ "X$1" = "X-OS" ] || [ "X$1" = "X--ostype" ] || [ "X$1" = "X--OSTYPE" ]; then + if [ "X${OPT_OSTYPE}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--ostype(-os)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--ostype(-os)\" option is specified without parameter." + exit 1 + fi + OPT_OSTYPE=$1 + + elif [ "X$1" = "X-f" ] || [ "X$1" = "X-F" ] || [ "X$1" = "X--ostype-vars-file" ] || [ "X$1" = "X--OSTYPE-VARS-FILE" ]; then + if [ "X${OPT_OSTYPEVARS_FILE}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--ostype-vars-file(-f)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--ostype-vars-file(-f)\" option is specified without parameter." + exit 1 + fi + if [ ! -f "$1" ]; then + echo "[ERROR] ${PRGNAME} : $1 file is not existed, it is specified \"--ostype-vars-file(-f)\" option." 
+ exit 1 + fi + OPT_OSTYPEVARS_FILE=$1 + + elif [ "X$1" = "X-p" ] || [ "X$1" = "X-P" ] || [ "X$1" = "X--force-publish" ] || [ "X$1" = "X--FORCE-PUBLISH" ]; then + if [ "X${OPT_IS_FORCE_PUBLISH}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--force-publish(-p)\" or \"--not-publish(-np)\" option." + exit 1 + fi + OPT_IS_FORCE_PUBLISH="true" + + elif [ "X$1" = "X-np" ] || [ "X$1" = "X-NP" ] || [ "X$1" = "X--not-publish" ] || [ "X$1" = "X--NOT-PUBLISH" ]; then + if [ "X${OPT_IS_FORCE_PUBLISH}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--force-publish(-p)\" or \"--not-publish(-np)\" option." + exit 1 + fi + OPT_IS_FORCE_PUBLISH="false" + + elif [ "X$1" = "X-num" ] || [ "X$1" = "X-NUM" ] || [ "X$1" = "X--build-number" ] || [ "X$1" = "X--BUILD-NUMBER" ]; then + if [ "X${OPT_BUILD_NUMBER}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--build-number(-num)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--build-number(-num)\" option is specified without parameter." + exit 1 + fi + # shellcheck disable=SC2003 + expr "$1" + 0 >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--build-number(-num)\" option specify with positive NUMBER parameter." + exit 1 + fi + if [ "$1" -le 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--build-number(-num)\" option specify with positive NUMBER parameter." + exit 1 + fi + OPT_BUILD_NUMBER=$1 + + elif [ "X$1" = "X-devname" ] || [ "X$1" = "X-DEVNAME" ] || [ "X$1" = "X--developer-fullname" ] || [ "X$1" = "X--DEVELOPER-FULLNAME" ]; then + if [ "X${OPT_DEBFULLNAME}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--developer-fullname(-devname)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--developer-fullname(-devname)\" option is specified without parameter." + exit 1 + fi + OPT_DEBFULLNAME=$1 + + elif [ "X$1" = "X-devmail" ] || [ "X$1" = "X-DEVMAIL" ] || [ "X$1" = "X--developer-email" ] || [ "X$1" = "X--DEVELOPER-EMAIL" ]; then + if [ "X${OPT_DEBEMAIL}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--developer-email(-devmail)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--developer-email(-devmail)\" option is specified without parameter." + exit 1 + fi + OPT_DEBEMAIL=$1 + + elif [ "X$1" = "X-usepc" ] || [ "X$1" = "X-USEPC" ] || [ "X$1" = "X--use-packagecloudio-repo" ] || [ "X$1" = "X--USE-PACKAGECLOUDIO-REPO" ]; then + if [ "X${OPT_USE_PC_REPO}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--use-packagecloudio-repo(-usepc)\" or \"--not-use-packagecloudio-repo(-notpc)\" option." + exit 1 + fi + OPT_USE_PC_REPO="true" + + elif [ "X$1" = "X-notpc" ] || [ "X$1" = "X-NOTPC" ] || [ "X$1" = "X--not-use-packagecloudio-repo" ] || [ "X$1" = "X--NOT-USE-PACKAGECLOUDIO-REPO" ]; then + if [ "X${OPT_USE_PC_REPO}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--use-packagecloudio-repo(-usepc)\" or \"--not-use-packagecloudio-repo(-notpc)\" option." + exit 1 + fi + OPT_USE_PC_REPO="false" + + elif [ "X$1" = "X-pctoken" ] || [ "X$1" = "X-PCTOKEN" ] || [ "X$1" = "X--packagecloudio-token" ] || [ "X$1" = "X--PACKAGECLOUDIO-TOKEN" ]; then + if [ "X${OPT_PC_TOKEN}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--packagecloudio-token(-pctoken)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--packagecloudio-token(-pctoken)\" option is specified without parameter." 
+ exit 1 + fi + OPT_PC_TOKEN=$1 + + elif [ "X$1" = "X-pcowner" ] || [ "X$1" = "X-PCOWNER" ] || [ "X$1" = "X--packagecloudio-owner" ] || [ "X$1" = "X--PACKAGECLOUDIO-OWNER" ]; then + if [ "X${OPT_PC_OWNER}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--packagecloudio-owner(-pcowner)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--packagecloudio-owner(-pcowner)\" option is specified without parameter." + exit 1 + fi + OPT_PC_OWNER=$1 + + elif [ "X$1" = "X-pcprepo" ] || [ "X$1" = "X-PCPREPO" ] || [ "X$1" = "X--packagecloudio-publish-repo" ] || [ "X$1" = "X--PACKAGECLOUDIO-PUBLICH-REPO" ]; then + if [ "X${OPT_PC_PUBLISH_REPO}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--packagecloudio-publish-repo(-pcprepo)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--packagecloudio-publish-repo(-pcprepo)\" option is specified without parameter." + exit 1 + fi + OPT_PC_PUBLISH_REPO=$1 + + elif [ "X$1" = "X-pcdlrepo" ] || [ "X$1" = "X-PCDLREPO" ] || [ "X$1" = "X--packagecloudio-download-repo" ] || [ "X$1" = "X--PACKAGECLOUDIO-DOWNLOAD-REPO" ]; then + if [ "X${OPT_PC_DOWNLOAD_REPO}" != "X" ]; then + echo "[ERROR] ${PRGNAME} : already set \"--packagecloudio-download-repo(-pcdlrepo)\" option." + exit 1 + fi + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : \"--packagecloudio-download-repo(-pcdlrepo)\" option is specified without parameter." + exit 1 + fi + OPT_PC_DOWNLOAD_REPO=$1 + fi + shift +done + +# +# Check only options that must be specified +# +if [ "X${OPT_OSTYPE}" = "X" ]; then + echo "[ERROR] ${PRGNAME} : \"--ostype(-os)\" option is not specified." + exit 1 +else + CI_OSTYPE=${OPT_OSTYPE} +fi + +#--------------------------------------------------------------------- +# Load variables from file +#--------------------------------------------------------------------- +echo "[INFO] ${PRGNAME} : Load local variables with an external file." + +if [ "X${OPT_OSTYPEVARS_FILE}" = "X" ]; then + OSTYPEVARS_FILE="${MYSCRIPTDIR}/ostypevars.sh" +elif [ ! -f "${OPT_OSTYPEVARS_FILE}" ]; then + echo "[WARNING] ${PRGNAME} : not found ${OPT_OSTYPEVARS_FILE} file, then default(ostypevars.sh) file is used." + OSTYPEVARS_FILE="${MYSCRIPTDIR}/ostypevars.sh" +else + OSTYPEVARS_FILE=${OPT_OSTYPEVARS_FILE} +fi +if [ -f "${OSTYPEVARS_FILE}" ]; then + echo "[INFO] ${PRGNAME} : Load ${OSTYPEVARS_FILE} for local variables by os:type(${CI_OSTYPE})" + . "${OSTYPEVARS_FILE}" +fi +if [ "X${DIST_TAG}" = "X" ]; then + echo "[ERROR] ${PRGNAME} : Distro/Version is not set, please check ${OSTYPEVARS_FILE} and check \"DIST_TAG\" varibale." + exit 1 +fi + +#--------------------------------------------------------------------- +# Merge other variables +#--------------------------------------------------------------------- +echo "[INFO] ${PRGNAME} : Set and check local variables." + +# +# Check GITHUB Environment +# +if [ "X${GITHUB_EVENT_NAME}" = "Xschedule" ]; then + IN_SCHEDULE_PROCESS=1 +else + IN_SCHEDULE_PROCESS=0 +fi +PUBLISH_TAG_NAME= +if [ "X${GITHUB_REF}" != "X" ]; then + echo "${GITHUB_REF}" | grep 'refs/tags/' >/dev/null 2>&1 + if [ $? 
-eq 0 ]; then
+		PUBLISH_TAG_NAME=$(echo "${GITHUB_REF}" | sed 's#refs/tags/##g')
+	fi
+fi
+
+#
+# Check whether to publish
+#
+if [ "X${OPT_IS_FORCE_PUBLISH}" = "X" ]; then
+	if [ ${IN_SCHEDULE_PROCESS} -ne 1 ]; then
+		if [ "X${PUBLISH_TAG_NAME}" != "X" ]; then
+			IS_PUBLISH=1
+		else
+			IS_PUBLISH=0
+		fi
+	else
+		IS_PUBLISH=0
+	fi
+elif [ "X${OPT_IS_FORCE_PUBLISH}" = "Xtrue" ]; then
+	#
+	# FORCE PUBLISH
+	#
+	if [ ${IN_SCHEDULE_PROCESS} -ne 1 ]; then
+		if [ "X${PUBLISH_TAG_NAME}" != "X" ]; then
+			echo "[INFO] ${PRGNAME} : specified \"--force-publish(-p)\" option, so the packages will be published forcibly."
+		fi
+		IS_PUBLISH=1
+	else
+		echo "[WARNING] ${PRGNAME} : specified \"--force-publish(-p)\" option, but the packages will not be published because this process was started by the scheduler."
+		IS_PUBLISH=0
+	fi
+else
+	#
+	# FORCE NOT PUBLISH
+	#
+	IS_PUBLISH=0
+fi
+
+#
+# Set variables for packaging
+#
+if [ "X${OPT_BUILD_NUMBER}" != "X" ]; then
+	BUILD_NUMBER=${OPT_BUILD_NUMBER}
+else
+	BUILD_NUMBER=1
+fi
+if [ "X${OPT_DEBEMAIL}" != "X" ]; then
+	export DEBEMAIL=${OPT_DEBEMAIL}
+fi
+if [ "X${OPT_DEBFULLNAME}" != "X" ]; then
+	export DEBFULLNAME=${OPT_DEBFULLNAME}
+fi
+
+#
+# Set variables for packagecloud.io
+#
+if [ "X${OPT_USE_PC_REPO}" = "Xfalse" ]; then
+	USE_PC_REPO=0
+else
+	USE_PC_REPO=1
+fi
+if [ "X${OPT_PC_TOKEN}" != "X" ]; then
+	PC_TOKEN=${OPT_PC_TOKEN}
+else
+	PC_TOKEN=
+fi
+if [ "X${OPT_PC_OWNER}" != "X" ]; then
+	PC_OWNER=${OPT_PC_OWNER}
+else
+	PC_OWNER="antpickax"
+fi
+if [ "X${OPT_PC_PUBLISH_REPO}" != "X" ]; then
+	PC_PUBLISH_REPO=${OPT_PC_PUBLISH_REPO}
+else
+	PC_PUBLISH_REPO="current"
+fi
+if [ "X${OPT_PC_DOWNLOAD_REPO}" != "X" ]; then
+	PC_DOWNLOAD_REPO=${OPT_PC_DOWNLOAD_REPO}
+else
+	PC_DOWNLOAD_REPO="stable"
+fi
+
+#
+# Information
+#
+echo "[INFO] ${PRGNAME} : All local variables for building and packaging."
+echo " PRGNAME = ${PRGNAME}"
+echo " MYSCRIPTDIR = ${MYSCRIPTDIR}"
+echo " SRCTOP = ${SRCTOP}"
+echo " CI_OSTYPE = ${CI_OSTYPE}"
+echo " IS_OS_UBUNTU = ${IS_OS_UBUNTU}"
+echo " IS_OS_DEBIAN = ${IS_OS_DEBIAN}"
+echo " IS_OS_CENTOS = ${IS_OS_CENTOS}"
+echo " IS_OS_FEDORA = ${IS_OS_FEDORA}"
+echo " OSTYPEVARS_FILE = ${OSTYPEVARS_FILE}"
+echo " DIST_TAG = ${DIST_TAG}"
+echo " INSTALL_PKG_LIST = ${INSTALL_PKG_LIST}"
+echo " CONFIGURE_EXT_OPT = ${CONFIGURE_EXT_OPT}"
+echo " IN_SCHEDULE_PROCESS = ${IN_SCHEDULE_PROCESS}"
+echo " INSTALLER_BIN = ${INSTALLER_BIN}"
+echo " PKG_TYPE_DEB = ${PKG_TYPE_DEB}"
+echo " PKG_TYPE_RPM = ${PKG_TYPE_RPM}"
+echo " PKG_OUTPUT_DIR = ${PKG_OUTPUT_DIR}"
+echo " PKG_EXT = ${PKG_EXT}"
+echo " IS_PUBLISH = ${IS_PUBLISH}"
+echo " PUBLISH_TAG_NAME = ${PUBLISH_TAG_NAME}"
+echo " BUILD_NUMBER = ${BUILD_NUMBER}"
+echo " DEBEMAIL = ${DEBEMAIL}"
+echo " DEBFULLNAME = ${DEBFULLNAME}"
+echo " USE_PC_REPO = ${USE_PC_REPO}"
+echo " PC_TOKEN = **********"
+echo " PC_OWNER = ${PC_OWNER}"
+echo " PC_PUBLISH_REPO = ${PC_PUBLISH_REPO}"
+echo " PC_DOWNLOAD_REPO = ${PC_DOWNLOAD_REPO}"
+
+#---------------------------------------------------------------------
+# Set package repository on packagecloud.io before build
+#---------------------------------------------------------------------
+if [ ${USE_PC_REPO} -eq 1 ]; then
+	echo "[INFO] ${PRGNAME} : Setup packagecloud.io repository."
+
+	#
+	# Check curl
+	#
+	curl --version >/dev/null 2>&1
+	if [ $?
-ne 0 ]; then + run_cmd "${INSTALLER_BIN}" update -y "${INSTALL_QUIET_ARG}" + run_cmd "${INSTALLER_BIN}" install -y "${INSTALL_QUIET_ARG}" curl + fi + + # + # Download and set packagecloud.io repository + # + if [ "${IS_OS_CENTOS}" -eq 1 ] || [ "${IS_OS_FEDORA}" -eq 1 ]; then + PC_REPO_ADD_SH="script.rpm.sh" + else + PC_REPO_ADD_SH="script.deb.sh" + fi + prn_cmd "curl -s https://packagecloud.io/install/repositories/${PC_OWNER}/${PC_DOWNLOAD_REPO}/${PC_REPO_ADD_SH} | bash" + curl -s https://packagecloud.io/install/repositories/${PC_OWNER}/${PC_DOWNLOAD_REPO}/${PC_REPO_ADD_SH} | bash + if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : could not add packagecloud.io repository." + exit 1 + fi +fi + +#--------------------------------------------------------------------- +# Install packages +#--------------------------------------------------------------------- +# +# Update +# +if [ "${IS_OS_UBUNTU}" -eq 1 ] || [ "${IS_OS_DEBIAN}" -eq 1 ]; then + # [NOTE] + # When start to update, it may come across an unexpected interactive interface. + # (May occur with time zone updates) + # Set environment variables to avoid this. + # + export DEBIAN_FRONTEND=noninteractive +fi +echo "[INFO] ${PRGNAME} : Update local packages." +run_cmd "${INSTALLER_BIN}" update -y "${INSTALL_QUIET_ARG}" + +# +# Install +# +if [ "X${INSTALL_PKG_LIST}" != "X" ]; then + echo "[INFO] ${PRGNAME} : Install packages." + run_cmd "${INSTALLER_BIN}" install -y "${INSTALL_QUIET_ARG}" "${INSTALL_PKG_LIST}" +fi + +# +# Install packagecloud.io CLI tool(package_cloud) +# +if [ "${IS_PUBLISH}" -eq 1 ]; then + echo "[INFO] ${PRGNAME} : Install packagecloud.io CLI tool(package_cloud)." + GEM_BIN="gem" + + if [ "${IS_OS_CENTOS}" -eq 1 ]; then + # + # For CentOS + # + if [ "X${CI_OSTYPE}" = "Xcentos:7" ] || [ "X${CI_OSTYPE}" = "Xcentos:centos7" ] || [ "X${CI_OSTYPE}" = "Xcentos:6" ] || [ "X${CI_OSTYPE}" = "Xcentos:centos6" ]; then + # + # For CentOS6/7, using RHSCL because centos has older ruby + # + run_cmd "${INSTALLER_BIN}" install -y "${INSTALL_QUIET_ARG}" centos-release-scl + run_cmd "${INSTALLER_BIN}" install -y "${INSTALL_QUIET_ARG}" rh-ruby24 rh-ruby24-ruby-devel rh-ruby24-rubygem-rake + . /opt/rh/rh-ruby24/enable + fi + run_cmd "${GEM_BIN}" install package_cloud + else + # + # For other than CentOS + # + run_cmd "${GEM_BIN}" install rake package_cloud + fi +fi + +#--------------------------------------------------------------------- +# Build (using /tmp directory) +#--------------------------------------------------------------------- +# +# Copy sources to /tmp directory +# +echo "[INFO] ${PRGNAME} : Copy sources to /tmp directory." +run_cmd cp -rp "${SRCTOP}" /tmp +TMPSRCTOP=$(basename "${SRCTOP}") +BUILD_SRCTOP="/tmp/${TMPSRCTOP}" + +# +# Change current directory +# +run_cmd cd "${BUILD_SRCTOP}" + +# +# Start build +# +echo "[INFO] ${PRGNAME} : Build - run autogen." +run_cmd ./autogen.sh + +echo "[INFO] ${PRGNAME} : Build - run configure." +run_cmd ./configure --prefix=/usr "${CONFIGURE_EXT_OPT}" + +echo "[INFO] ${PRGNAME} : Build - run build and shellcheck." +run_cmd make build + +echo "[INFO] ${PRGNAME} : Build - run check(test)." +run_cmd make check + +#--------------------------------------------------------------------- +# Start packaging +#--------------------------------------------------------------------- +echo "[INFO] ${PRGNAME} : Start packaging." 
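+
+# [EXAMPLE]
+# The packaging and publishing steps in this section are controlled by the
+# options parsed above. For example, to build the packages locally without
+# using the packagecloud.io repository and without publishing, this helper
+# can be run as follows (the os type value is only an example):
+#
+#   .github/workflows/build_helper.sh -os ubuntu:20.04 -notpc -np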
+ +if [ "${PKG_TYPE_RPM}" -eq 1 ]; then + # + # Create rpm packages + # + prn_cmd ./buildutils/rpm_build.sh -buildnum ${BUILD_NUMBER} -y + ./buildutils/rpm_build.sh -buildnum ${BUILD_NUMBER} -y +else + # + # Create debian packages + # + DEBUILD_OPT="" + if [ ${IS_PUBLISH} -ne 1 ]; then + DEBUILD_OPT="-nodebuild" + fi + prn_cmd CONFIGUREOPT="${CONFIGURE_EXT_OPT}" ./buildutils/debian_build.sh -buildnum "${BUILD_NUMBER}" -disttype "${DIST_TAG}" -y "${DEBUILD_OPT}" + CONFIGUREOPT="${CONFIGURE_EXT_OPT}" ./buildutils/debian_build.sh -buildnum "${BUILD_NUMBER}" -disttype "${DIST_TAG}" -y "${DEBUILD_OPT}" +fi +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : Failed to build packages" + exit 1 +fi + +#--------------------------------------------------------------------- +# Start publishing +#--------------------------------------------------------------------- +if [ "${IS_PUBLISH}" -eq 1 ]; then + echo "[INFO] ${PRGNAME} : Start publishing." + + if [ "X${PC_TOKEN}" = "X" ]; then + echo "[ERROR] ${PRGNAME} : Token for uploading to packagecloud.io is not specified." + exit 1 + fi + PC_PUBLISH_PATH="${PC_OWNER}/${PC_PUBLISH_REPO}/${DIST_TAG}" + + prn_cmd PACKAGECLOUD_TOKEN="${PC_TOKEN}" package_cloud push "${PC_PUBLISH_PATH}" "${BUILD_SRCTOP}/${PKG_OUTPUT_DIR}"/*."${PKG_EXT}" + PACKAGECLOUD_TOKEN="${PC_TOKEN}" package_cloud push "${PC_PUBLISH_PATH}" "${BUILD_SRCTOP}/${PKG_OUTPUT_DIR}"/*."${PKG_EXT}" + if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : Failed to publish *.${PKG_EXT} packages" + exit 1 + fi +fi + +#--------------------------------------------------------------------- +# Finish +#--------------------------------------------------------------------- +echo "[SUCCESS] ${PRGNAME} : Finished without error." +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..9e82a35 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,146 @@ +# +# Utility helper tools for Github Actions by AntPickax +# +# Copyright 2020 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Nov 13 2020 +# REVISION: 1.0 +# + +#------------------------------------------------------------------------------------ +# Github Actions +#------------------------------------------------------------------------------------ +name: Nobuild AntPickax CI + +# +# Events +# +on: + push: + pull_request: + # + # CRON event is fire on every sunday(UTC). 
+ # + schedule: + - cron: '0 15 * * 0' + +# +# Jobs +# +jobs: + build: + runs-on: ubuntu-latest + + # + # build matrix for containers + # + strategy: + # + # do not stop jobs automatically if any of the jobs fail + # + fail-fast: false + + # + # matrix for containers + # + matrix: + container: + - ubuntu:20.04 + - ubuntu:18.04 + - ubuntu:16.04 + - debian:buster + - debian:stretch + - centos:centos8 + - centos:centos7 + - fedora:32 + - fedora:31 + - fedora:30 + + container: + image: ${{ matrix.container }} + + steps: + # + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + # + # [NOTE] using checkout@v1 instead of @v2 + # We need to run on centos6, it fails checkout@v2 because checkout@v2 uses nodejs12 + # and nodejs12 cannot run old os(like centos6). + # However checkout@v1 does not use nodejs, then it can run on old os(like centos6) + # see: https://github.com/actions/runner/issues/337 + # + - name: Checkout sources + uses: actions/checkout@v1 + + # + # Run building and packaging helper + # + # [NOTE] Secrets and Environments + # When passing parameters to build_helper.sh in build and packaging, + # use Secret of organization or repository. + # + # By setting the correct values for the following variable names, + # they will be passed as parameters in build_helper.sh. + # OSTYPE_VARS_FILE : specify custom variables file + # BUILD_NUMBER : buld number for packaging + # DEVELOPER_FULLNAME : developer name for package + # DEVELOPER_EMAIL : developer e-mail for package + # FORCE_PUBLISH : true means force to publish packages, false means never publish + # USE_PACKAGECLOUD_REPO : true means using pacakgecloud.io repo, false is not using + # * PACKAGECLOUD_TOKEN : The token for publishing to packagcloud.io + # PACKAGECLOUD_OWNER : owner name as a pat of path to packagcloud.io for publishing/downloading + # PACKAGECLOUD_PUBLISH_REPO : repo name as a pat of path to packagcloud.io for publishing + # PACKAGECLOUD_DOWNLOAD_REPO : repo name as a pat of path to packagcloud.io for downloading + # + # [REQUIRED] + # PACKAGECLOUD_TOKEN is required to publish(upload) the package!!! + # For yahoojapan organization repositories, always set PACKAGECLOUD_TOKEN!!! 
+ # + - name: Run building and packaging + env: + TMP_CI_OSTYPE_VARS_FILE: ${{ secrets.OSTYPE_VARS_FILE }} + TMP_CI_BUILD_NUMBER: ${{ secrets.BUILD_NUMBER }} + TMP_CI_DEVELOPER_FULLNAME: ${{ secrets.DEVELOPER_FULLNAME }} + TMP_CI_DEVELOPER_EMAIL: ${{ secrets.DEVELOPER_EMAIL }} + TMP_CI_FORCE_PUBLISH: ${{ secrets.FORCE_PUBLISH }} + TMP_CI_USE_PACKAGECLOUD_REPO: ${{ secrets.USE_PACKAGECLOUD_REPO }} + TMP_CI_PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }} + TMP_CI_PACKAGECLOUD_OWNER: ${{ secrets.PACKAGECLOUD_OWNER }} + TMP_CI_PACKAGECLOUD_PUBLISH_REPO: ${{ secrets.PACKAGECLOUD_PUBLISH_REPO }} + TMP_CI_PACKAGECLOUD_DOWNLOAD_REPO: ${{ secrets.PACKAGECLOUD_DOWNLOAD_REPO }} + + run: | + if [ "X${TMP_CI_OSTYPE_VARS_FILE}" != "X" ]; then OPT_CI_OSTYPE_VARS_FILE="-f ${TMP_CI_OSTYPE_VARS_FILE}"; fi + if [ "X${TMP_CI_BUILD_NUMBER}" != "X" ]; then OPT_CI_BUILD_NUMBER="-num ${TMP_CI_BUILD_NUMBER}"; fi + if [ "X${TMP_CI_DEVELOPER_FULLNAME}" != "X" ]; then OPT_CI_DEVELOPER_FULLNAME="-devname ${TMP_CI_DEVELOPER_FULLNAME}"; fi + if [ "X${TMP_CI_DEVELOPER_EMAIL}" != "X" ]; then OPT_CI_DEVELOPER_EMAIL="-devmail ${TMP_CI_DEVELOPER_EMAIL}"; fi + if [ "X${TMP_CI_FORCE_PUBLISH}" = "Xtrue" ]; then OPT_CI_FORCE_PUBLISH="-p"; elif [ "X${TMP_CI_FORCE_PUBLISH}" = "Xfalse" ]; then OPT_CI_FORCE_PUBLISH="-np"; fi + if [ "X${TMP_CI_USE_PACKAGECLOUD_REPO}" = "Xtrue" ]; then OPT_CI_USE_PACKAGECLOUD_REPO="-usepc"; elif [ "X${TMP_CI_USE_PACKAGECLOUD_REPO}" = "Xfalse" ]; then OPT_CI_USE_PACKAGECLOUD_REPO="-notpc"; fi + if [ "X${TMP_CI_PACKAGECLOUD_TOKEN}" != "X" ]; then OPT_CI_PACKAGECLOUD_TOKEN="-pctoken ${TMP_CI_PACKAGECLOUD_TOKEN}"; fi + if [ "X${TMP_CI_PACKAGECLOUD_OWNER}" != "X" ]; then OPT_CI_PACKAGECLOUD_OWNER="-pcowner ${TMP_CI_PACKAGECLOUD_OWNER}"; fi + if [ "X${TMP_CI_PACKAGECLOUD_PUBLISH_REPO}" != "X" ]; then OPT_CI_PACKAGECLOUD_PUBLISH_REPO="-pcprepo ${TMP_CI_PACKAGECLOUD_PUBLISH_REPO}"; fi + if [ "X${TMP_CI_PACKAGECLOUD_DOWNLOAD_REPO}" != "X" ]; then OPT_CI_PACKAGECLOUD_DOWNLOAD_REPO="-pcdlrepo ${TMP_CI_PACKAGECLOUD_DOWNLOAD_REPO}"; fi + /bin/sh -c "$GITHUB_WORKSPACE/.github/workflows/build_helper.sh -os ${{ matrix.container }} $OPT_CI_OSTYPE_VARS_FILE $OPT_CI_BUILD_NUMBER $OPT_CI_DEVELOPER_FULLNAME $OPT_CI_DEVELOPER_EMAIL $OPT_CI_FORCE_PUBLISH $OPT_CI_USE_PACKAGECLOUD_REPO $OPT_CI_PACKAGECLOUD_TOKEN $OPT_CI_PACKAGECLOUD_OWNER $OPT_CI_PACKAGECLOUD_PUBLISH_REPO $OPT_CI_PACKAGECLOUD_DOWNLOAD_REPO" + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts=4 fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/.github/workflows/ostypevars.sh b/.github/workflows/ostypevars.sh new file mode 100644 index 0000000..7f5cb4a --- /dev/null +++ b/.github/workflows/ostypevars.sh @@ -0,0 +1,216 @@ +# +# Utility helper tools for Github Actions by AntPickax +# +# Copyright 2020 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Nov 13 2020 +# REVISION: 1.0 +# + +#--------------------------------------------------------------------- +# About this file +#--------------------------------------------------------------------- +# This file is loaded into the build_helper.sh script. +# The build_helper.sh script is a Github Actions helper script that +# builds and packages the target repository. +# This file is mainly created to define variables that differ depending +# on the OS and version. +# It also contains different information(such as packages to install) +# for each repository. +# +# In the initial state, you need to set the following variables: +# DIST_TAG : "Distro/Version" for publishing packages +# INSTALL_PKG_LIST : A list of packages to be installed for build and +# packaging +# CONFIGURE_EXT_OPT : Options to specify when running configure +# INSTALLER_BIN : Package management command +# PKG_TYPE_DEB : Set to 1 for debian packages, 0 otherwise +# PKG_TYPE_RPM : Set to 1 for rpm packages, 0 otherwise +# PKG_OUTPUT_DIR : Set the directory path where the package will +# be created relative to the top directory of the +# source +# PKG_EXT : The extension of the package file +# IS_OS_UBUNTU : Set to 1 for Ubuntu, 0 otherwise +# IS_OS_DEBIAN : Set to 1 for Debian, 0 otherwise +# IS_OS_CENTOS : Set to 1 for CentOS, 0 otherwise +# IS_OS_FEDORA : Set to 1 for Fedora, 0 otherwise +# +# Set these variables according to the CI_OSTYPE variable. +# The value of the CI_OSTYPE variable matches the name of the +# Container (docker image) used in Github Actions. +# Check the ".github/workflow/***.yml" file for the value. +# + +#--------------------------------------------------------------------- +# Default values +#--------------------------------------------------------------------- +DIST_TAG= +INSTALL_PKG_LIST= +CONFIGURE_EXT_OPT= +INSTALLER_BIN= +PKG_TYPE_DEB=0 +PKG_TYPE_RPM=0 +PKG_OUTPUT_DIR= +PKG_EXT= +IS_OS_UBUNTU=0 +IS_OS_DEBIAN=0 +IS_OS_CENTOS=0 +IS_OS_FEDORA=0 + +#--------------------------------------------------------------------- +# Variables for each OS Type +#--------------------------------------------------------------------- +if [ "X${CI_OSTYPE}" = "Xubuntu:20.04" ] || [ "X${CI_OSTYPE}" = "Xubuntu:focal" ]; then + DIST_TAG="ubuntu/focal" + INSTALL_PKG_LIST="git autoconf autotools-dev make dh-make fakeroot dpkg-dev devscripts pkg-config ruby-dev rubygems rubygems-integration procps shellcheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="apt-get" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=1 + PKG_TYPE_RPM=0 + PKG_OUTPUT_DIR="debian_build" + PKG_EXT="deb" + IS_OS_UBUNTU=1 + +elif [ "X${CI_OSTYPE}" = "Xubuntu:18.04" ] || [ "X${CI_OSTYPE}" = "Xubuntu:bionic" ]; then + DIST_TAG="ubuntu/bionic" + INSTALL_PKG_LIST="git autoconf autotools-dev make dh-make fakeroot dpkg-dev devscripts pkg-config ruby-dev rubygems rubygems-integration procps shellcheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="apt-get" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=1 + PKG_TYPE_RPM=0 + PKG_OUTPUT_DIR="debian_build" + PKG_EXT="deb" + IS_OS_UBUNTU=1 + +elif [ "X${CI_OSTYPE}" = "Xubuntu:16.04" ] || [ "X${CI_OSTYPE}" = "Xubuntu:xenial" ]; then + DIST_TAG="ubuntu/xenial" + INSTALL_PKG_LIST="git autoconf autotools-dev make dh-make fakeroot dpkg-dev devscripts pkg-config ruby-dev rubygems rubygems-integration procps shellcheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="apt-get" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=1 + PKG_TYPE_RPM=0 + PKG_OUTPUT_DIR="debian_build" + PKG_EXT="deb" + IS_OS_UBUNTU=1 + +elif 
[ "X${CI_OSTYPE}" = "Xdebian:10" ] || [ "X${CI_OSTYPE}" = "Xdebian:buster" ]; then + DIST_TAG="debian/buster" + INSTALL_PKG_LIST="git autoconf autotools-dev make dh-make fakeroot dpkg-dev devscripts pkg-config ruby-dev rubygems rubygems-integration procps shellcheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="apt-get" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=1 + PKG_TYPE_RPM=0 + PKG_OUTPUT_DIR="debian_build" + PKG_EXT="deb" + IS_OS_DEBIAN=1 + +elif [ "X${CI_OSTYPE}" = "Xdebian:9" ] || [ "X${CI_OSTYPE}" = "Xdebian:stretch" ]; then + DIST_TAG="debian/stretch" + INSTALL_PKG_LIST="git autoconf autotools-dev make dh-make fakeroot dpkg-dev devscripts pkg-config ruby-dev rubygems rubygems-integration procps shellcheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="apt-get" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=1 + PKG_TYPE_RPM=0 + PKG_OUTPUT_DIR="debian_build" + PKG_EXT="deb" + IS_OS_DEBIAN=1 + +elif [ "X${CI_OSTYPE}" = "Xcentos:8" ] || [ "X${CI_OSTYPE}" = "Xcentos:centos8" ]; then + DIST_TAG="el/8" + INSTALL_PKG_LIST="git autoconf automake gcc-c++ make pkgconfig redhat-rpm-config rpm-build ruby-devel rubygems procps" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="dnf" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=0 + PKG_TYPE_RPM=1 + PKG_OUTPUT_DIR="." + PKG_EXT="rpm" + IS_OS_CENTOS=1 + + # [NOTE] + # Since it is difficult to install ShellCheck on CentOS8, it will not be installed. + # + +elif [ "X${CI_OSTYPE}" = "Xcentos:7" ] || [ "X${CI_OSTYPE}" = "Xcentos:centos7" ]; then + DIST_TAG="el/7" + INSTALL_PKG_LIST="git autoconf automake gcc-c++ make pkgconfig redhat-rpm-config rpm-build ruby-devel rubygems procps" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="yum" + INSTALL_QUIET_ARG="" + PKG_TYPE_DEB=0 + PKG_TYPE_RPM=1 + PKG_OUTPUT_DIR="." + PKG_EXT="rpm" + IS_OS_CENTOS=1 + + # + # For ShellCheck + # + "${INSTALLER_BIN}" install -y "${INSTALL_QUIET_ARG}" epel-release + yum-config-manager --disable epel + "${INSTALLER_BIN}" --enablerepo=epel install -y "${INSTALL_QUIET_ARG}" ShellCheck + +elif [ "X${CI_OSTYPE}" = "Xfedora:32" ]; then + DIST_TAG="fedora/32" + INSTALL_PKG_LIST="git autoconf automake gcc-c++ make pkgconfig redhat-rpm-config rpm-build ruby-devel rubygems procps ShellCheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="dnf" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=0 + PKG_TYPE_RPM=1 + PKG_OUTPUT_DIR="." + PKG_EXT="rpm" + IS_OS_FEDORA=1 + +elif [ "X${CI_OSTYPE}" = "Xfedora:31" ]; then + DIST_TAG="fedora/31" + INSTALL_PKG_LIST="git autoconf automake gcc-c++ make pkgconfig redhat-rpm-config rpm-build ruby-devel rubygems procps ShellCheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="dnf" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=0 + PKG_TYPE_RPM=1 + PKG_OUTPUT_DIR="." + PKG_EXT="rpm" + IS_OS_FEDORA=1 + +elif [ "X${CI_OSTYPE}" = "Xfedora:30" ]; then + DIST_TAG="fedora/30" + INSTALL_PKG_LIST="git autoconf automake gcc-c++ make pkgconfig redhat-rpm-config rpm-build ruby-devel rubygems procps ShellCheck" + CONFIGURE_EXT_OPT="" + INSTALLER_BIN="dnf" + INSTALL_QUIET_ARG="-qq" + PKG_TYPE_DEB=0 + PKG_TYPE_RPM=1 + PKG_OUTPUT_DIR="." + PKG_EXT="rpm" + IS_OS_FEDORA=1 +fi + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1017111 --- /dev/null +++ b/.gitignore @@ -0,0 +1,96 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! 
JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# +# autotools +# +aclocal.m4 +autom4te.cache +autoscan.log +config.guess +config.log +config.status +configure +install-sh +missing +test-driver + +# +# automakes +# +Makefile.in +Makefile + +# +# archive files and backups +# +*.tgz +*.tar.gz +*.rpm +*.deb +*.swp + +# +# Other built objects +# +RELEASE_VERSION +VERSION +*.pc +*.spec +*.log +*.trs +buildutils/control +buildutils/copyright +rpmbuild +rpmbuild/* +debian +debian/* +debian_build +debian_build/* + +# +# Files in the k2hr3_cli repository that should be excluded +# +src/k2hr3 +src/libexec/common +src/libexec/common/* +src/libexec/config +src/libexec/config/* +src/libexec/token +src/libexec/token/* +src/libexec/resource +src/libexec/resource/* +src/libexec/policy +src/libexec/policy/* +src/libexec/role +src/libexec/role/* +src/libexec/userdata +src/libexec/userdata/* +test/util_request.sh +test/util_test.sh + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..909569f --- /dev/null +++ b/AUTHORS @@ -0,0 +1,7 @@ +1. Takeshi Nakatani + +Wrote from scratch the initial version of k2hdkc dbaas cli. + +3. Hirotaka Wakabayashi + +Technical engineer of k2hdkc dbaas cli product. diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..806162e --- /dev/null +++ b/COPYING @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Yahoo Japan Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..443145c --- /dev/null +++ b/ChangeLog @@ -0,0 +1,5 @@ +k2hdkc-dbaas-cli (1.0.0) trusty; urgency=low + + * First version + + -- Takeshi Nakatani Mon, 22 Mar 2021 09:15:16 +0900 diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..e69de29 diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..d1b206a --- /dev/null +++ b/Makefile.am @@ -0,0 +1,101 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +SUBDIRS = src buildutils test + +ACLOCAL_AMFLAGS = -I m4 + +EXTRA_DIST = RELEASE_VERSION + +# [CLEAN] +# Use clean-local instead of CLEANFILES as it contains subdirectories and so on. +# +clean-local: clean-local-files + +.PHONY: clean-local-files + +clean-local-files: + rm -f *.log; \ + rm -f src/libexec/database/VERSION; \ + DEFAULT_GIT_DOMAIN="@GIT_DOMAIN@" DEFAULT_K2HR3_CLI_ORG="@GIT_ORG@" buildutils/setup_k2hr3_cli_component.sh --clean + +# [BUILD] +# Since it is a shell script-only project, there is nothing to build, +# but we will create a VERSION file. +# Then, shellcheck checks all the source code. +# +build: setup_k2hr3_cli build_version shellcheck + +.PHONY: setup_k2hr3_cli build_version shellcheck + +# [SETUP] +# Copy the required files from the K2HR3 CLI. +# +setup_k2hr3_cli: + @if true; then \ + echo "*** Copy the required directories/files from the K2HR3 CLI repository."; \ + DEFAULT_GIT_DOMAIN="@GIT_DOMAIN@" DEFAULT_K2HR3_CLI_ORG="@GIT_ORG@" buildutils/setup_k2hr3_cli_component.sh --k2hr3_cli_repo k2hr3_cli; \ + fi + +# [VERSION] +# Create a VERSION file in the src/libexec/database directory. 
+# +build_version: + @if true; then \ + echo "*** Create VERSION file in src/libexec/database directory"; \ + RELEASE_VERSION_BASE=`cat RELEASE_VERSION`; \ + GIT_SHORT_HASH=`git rev-parse --short HEAD 2>/dev/null || echo \"Unknown\"`; \ + echo "$${RELEASE_VERSION_BASE}($${GIT_SHORT_HASH})" > src/libexec/database/VERSION 2>&1; \ + echo " -> Succeed"; \ + echo ""; \ + fi + +# [SHELLCHECK] +# +SHELLCHECK_CMD = shellcheck +SHELLCHECK_OPT = --shell=sh +SHELLCHECK_COMMON_IGN = --exclude=SC1117,SC1090,SC1091,SC2181 +SHELLCHECK_CUSTOM_IGN = --exclude=SC1117,SC1090,SC1091,SC2181,SC2034 + +shellcheck: + @if type shellcheck > /dev/null 2>&1; then \ + echo "*** Check all files with ShellCheck"; \ + $(SHELLCHECK_CMD) $(SHELLCHECK_OPT) $(SHELLCHECK_COMMON_IGN) $$(find buildutils -type f -name '*.sh' | grep -v ostypevars.sh) || exit 1; \ + $(SHELLCHECK_CMD) $(SHELLCHECK_OPT) $(SHELLCHECK_COMMON_IGN) $$(find buildutils -type f -name '*.sh') || exit 1; \ + if [ -f .github/workflows/ostypevars.sh ]; then \ + $(SHELLCHECK_CMD) $(SHELLCHECK_OPT) $(SHELLCHECK_CUSTOM_IGN) .github/workflows/ostypevars.sh || exit 1; \ + fi; \ + $(SHELLCHECK_CMD) $(SHELLCHECK_OPT) $(SHELLCHECK_COMMON_IGN) $$(find src/libexec/database -type f -name '*.sh') || exit 1; \ + $(SHELLCHECK_CMD) $(SHELLCHECK_OPT) $(SHELLCHECK_COMMON_IGN) $$(find test -type f -name '*.sh' | grep -v 'util_test.sh' | grep -v 'util_request.sh') || exit 1; \ + echo " -> No error was detected."; \ + echo ""; \ + else \ + echo "ShellCheck is not installed, so skip check by it."; \ + echo ""; \ + fi + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..e69de29 diff --git a/README b/README new file mode 100644 index 0000000..a1f7480 --- /dev/null +++ b/README @@ -0,0 +1,29 @@ +K2HDKC DBaaS Command Line Interface +----------------------------------- + +K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin + +### Overview +The K2HDKC DBaaS CLI (Command Line Interface of Database as a +Service for K2HDKC) is a tool for building a K2HDKC cluster in +conjunction with K2HR3. +The Trove version of K2HDKC DBaaS is available, but this +K2HDKC DBaaS CLI allows you to build K2HDKC clusters without +the need for a Trove system. +With the basic components of OpenStack and the K2HR3 system +that works with it, you can easily build a K2HD KC cluster +using the K2HDKC DBaaS CLI. + +### Feature +The K2HDKC DBaaS CLI(Command Line Interface) acts as a one of Plugin for the K2HR3 CLI(Command Line Interface). +This K2HDKC DBaaS CLI is included as one command "database" in the K2HR3 CLI, allowing you to build a K2HDKC cluster. + +### Documents +- Github Pages - https://dbaas.k2hdkc.antpick.ax/ +- Github Wiki - https://github.com/yahoojapan/k2hdkc_dbaas_cli/wiki +- AntPickax - https://antpick.ax/ + +### License +This software is released under the MIT License, see the license file. + +Copyright(C) 2021 Yahoo Japan Corporation. 
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..40c01e9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,75 @@
+K2HDKC DBaaS Command Line Interface(K2HR3 CLI Plugin)
+=====================================================
+[![Nobuild AntPickax CI](https://github.com/yahoojapan/k2hdkc_dbaas_cli/workflows/Nobuild%20AntPickax%20CI/badge.svg)](https://github.com/yahoojapan/k2hdkc_dbaas_cli/actions)
+[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/yahoojapan/k2hdkc_dbaas_cli/blob/master/COPYING)
+[![GitHub forks](https://img.shields.io/github/forks/yahoojapan/k2hdkc_dbaas_cli.svg)](https://github.com/yahoojapan/k2hdkc_dbaas_cli/network)
+[![GitHub stars](https://img.shields.io/github/stars/yahoojapan/k2hdkc_dbaas_cli.svg)](https://github.com/yahoojapan/k2hdkc_dbaas_cli/stargazers)
+[![GitHub issues](https://img.shields.io/github/issues/yahoojapan/k2hdkc_dbaas_cli.svg)](https://github.com/yahoojapan/k2hdkc_dbaas_cli/issues)
+[![debian packages](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/antpickax/stable)
+[![RPM packages](https://img.shields.io/badge/rpm-packagecloud.io-844fec.svg)](https://packagecloud.io/antpickax/stable)
+
+## **K2HDKC DBaaS CLI(Command Line Interface)** - (K2HR3 CLI Plugin)
+The **K2HDKC DBaaS CLI** (Command Line Interface of Database as a Service for K2HDKC) acts as a plugin for the [K2HR3 CLI(Command Line Interface)](https://k2hr3.antpick.ax/).
+This **K2HDKC DBaaS CLI** is included as one command, `database`, in the K2HR3 CLI, allowing you to build a K2HDKC cluster.
+
+![K2HDKC DBaaS](https://dbaas.k2hdkc.antpick.ax/images/top_k2hdkc_dbaas.png)
+
+## K2HDKC DBaaS System Overview
+K2HDKC DBaaS provides its functionality through Trove as a panel(feature) of OpenStack.
+The [K2HR3](https://k2hr3.antpick.ax/) system is used as the backend, an RBAC(Role Based Access Control) system dedicated to K2HDKC DBaaS.
+Normally, users do not need to use the K2HR3 system directly; the DBaaS functions are used through the Trove Dashboard(or Trove CLI).
+
+The overall system overview diagram is shown below.
+![K2HDKC DBaaS system](https://dbaas.k2hdkc.antpick.ax/images/overview.png)
+
+### K2HR3 System Overview
+**K2HR3** (**K2H**dkc based **R**esource and **R**oles and policy **R**ules) is an extended **RBAC** (**R**ole **B**ased **A**ccess **C**ontrol) system.
+K2HR3 works as an RBAC in cooperation with **OpenStack**, which is an **IaaS**(Infrastructure as a Service), and also provides useful functions for using RBAC.
+
+K2HR3 is a system that defines and controls **HOW**(policy Rule), **WHO**(Role) and **WHAT**(Resource) as RBAC.
+Users of K2HR3 can define **Role**(WHO) groups to access freely defined **Resource**(WHAT) and control access by **policy Rule**(HOW).
+By defining the information and assets required for any system as a **Resource**(WHAT), the K2HR3 system can provide access control in every situation.
+
+K2HR3 provides the **+SERVICE** feature, which **strongly supports** linkage of user systems, functions and information.
+
+![K2HR3 system overview](https://k2hr3.antpick.ax/images/overview_abstract.png)
+
+K2HR3 is built with the [k2hdkc](https://github.com/yahoojapan/k2hdkc), [k2hash](https://github.com/yahoojapan/k2hash), [chmpx](https://github.com/yahoojapan/chmpx) and [k2hash transaction plugin](https://github.com/yahoojapan/k2htp_dtor) components by [AntPickax](https://antpick.ax/).
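+
+## Usage
+This plugin is used through the K2HR3 CLI. For example, assuming the K2HR3 CLI entry point is installed as `k2hr3`, the help for the `database` command added by this plugin can be displayed as follows (see `src/libexec/database/help.sh` for the available sub-commands and options):
+```
+$ k2hr3 database --help
+```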
+ +## Documents +[K2HDKC DBaaS Document](https://dbaas.k2hdkc.antpick.ax/index.html) +[Github wiki page](https://github.com/yahoojapan/k2hdkc_dbaas_cli/wiki) + +[About k2hdkc Document](https://k2hdkc.antpick.ax/index.html) +[About k2hr3 Document](https://k2hr3.antpick.ax/index.html) +[About chmpx Document](https://chmpx.antpick.ax/index.html) +[About k2hr3 Document](https://k2hr3.antpick.ax/index.html) + +[About AntPickax](https://antpick.ax/) + +## Repositories +[k2hdkc dbaas](https://github.com/yahoojapan/k2hdkc_dbaas) +[k2hr3](https://github.com/yahoojapan/k2hr3) +[k2hr3_app](https://github.com/yahoojapan/k2hr3_app) +[k2hr3_api](https://github.com/yahoojapan/k2hr3_api) +[k2hr3_get_resource](https://github.com/yahoojapan/k2hr3_get_resource) +[k2hdkc](https://github.com/yahoojapan/k2hdkc) +[k2hdkc_dbaas_override_conf](https://github.com/yahoojapan/k2hdkc_dbaas_override_conf) +[chmpx](https://github.com/yahoojapan/chmpx) + +## Packages +[k2hdkc(packagecloud.io)](https://packagecloud.io/app/antpickax/stable/search?q=k2hdkc) +[chmpx(packagecloud.io)](https://packagecloud.io/app/antpickax/stable/search?q=chmpx) +[k2hdkc-dbaas-override-conf(packagecloud.io)](https://packagecloud.io/app/antpickax/stable/search?q=k2hdkc-dbaas-override-conf) +[k2hr3-cli(packagecloud.io)](https://packagecloud.io/app/antpickax/stable/search?q=k2hr3-cli) +[k2hr3-app(npm packages)](https://www.npmjs.com/package/k2hr3-app) +[k2hr3-api(npm packages)](https://www.npmjs.com/package/k2hr3-api) +[k2hr3-get-resource(packagecloud.io)](https://packagecloud.io/app/antpickax/stable/search?q=k2hr3-get-resource) + +### License +This software is released under the MIT License, see the license file. + +### AntPickax +K2HDKC DbaaS CLI is one of [AntPickax](https://antpick.ax/) products. + +Copyright(C) 2021 Yahoo Japan Corporation. diff --git a/autogen.sh b/autogen.sh new file mode 100755 index 0000000..dbfaba7 --- /dev/null +++ b/autogen.sh @@ -0,0 +1,118 @@ +#!/bin/sh +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# +# Usage: autogen.sh [-noupdate_version_file] [-no_aclocal_force] [-no_check_ver_diff] +# +AUTOGEN_NAME=$(basename "$0") +AUTOGEN_DIR=$(dirname "$0") +SRCTOP=$(cd "${AUTOGEN_DIR}" || exit 1; pwd) + +echo "** run autogen.sh" + +# +# Parameter +# +NOUPDATE="no" +FORCEPARAM="--force" +PARAMETERS="" +while [ $# -ne 0 ]; do + if [ "X$1" = "X-noupdate_version_file" ]; then + NOUPDATE="yes" + FORCEPARAM="" # do not need force + elif [ "X$1" = "X-no_aclocal_force" ]; then + FORCEPARAM="" + elif [ "X$1" = "X-no_check_ver_diff" ]; then + PARAMETERS="${PARAMETERS} $1" + elif [ "X$1" = "X-h" ] || [ "X$1" = "X--help" ]; then + echo "Usage: ${AUTOGEN_NAME} [-noupdate_version_file] [-no_aclocal_force] [-no_check_ver_diff]" + exit 1 + else + echo "ERROR: Unknown option $1" + echo "Usage: ${AUTOGEN_NAME} [-noupdate_version_file] [-no_aclocal_force] [-no_check_ver_diff]" + exit 1 + fi + shift +done + +# +# update RELEASE_VERSION file +# +if [ "X${NOUPDATE}" = "Xno" ]; then + echo "--- run make_release_version_file.sh" + /bin/sh -c "${SRCTOP}/buildutils/make_release_version_file.sh" "${PARAMETERS}" + if [ $? -ne 0 ]; then + echo "ERROR: update RELEASE_VERSION file" + exit 1 + fi +fi + +# +# Check files +# +if [ ! -f "${SRCTOP}/NEWS" ]; then + touch "${SRCTOP}/NEWS" +fi +if [ ! -f "${SRCTOP}/README" ]; then + touch "${SRCTOP}/README" +fi +if [ ! -f "${SRCTOP}/AUTHORS" ]; then + touch "${SRCTOP}/AUTHORS" +fi +if [ ! -f "${SRCTOP}/ChangeLog" ]; then + touch "${SRCTOP}/ChangeLog" +fi + +# +# Build configure and Makefile +# +echo "--- run aclocal ${FORCEPARAM}" +aclocal ${FORCEPARAM} +if [ $? -ne 0 ]; then + echo "ERROR: something error occurred in aclocal ${FORCEPARAM}" + exit 1 +fi + +echo "--- run automake -c --add-missing" +automake -c --add-missing +if [ $? -ne 0 ]; then + echo "ERROR: something error occurred in automake -c --add-missing" + exit 1 +fi + +echo "--- run autoconf" +autoconf +if [ $? -ne 0 ]; then + echo "ERROR: something error occurred in autoconf" + exit 1 +fi + +echo "** SUCCEED: autogen" +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/Makefile.am b/buildutils/Makefile.am new file mode 100644 index 0000000..7dc4e85 --- /dev/null +++ b/buildutils/Makefile.am @@ -0,0 +1,35 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +EXTRA_DIST =make_variables.sh \ + make_release_version_file.sh \ + make_rpm_changelog.sh \ + debian_build.sh \ + rpm_build.sh \ + setup_k2hr3_cli_component.sh + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/control.in b/buildutils/control.in new file mode 100644 index 0000000..2e5d2df --- /dev/null +++ b/buildutils/control.in @@ -0,0 +1,24 @@ +Source: @PACKAGE_NAME@ +Section: utils +Priority: optional +Maintainer: @DEV_NAME@ <@DEV_EMAIL@> +Build-Depends: @DEBHELPER_DEP@ +Standards-Version: 3.9.8 +Homepage: https://@GIT_DOMAIN@/@GIT_ORG@/@GIT_REPO@ +Vcs-Git: git://@GIT_DOMAIN@/@GIT_ORG@/@GIT_REPO@.git +Vcs-Browser: https://@GIT_DOMAIN@/@GIT_ORG@/@GIT_REPO@ + +Package: @PACKAGE_NAME@ +Section: utils +Architecture: all +Depends: ${misc:Depends}, k2hr3-cli +Description: @SHORTDESC@ + The K2HDKC DBaaS CLI (Command Line Interface of Database as a + Service for K2HDKC) is a tool for building a K2HDKC cluster + in conjunction with K2HR3. + The Trove version of K2HDKC DBaaS is available, but this + K2HDKC DBaaS CLI allows you to build K2HDKC clusters without + the need for a Trove system. + With the basic components of OpenStack and the K2HR3 system + that works with it, you can easily build a K2HD KC cluster + using the K2HDKC DBaaS CLI. diff --git a/buildutils/copyright.in b/buildutils/copyright.in new file mode 100644 index 0000000..79547f5 --- /dev/null +++ b/buildutils/copyright.in @@ -0,0 +1,8 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: @PACKAGE_NAME@ +Source: https://@GIT_DOMAIN@/@GIT_ORG@/@GIT_REPO@ + +Files: * +Copyright: 2021 Yahoo Japan Corporation +License: @PKGLICENSE@ +@DEBCOPYING@ diff --git a/buildutils/debian_build.sh b/buildutils/debian_build.sh new file mode 100755 index 0000000..6b303c8 --- /dev/null +++ b/buildutils/debian_build.sh @@ -0,0 +1,385 @@ +#!/bin/sh +# +# Utility tools for building configure/packages by AntPickax +# +# Copyright 2018 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Apr 13 2018 +# REVISION: +# + +# +# Autobuid for debian package +# +func_usage() +{ + echo "" + echo "Usage: $1 [-buildnum ] [-nodebuild] [-rootdir] [-product ] [-class ] [-disttype ] [-y] [additional debuild options]" + echo " -buildnum specify build number for packaging(default 1)" + echo " -nodebuild stops before do debuild command." + echo " -rootdir layout \"debian\" directory for packaging under source top directory" + echo " -product specify product name(use PACKAGE_NAME in Makefile s default)" + echo " -class specify package class name(optional)" + echo " -disttype specify \"OS/version name\", ex: ubuntu/trusty" + echo " -y runs no interactive mode." + echo " additional debuild options this script run debuild with \"-uc -us\", can specify additional options." 
+ echo " -h print help" + echo "" +} + +func_get_default_class() +{ + dh_make -h 2>/dev/null | grep '\--multi' >/dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "multi" + else + echo "library" + fi +} + +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +MYSCRIPTDIR=$(cd "${MYSCRIPTDIR}" || exit 1; pwd) +SRCTOP=$(cd "${MYSCRIPTDIR}/.." || exit 1; pwd) +BUILDDEBDIR=${SRCTOP}/debian_build + +# +# Check options +# +IS_DEBUILD=1 +IS_INTERACTIVE=1 +IS_ROOTDIR=0 +DH_MAKE_AUTORUN_OPTION="-y" +BUILD_NUMBER=1 +IS_OS_UBUNTU=0 +OS_VERSION_NAME= +DEBUILD_OPT="" +PKGCLASSNAME=$(func_get_default_class) +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + echo "WARNING: (null) option is specified, so skip this." 1>&2 + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X-buildnum" ]; then + shift + if [ $# -eq 0 ]; then + echo "ERROR: -buildnum option needs parameter." 1>&2 + exit 1 + fi + BUILD_NUMBER=$1 + + elif [ "X$1" = "X-nodebuild" ]; then + IS_DEBUILD=0 + BUILD_NUMBER= + + elif [ "X$1" = "X-rootdir" ]; then + IS_ROOTDIR=1 + + elif [ "X$1" = "X-product" ]; then + shift + if [ $# -eq 0 ]; then + echo "ERROR: -product option needs parameter." 1>&2 + exit 1 + fi + PACKAGE_NAME=$1 + + elif [ "X$1" = "X-class" ]; then + shift + if [ $# -eq 0 ]; then + echo "ERROR: -class option needs parameter." 1>&2 + exit 1 + fi + PKGCLASSNAME=$1 + + elif [ "X$1" = "X-disttype" ]; then + shift + if [ $# -eq 0 ]; then + echo "ERROR: -disttype option needs parameter." 1>&2 + exit 1 + fi + OS_VERSION_NAME=$1 + echo "${OS_VERSION_NAME}" | grep -i 'ubuntu' >/dev/null 2>&1 + if [ $? -eq 0 ]; then + IS_OS_UBUNTU=1 + OS_VERSION_NAME=$(echo "${OS_VERSION_NAME}" | sed 's#[Uu][Bb][Uu][Nn][Tt][Uu]/##g') + + else + echo "${OS_VERSION_NAME}" | grep -i 'debian' >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "ERROR: -disttype option parameter must be ubuntu or debian." 1>&2 + exit 1 + fi + IS_OS_UBUNTU=0 + OS_VERSION_NAME=$(echo "${OS_VERSION_NAME}" | sed 's#[Dd][Ee][Bb][Ii][Aa][Nn]/##g') + fi + + elif [ "X$1" = "X-y" ]; then + IS_INTERACTIVE=0 + DH_MAKE_AUTORUN_OPTION="-y" + + else + if [ "X${DEBUILD_OPT}" != "X" ]; then + DEBUILD_OPT="${DEBUILD_OPT} $1" + else + DEBUILD_OPT="$1" + fi + fi + shift +done + +# +# Package name +# +if [ "X${PACKAGE_NAME}" = "X" ]; then + PACKAGE_NAME=$(grep "^PACKAGE_NAME" "${SRCTOP}/Makefile" 2>/dev/null | awk '{print $3}' 2>/dev/null) + if [ "X${PACKAGE_NAME}" = "X" ]; then + echo "ERROR: no product name" 1>&2 + exit 1 + fi +fi + +# +# Welcome message and confirming for interactive mode +# +if [ "${IS_INTERACTIVE}" -eq 1 ]; then + echo "---------------------------------------------------------------" + echo " Do you change these file and commit to github?" + echo " - ChangeLog modify / add changes like dch tool format" + echo " - Git TAG stamp git tag for release" + echo "---------------------------------------------------------------" + while true; do + echo "Confirm: [y/n] " | tr -d '\n' + read -r CONFIRM + + if [ "X${CONFIRM}" = "XY" ] || [ "X${CONFIRM}" = "Xy" ]; then + break; + elif [ "X${CONFIRM}" = "XN" ] || [ "X${CONFIRM}" = "Xn" ]; then + echo "Bye..." 
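+			# [NOTE]
+			# This prompt only appears in interactive mode; the -y option skips it.
+			# Illustrative invocation (option values are examples only):
+			#   buildutils/debian_build.sh -buildnum 2 -disttype ubuntu/focal -y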
+ exit 1 + fi + done + echo "" +fi + +# +# Make dist package by make dist +# +"${SRCTOP}/autogen.sh" || exit 1 +"${SRCTOP}/configure" "${CONFIGUREOPT}" || exit 1 +PACKAGE_VERSION=$("${MYSCRIPTDIR}/make_variables.sh" -pkg_version) +# shellcheck disable=SC2034 +PACKAGE_MAJOR_VER=$("${MYSCRIPTDIR}/make_variables.sh" -major_number) + +echo "===== make dist: start ==============================" +make dist || exit 1 +echo "===== make dist: end ==============================" + +# +# Create debian package directory and change current +# +echo "===== prepare working directory: start =============" + +if [ -f "${BUILDDEBDIR}" ]; then + echo "ERROR: debian file exists, could not make debian directory." 1>&2 + exit 1 +fi +if [ -d "${BUILDDEBDIR}" ]; then + echo "WANING: debian directory exists, remove and remake it." 1>&2 + rm -rf "${BUILDDEBDIR}" || exit 1 +fi +mkdir "${BUILDDEBDIR}" || exit 1 +cd "${BUILDDEBDIR}" || exit 1 + +# +# copy dist package and expand source files +# +cp "${SRCTOP}/${PACKAGE_NAME}-${PACKAGE_VERSION}.tar.gz" . || exit 1 +tar xvfz "${PACKAGE_NAME}-${PACKAGE_VERSION}.tar.gz" || exit 1 + +# +# change current directory +# +EXPANDDIR="${BUILDDEBDIR}/${PACKAGE_NAME}-${PACKAGE_VERSION}" +cd "${EXPANDDIR}" || exit 1 + +# +# initialize debian directory +# +if [ "X${LOGNAME}" = "X" ] && [ "X${USER}" = "X" ]; then + # [NOTE] + # if run in docker container, Neither LOGNAME nor USER may be set in the environment variables. + # dh_make needs one of these environments. + # + export USER="root" + export LOGNAME="root" +fi +dh_make -f "${BUILDDEBDIR}/${PACKAGE_NAME}-${PACKAGE_VERSION}.tar.gz" --createorig --"${PKGCLASSNAME}" "${DH_MAKE_AUTORUN_OPTION}" || exit 1 + +# +# remove unnecessary template files +# +rm -rf "${EXPANDDIR}"/debian/*.ex "${EXPANDDIR}"/debian/*.EX "${EXPANDDIR}/debian/${PACKAGE_NAME}"-doc.* "${EXPANDDIR}"/debian/README.* "${EXPANDDIR}"/debian/docs "${EXPANDDIR}"/debian/*.install + +# +# adding some lines into rules file +# +mv "${EXPANDDIR}/debian/rules" "${EXPANDDIR}/debian/rules.base" +head -1 "${EXPANDDIR}/debian/rules.base" > "${EXPANDDIR}/debian/rules" || exit 1 +sed '/^#/d' "${EXPANDDIR}/debian/rules.base" | sed '/^$/{N; /^\n$/D;}' >> "${EXPANDDIR}/debian/rules" || exit 1 +echo "" >> "${EXPANDDIR}/debian/rules" || exit 1 +echo "# for ${PACKAGE_NAME} package" >> "${EXPANDDIR}/debian/rules" || exit 1 +echo "override_dh_auto_install:" >> "${EXPANDDIR}/debian/rules" || exit 1 +echo " dh_auto_install --destdir=debian/${PACKAGE_NAME}" >> "${EXPANDDIR}/debian/rules" || exit 1 + +if [ "X${CONFIGUREOPT}" != "X" ]; then + echo "" >> "${EXPANDDIR}/debian/rules" || exit 1 + echo "override_dh_auto_configure:" >> "${EXPANDDIR}/debian/rules" || exit 1 + echo " dh_auto_configure -- ${CONFIGUREOPT}" >> "${EXPANDDIR}/debian/rules" || exit 1 +fi + +rm "${EXPANDDIR}/debian/rules.base" + +# +# files for other +# +echo "src/libexec/database/CREDIT usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/VERSION usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/k2hdkc_dbaas_create_host.templ usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/k2hdkc_dbaas_resource_keys.config usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/k2hdkc_dbaas_resource.templ usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo 
"src/libexec/database/command.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/functions.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/help.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/openstack.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/options.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/summary.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit +echo "src/libexec/database/variables.sh usr/libexec/k2hr3/database" >> "${EXPANDDIR}/debian/${PACKAGE_NAME}.install" || exit + +# +# copy copyright +# +cp "${MYSCRIPTDIR}/copyright" "${EXPANDDIR}/debian/copyright" || exit 1 + +# +# copy control file +# +cp "${MYSCRIPTDIR}/control" "${EXPANDDIR}/debian/control" || exit 1 + +# +# copy changelog with converting build number +# +CHLOG_ORG_MENT=$(grep "^ --" ChangeLog | head -1) +CHLOG_NEW_MENT=$(grep "^ --" "${EXPANDDIR}/debian/changelog" | head -1) +if [ "X${BUILD_NUMBER}" = "X" ]; then + if [ ${IS_OS_UBUNTU} -eq 1 ]; then + sed -e "s/${CHLOG_ORG_MENT}/${CHLOG_NEW_MENT}/g" -e "s/ trusty;/ ${OS_VERSION_NAME};/g" < ChangeLog > "${EXPANDDIR}/debian/changelog" || exit 1 + else + sed -e "s/${CHLOG_ORG_MENT}/${CHLOG_NEW_MENT}/g" -e 's/ trusty;/ unstable;/g' < ChangeLog > "${EXPANDDIR}/debian/changelog" || exit 1 + fi +else + if [ ${IS_OS_UBUNTU} -eq 1 ]; then + sed -e "s/${PACKAGE_VERSION}/${PACKAGE_VERSION}-${BUILD_NUMBER}/g" -e "s/${CHLOG_ORG_MENT}/${CHLOG_NEW_MENT}/g" -e "s/ trusty;/ ${OS_VERSION_NAME};/g" < ChangeLog > "${EXPANDDIR}/debian/changelog" || exit 1 + else + sed -e "s/${PACKAGE_VERSION}/${PACKAGE_VERSION}-${BUILD_NUMBER}/g" -e "s/${CHLOG_ORG_MENT}/${CHLOG_NEW_MENT}/g" -e "s/ trusty;/ unstable;/g" < ChangeLog > "${EXPANDDIR}/debian/changelog" || exit 1 + fi +fi +if [ ! -f "${EXPANDDIR}/debian/compat" ]; then + echo "9" > "${EXPANDDIR}/debian/compat" +fi + +echo "===== prepare working directory: end ===============" + +# +# change debian directory to source top directory +# +if [ ${IS_ROOTDIR} -eq 1 ]; then + if [ -f "${SRCTOP}/debian" ]; then + echo "ERROR: ${SRCTOP}/debian file exists, could not make debian directory." 1>&2 + exit 1 + fi + if [ -d "${SRCTOP}/debian" ]; then + echo "${SRCTOP}/debian directory exists, remove and remake it..." 1>&2 + rm -rf "${SRCTOP}/debian" || exit 1 + fi + cp -rp "${EXPANDDIR}/debian" "${SRCTOP}/." || exit 1 + + # + # change current directory + # + cd "${SRCTOP}" || exit 1 + + # + # base directory is changed + # + BUILDDEBDIR=${SRCTOP} +fi + +# +# Check stop before debuild(for manually) +# +if [ ${IS_DEBUILD} -ne 1 ]; then + # + # Not run debuild (this means just stop preparing the file) + # + echo "MESSAGE: ${PRGNAME} exits immediately before debuild is executed," + echo " that is, it prepares only files and directories." + echo " By running \"debuild -uc -us(-tc -b)\", you can create" + echo " the debian package manually and find the created package" + echo " in \"${BUILDDEBDIR}/..\" directory." 
+ echo "" + + exit 0 +fi + +# +# Run debuild +# +echo "===== build package: start =========================" +debuild -us -uc || exit 1 +echo "===== build package: end ===========================" + +# +# Check and show debian package +# +ls "${BUILDDEBDIR}/${PACKAGE_NAME}_${PACKAGE_VERSION}-${BUILD_NUMBER}"*.deb >/dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "===== show ${BUILDDEBDIR}/${PACKAGE_NAME}_${PACKAGE_VERSION}-${BUILD_NUMBER}*.deb package: start =====" + dpkg -c "${BUILDDEBDIR}/${PACKAGE_NAME}_${PACKAGE_VERSION}-${BUILD_NUMBER}"*.deb + echo "" + dpkg -I "${BUILDDEBDIR}/${PACKAGE_NAME}_${PACKAGE_VERSION}-${BUILD_NUMBER}"*.deb + echo "===== show ${BUILDDEBDIR}/${PACKAGE_NAME}_${PACKAGE_VERSION}-${BUILD_NUMBER}*.deb package: end =====" +fi + +# +# finish +# +echo "" +echo "You can find ${PACKAGE_NAME} ${PACKAGE_VERSION}-${BUILD_NUMBER} version debian package in ${BUILDDEBDIR} directory." +echo "" +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/k2hdkc-dbaas-cli.spec.in b/buildutils/k2hdkc-dbaas-cli.spec.in new file mode 100644 index 0000000..4540e78 --- /dev/null +++ b/buildutils/k2hdkc-dbaas-cli.spec.in @@ -0,0 +1,102 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# +# Local macros/define +# +%if %{undefined package_revision} +%global package_revision 1 +%endif + +%if %{undefined make_build} +%global make_build %{__make} %{?_smp_mflags} +%endif + +%if %{undefined autosetup} +%global autosetup %setup -q +%endif + +%global gittag v@VERSION@ + +%if %{undefined make_check} +%global make_check 1 +%endif + +# +# main package +# +Summary: @SHORTDESC@ +Name: @PACKAGE_NAME@ +Version: @VERSION@ +Release: %{package_revision}%{?dist} +License: @PKGLICENSE@ +@RPMPKG_GROUP@ +URL: https://@GIT_DOMAIN@/@GIT_ORG@/@PACKAGE_NAME@ +Source0: https://@GIT_DOMAIN@/@GIT_ORG@/@PACKAGE_NAME@/archive/%{gittag}/%{name}-%{version}.tar.gz +Requires: k2hr3-cli +BuildRequires: git-core make +BuildArch: noarch + +%description +The K2HDKC DBaaS CLI (Command Line Interface of Database as a +Service for K2HDKC) is a tool for building a K2HDKC cluster +in conjunction with K2HR3. +The Trove version of K2HDKC DBaaS is available, but this +K2HDKC DBaaS CLI allows you to build K2HDKC clusters without +the need for a Trove system. +With the basic components of OpenStack and the K2HR3 system +that works with it, you can easily build a K2HD KC cluster +using the K2HDKC DBaaS CLI. 
+ +%prep +%autosetup -n %{name}-%{version} + +%build +./autogen.sh +%configure --disable-static @CONFIGUREWITHOPT@ +make build + +%install +%make_install +install -D -m 444 src/libexec/database/CREDIT %{buildroot}/usr/libexec/k2hr3/database/CREDIT +install -D -m 444 src/libexec/database/VERSION %{buildroot}/usr/libexec/k2hr3/database/VERSION +install -D -m 444 src/libexec/database/k2hdkc_dbaas_create_host.templ %{buildroot}/usr/libexec/k2hr3/database/k2hdkc_dbaas_create_host.templ +install -D -m 444 src/libexec/database/k2hdkc_dbaas_resource_keys.config %{buildroot}/usr/libexec/k2hr3/database/k2hdkc_dbaas_resource_keys.config +install -D -m 444 src/libexec/database/k2hdkc_dbaas_resource.templ %{buildroot}/usr/libexec/k2hr3/database/k2hdkc_dbaas_resource.templ +install -D -m 444 src/libexec/database/command.sh %{buildroot}/usr/libexec/k2hr3/database/command.sh +install -D -m 444 src/libexec/database/functions.sh %{buildroot}/usr/libexec/k2hr3/database/functions.sh +install -D -m 444 src/libexec/database/help.sh %{buildroot}/usr/libexec/k2hr3/database/help.sh +install -D -m 444 src/libexec/database/openstack.sh %{buildroot}/usr/libexec/k2hr3/database/openstack.sh +install -D -m 444 src/libexec/database/options.sh %{buildroot}/usr/libexec/k2hr3/database/options.sh +install -D -m 444 src/libexec/database/summary.sh %{buildroot}/usr/libexec/k2hr3/database/summary.sh +install -D -m 444 src/libexec/database/variables.sh %{buildroot}/usr/libexec/k2hr3/database/variables.sh + +%if %{make_check} +%check +%{__make} check +%endif + +%files +%license COPYING +%doc README AUTHORS ChangeLog +/usr/libexec/k2hr3/database/* + +%changelog +@RPMCHANGELOG@ diff --git a/buildutils/make_release_version_file.sh b/buildutils/make_release_version_file.sh new file mode 100755 index 0000000..026b04c --- /dev/null +++ b/buildutils/make_release_version_file.sh @@ -0,0 +1,257 @@ +#!/bin/sh +# +# Utility tools for building configure/packages by AntPickax +# +# Copyright 2018 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Apr 13 2018 +# REVISION: +# + +# +# Make RELEASE_VERSION file in source top directory +# +# RELEASE_VERSION file is used from configure.ac and other files for building. +# RELEASE_VERSION has version(release) number in it, it is created from Git +# release tag or latest version in ChangeLog file. +# +func_usage() +{ + echo "" + echo "Usage: $1 [-not_use_git] [-no_check_ver_diff] [-f changelog file path]" + echo " -not_use_git specify for not checking git release tag by git tag command" + echo " -no_check_ver_diff specify for not comparing git release tag and changelog" + echo " -f changelog specify changelog file name in source top directory" + echo " -h print help" + echo "" +} +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +SRCTOP=$(cd "${MYSCRIPTDIR}/.." 
|| exit 1; pwd) +RELEASE_VERSION_FILE="${SRCTOP}/RELEASE_VERSION" +GIT_DIR="${SRCTOP}/.git" + + +# +# Check options +# +NOGIT="no" +NOCHECKVERDIFF="no" +CHANGELOGFILE="${SRCTOP}/ChangeLog" +while [ $# -ne 0 ]; do + if [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X-not_use_git" ]; then + NOGIT="yes" + + elif [ "X$1" = "X-no_check_ver_diff" ]; then + NOCHECKVERDIFF="yes" + + elif [ "X$1" = "X-f" ]; then + shift + if [ $# -eq 0 ]; then + echo "ERROR: Must set changelog file name after -f option." 1>&2 + exit 1 + fi + if [ ! -f "${SRCTOP}/$1" ]; then + echo "ERROR: Not found changelog($1) file " 1>&2 + exit 1 + fi + CHANGELOGFILE=${SRCTOP}/$1 + + else + echo "ERROR: Unknown option $1" 1>&2 + exit 1 + fi + shift +done + +# +# Version number from git tag command +# +# get version number from git release tag formatted following: +# "v10", "v 10", "ver10", "ver-10", "version10", "version,10" +# "v10.0.0", "v 10.0", "ver 10.0.0a", "v10.0.0-1", etc +# +# and the last build number is cut.(ex, "v10.0.1-1" -> "10.0.1") +# +if [ -d "${GIT_DIR}" ]; then + if [ "X${NOGIT}" = "Xno" ]; then + GIT_RELEASE_VERSION=$(git tag | grep '^[v|V]\([e|E][r|R]\([s|S][i|I][o|O][n|N]\)\{0,1\}\)\{0,1\}' | sed 's/^[v|V]\([e|E][r|R]\([s|S][i|I][o|O][n|N]\)\{0,1\}\)\{0,1\}//' | grep -o '[0-9]\+\([\.]\([0-9]\)\+\)\+\(.\)*$' | sed 's/-\(.\)*$//' | sort -t . -n -k 1,1 -k 2,2 -k 3,3 -k 4,4 | uniq | tail -1 | tr -d '\n') + + if [ "X${GIT_RELEASE_VERSION}" = "X" ]; then + echo "WARNING: Could not get latest release tag from git release tag" 1>&2 + GIT_RELEASE_VERSION= + fi + else + GIT_RELEASE_VERSION= + fi +else + echo "WARNING: ${GIT_DIR} directory is not existed." 1>&2 + GIT_RELEASE_VERSION= +fi + +# +# Version number from ChangeLog +# +# get version number from ChangeLog file formatted like debian. +# and the last build number is cut.(ex, "10.0.1-1" -> "10.0.1") +# +if [ -f "${CHANGELOGFILE}" ]; then + CH_RELEASE_VERSION=$(grep -o '^.*[(].*[)].*[;].*$' "${CHANGELOGFILE}" | grep -o '[(].*[)]' | head -1 | sed 's/[(|)]//g') + + if [ "X${CH_RELEASE_VERSION}" = "X" ]; then + echo "WARNING: Could not get latest release tag from ChangeLog file ( ${CHANGELOGFILE} )" 1>&2 + CH_RELEASE_VERSION= + fi +else + echo "MESSAGE: not found ChangeLog file ( ${CHANGELOGFILE} )" 1>&2 + CH_RELEASE_VERSION= +fi + +# +# Check version number between git release tag and ChangeLog file +# +# If version number from git release tag is later than one from ChangeLog, +# this script puts error and exits. +# The other case, this script continue to work and puts version number +# to RELEASE_VERSION file. +# If there are no version number from git release tag and ChangeLog, this +# script checks RELEASE_VERSION file existing. 
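+#
+# [NOTE] Illustrative outcomes (version numbers are hypothetical):
+#   git tag v1.0.4, ChangeLog 1.0.5 -> warning, RELEASE_VERSION is set to 1.0.4
+#   git tag v1.0.5, ChangeLog 1.0.4 -> error, RELEASE_VERSION is not written
+#   git tag v1.0.5, ChangeLog 1.0.5 -> RELEASE_VERSION is set to 1.0.5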
+# +IS_PUT_RELEASE_VERSION_FILE=yes + +if [ "X${GIT_RELEASE_VERSION}" != "X" ] && [ "X${CH_RELEASE_VERSION}" != "X" ]; then + if [ "X${NOCHECKVERDIFF}" = "Xno" ]; then + # + # Check latest version + # + GIT_VERS=$(echo "${GIT_RELEASE_VERSION}" | sed 's/\./ /g') + CH_VERS=$(echo "${CH_RELEASE_VERSION}" | sed 's/\./ /g') + + GIT_VER_PART_CNT=0 + LATEST_VER_TYPE= + for git_ver_part in ${GIT_VERS}; do + ch_ver_part= + + CH_VER_PART_CNT=0 + for ver_tmp in ${CH_VERS}; do + ch_ver_part="${ver_tmp}" + CH_VER_PART_CNT=$((CH_VER_PART_CNT + 1)) + + if [ ${GIT_VER_PART_CNT} -lt ${CH_VER_PART_CNT} ]; then + break + fi + done + + if [ "X${ch_ver_part}" != "X" ]; then + if [ "${git_ver_part}" -gt "${ch_ver_part}" ]; then + LATEST_VER_TYPE=gitver + break + elif [ "${git_ver_part}" -lt "${ch_ver_part}" ]; then + LATEST_VER_TYPE=chver + break + fi + else + LATEST_VER_TYPE=gitver + break + fi + + GIT_VER_PART_CNT=$((GIT_VER_PART_CNT + 1)) + done + + if [ "X${LATEST_VER_TYPE}" = "X" ]; then + GIT_VER_PART_CNT=0 + for git_ver_part in ${GIT_VERS}; do + GIT_VER_PART_CNT=$((GIT_VER_PART_CNT + 1)) + done + + CH_VER_PART_CNT=0 + for ver_tmp in ${CH_VERS}; do + CH_VER_PART_CNT=$((CH_VER_PART_CNT + 1)) + done + + if [ ${GIT_VER_PART_CNT} -lt ${CH_VER_PART_CNT} ]; then + LATEST_VER_TYPE=chver + fi + fi + + if [ "X${LATEST_VER_TYPE}" = "Xgitver" ]; then + echo "ERROR: git release tag ( ${GIT_RELEASE_VERSION} ) is later than ChangeLog file ( ${CHANGELOGFILE} ) version ( ${CH_RELEASE_VERSION} )." 1>&2 + exit 1 + + elif [ "X${LATEST_VER_TYPE}" = "Xchver" ]; then + echo "WARNING: ChangeLog file ( ${CHANGELOGFILE} ) version ( ${CH_RELEASE_VERSION} ) is later than git release tag ( ${GIT_RELEASE_VERSION} )." 1>&2 + echo " Then RELEASE_VERSION file is put git release tag ( ${GIT_RELEASE_VERSION} )" 1>&2 + + RELEASE_VERSION=${GIT_RELEASE_VERSION} + + else + # LATEST_VER_TYPE is not set, this means same version. + + RELEASE_VERSION=${GIT_RELEASE_VERSION} + fi + + else + # + # Not check version number, so only use it from git + # + RELEASE_VERSION=${GIT_RELEASE_VERSION} + fi + +elif [ "X${GIT_RELEASE_VERSION}" != "X" ]; then + RELEASE_VERSION=${GIT_RELEASE_VERSION} + +elif [ "X${CH_RELEASE_VERSION}" != "X" ]; then + RELEASE_VERSION=${CH_RELEASE_VERSION} + +elif [ -f "${RELEASE_VERSION_FILE}" ]; then + RELEASE_VERSION=$(cat "${RELEASE_VERSION_FILE}") + IS_PUT_RELEASE_VERSION_FILE=no + +else + echo "ERROR: There is no version number information." 1>&2 + echo " The version number must be given by git release tag" 1>&2 + echo " or ChangeLog file or RELEASE_VERSION file." 1>&2 + exit 1 +fi + +# +# Make RELEASE_VERSION file +# +if [ "X${IS_PUT_RELEASE_VERSION_FILE}" = "Xyes" ]; then + echo "MESSAGE: Put version number ${RELEASE_VERSION} to RELEASE_VERSION file" 1>&2 + echo "${RELEASE_VERSION}" | tr -d '\n' | sed -e 's/[[:space:]]*$//g' > "${RELEASE_VERSION_FILE}" +fi + + +# +# finish +# +echo "SUCCEED: Version number is ${RELEASE_VERSION}" 1>&2 +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/make_rpm_changelog.sh b/buildutils/make_rpm_changelog.sh new file mode 100755 index 0000000..13434cf --- /dev/null +++ b/buildutils/make_rpm_changelog.sh @@ -0,0 +1,140 @@ +#!/bin/sh +# +# Utility tools for building configure/packages by AntPickax +# +# Copyright 2018 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. 
+# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Apr 13 2018 +# REVISION: +# + +# +# Convert ChangeLog to use it in spec file for rpm +# +func_usage() +{ + echo "" + echo "Usage: $1 [-h]" + echo " specify ChnageLog file path. if not specify, use ChangeLog file in top directory as default." + echo " -h(help) print help." + echo "" +} +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +SRCTOP=$(cd "${MYSCRIPTDIR}/.." || exit 1; pwd) + +# +# Check options +# +CHANGELOG_FILE="${SRCTOP}/ChangeLog" +ISSETCHANGELOG=0 +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + break; + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + else + if [ ${ISSETCHANGELOG} -ne 0 ]; then + echo "ERROR: already ${CHANGELOG_FILE} file is specified." 1>&2 + echo "No changelog by ${CHANGELOG_FILE} with error." + exit 1 + fi + if [ ! -f "$1" ]; then + echo "ERROR: $1 file is not existed." 1>&2 + echo "No changelog by ${CHANGELOG_FILE} with error." + exit 1 + fi + CHANGELOG_FILE=$1 + ISSETCHANGELOG=1 + fi + shift +done + +# +# convert ChangeLog to spec file format for rpm +# +if [ "X${BUILD_NUMBER}" = "X" ]; then + # default build number is 1 + BUILD_NUMBER_STR="-1" +else + BUILD_NUMBER_STR="-${BUILD_NUMBER}" +fi + +INONEVER=0 +DETAILS="" +ALLLINES="" +SET_FIRST_VERSION=0 +while read -r oneline; do + oneline="${oneline}" + if [ "X${oneline}" = "X" ]; then + continue + fi + + if [ "${INONEVER}" -eq 0 ]; then + PKG_VERSION=$(echo "${oneline}" | grep '^.*[(].*\..*[)].*[;].*$' | grep -o '[(].*[)]' | sed 's/[(|)]//g') + if [ "X${PKG_VERSION}" != "X" ]; then + INONEVER=1 + DETAILS="" + if [ "${SET_FIRST_VERSION}" -eq 0 ]; then + PKG_VERSION="${PKG_VERSION}${BUILD_NUMBER_STR}" + SET_FIRST_VERSION=1 + fi + fi + else + TEST_CONTENTS=$(echo "${oneline}" | grep '^[-][-].*[ ][ ].*$') + PKG_RF2822=$(echo "${TEST_CONTENTS}" | grep -o '[ ][ ].*') + PKG_RF2822="${PKG_RF2822}" + PKG_COMMITTER=$(echo "${TEST_CONTENTS}" | grep -o '.*[ ][ ]' | sed 's/^[-][-][ ]//') + if [ "X${PKG_RF2822}" != "X" ] && [ "X${PKG_COMMITTER}" != "X" ]; then + INONEVER=0 + PKG_DATE=$(echo "${PKG_RF2822}" | sed 's/,/ /g' | awk '{print $1" "$3" "$2" "$4}') + PKG_LINE="* ${PKG_DATE} ${PKG_COMMITTER} ${PKG_VERSION}${DETAILS}" + if [ "X${ALLLINES}" != "X" ]; then + ALLLINES="${ALLLINES}\\n\\n${PKG_LINE}" + else + ALLLINES="${PKG_LINE}" + fi + else + ONEDETAIL=$(echo "${oneline}" | grep '^[\*][ ].*' | sed 's/^[\*]//g') + if [ "X${ONEDETAIL}" != "X" ]; then + DETAILS="${DETAILS}\\n- ${ONEDETAIL}" + fi + fi + fi +done < "${CHANGELOG_FILE}" + +# +# print changelog +# +# NOTE: echo command on ubuntu is print '-e', we need to cut it. 
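+#
+# [NOTE] Illustrative conversion (entry contents are hypothetical):
+#   a ChangeLog entry such as
+#     k2hdkc-dbaas-cli (1.0.0) unstable; urgency=low
+#     * Initial release
+#     -- Takeshi Nakatani <dev@example.com>  Mon, 01 Mar 2021 10:00:00 +0900
+#   is printed as the rpm %changelog entry
+#     * Mon Mar 01 2021 Takeshi Nakatani <dev@example.com> 1.0.0-1
+#     - Initial release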
+# +# shellcheck disable=SC2039 +echo -e "${ALLLINES}" | sed 's/^-e //g' + +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/make_variables.sh b/buildutils/make_variables.sh new file mode 100755 index 0000000..fbf7df9 --- /dev/null +++ b/buildutils/make_variables.sh @@ -0,0 +1,205 @@ +#!/bin/sh +# +# Utility tools for building configure/packages by AntPickax +# +# Copyright 2018 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Fri, Apr 13 2018 +# REVISION: +# + +# +# Puts project version/revision/age/etc variables for building +# +func_usage() +{ + echo "" + echo "Usage: $1 [-pkg_version | -lib_version_info | -lib_version_for_link | -major_number | -debhelper_dep | -rpmpkg_group]" + echo " -pkg_version returns package version." + echo " -lib_version_info returns library libtools revision" + echo " -lib_version_for_link return library version for symbolic link" + echo " -major_number return major version number" + echo " -debhelper_dep return debhelper dependency string" + echo " -rpmpkg_group return group string for rpm package" + echo " -h(help) print help." + echo "" +} +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +SRCTOP=$(cd "${MYSCRIPTDIR}/.." || exit 1; pwd) +RELEASE_VERSION_FILE="${SRCTOP}/RELEASE_VERSION" + +# +# Check options +# +PRGMODE="" +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + break; + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X-pkg_version" ]; then + PRGMODE="PKG" + + elif [ "X$1" = "X-lib_version_info" ]; then + PRGMODE="LIB" + + elif [ "X$1" = "X-lib_version_for_link" ]; then + PRGMODE="LINK" + + elif [ "X$1" = "X-major_number" ]; then + PRGMODE="MAJOR" + + elif [ "X$1" = "X-debhelper_dep" ]; then + PRGMODE="DEBHELPER" + + elif [ "X$1" = "X-rpmpkg_group" ]; then + PRGMODE="RPMGROUP" + + else + echo "ERROR: unknown option $1" 1>&2 + echo "0" | tr -d '\n' + exit 1 + fi + shift +done +if [ "X${PRGMODE}" = "X" ]; then + echo "ERROR: option is not specified." 1>&2 + echo "0" | tr -d '\n' + exit 1 +fi + +# +# Make result +# +if [ "${PRGMODE}" = "PKG" ]; then + RESULT=$(cat "${RELEASE_VERSION_FILE}") + +elif [ "${PRGMODE}" = "LIB" ] || [ "${PRGMODE}" = "LINK" ]; then + MAJOR_VERSION=$(sed 's/["|\.]/ /g' "${RELEASE_VERSION_FILE}" | awk '{print $1}') + MID_VERSION=$(sed 's/["|\.]/ /g' "${RELEASE_VERSION_FILE}" | awk '{print $2}') + LAST_VERSION=$(sed 's/["|\.]/ /g' "${RELEASE_VERSION_FILE}" | awk '{print $3}') + + # check version number + # shellcheck disable=SC2003 + expr "${MAJOR_VERSION}" + 1 >/dev/null 2>&1 + if [ $? -ge 2 ]; then + echo "ERROR: wrong version number in RELEASE_VERSION file" 1>&2 + echo "0" | tr -d '\n' + exit 1 + fi + # shellcheck disable=SC2003 + expr "${MID_VERSION}" + 1 >/dev/null 2>&1 + if [ $? 
-ge 2 ]; then + echo "ERROR: wrong version number in RELEASE_VERSION file" 1>&2 + echo "0" | tr -d '\n' + exit 1 + fi + # shellcheck disable=SC2003 + expr "${LAST_VERSION}" + 1 >/dev/null 2>&1 + if [ $? -ge 2 ]; then + echo "ERROR: wrong version number in RELEASE_VERSION file" 1>&2 + echo "0" | tr -d '\n' + exit 1 + fi + + # make library revision number + if [ "${MID_VERSION}" -gt 0 ]; then + # shellcheck disable=SC2003 + REV_VERSION=$(expr "${MID_VERSION}" \* 100) + REV_VERSION=$((LAST_VERSION + REV_VERSION)) + else + REV_VERSION=${LAST_VERSION} + fi + + if [ ${PRGMODE} = "LIB" ]; then + RESULT="${MAJOR_VERSION}:${REV_VERSION}:0" + else + RESULT="${MAJOR_VERSION}.0.${REV_VERSION}" + fi + +elif [ "${PRGMODE}" = "MAJOR" ]; then + RESULT=$(sed 's/["|\.]/ /g' "${RELEASE_VERSION_FILE}" | awk '{print $1}') + +elif [ "${PRGMODE}" = "DEBHELPER" ]; then + # [NOTE] + # This option returns debhelper dependency string in control file for debian package. + # That string is depended debhelper package version and os etc. + # (if not ubuntu/debian os, returns default string) + # + apt-cache --version >/dev/null 2>&1 + if [ $? -eq 0 ]; then + IS_OS_UBUNTU=0 + if [ -f /etc/lsb-release ]; then + grep "[Uu]buntu" /etc/lsb-release >/dev/null 2>&1 + if [ $? -eq 0 ]; then + IS_OS_UBUNTU=1 + fi + fi + + DEBHELPER_MAJOR_VER=$(apt-cache show debhelper 2>/dev/null | grep Version 2>/dev/null | awk '{print $2}' 2>/dev/null | sed 's/\..*/ /g' 2>/dev/null) + # shellcheck disable=SC2003 + expr "${DEBHELPER_MAJOR_VER}" + 1 >/dev/null 2>&1 + if [ $? -ne 0 ]; then + DEBHELPER_MAJOR_VER=0 + else + DEBHELPER_MAJOR_VER=$((DEBHELPER_MAJOR_VER + 0)) + fi + if [ ${DEBHELPER_MAJOR_VER} -lt 10 ]; then + RESULT="debhelper (>= 9), autotools-dev" + else + if [ ${IS_OS_UBUNTU} -eq 1 ]; then + RESULT="debhelper (>= 10)" + else + RESULT="debhelper (>= 10), autotools-dev" + fi + fi + else + # Not debian/ubuntu, set default + RESULT="debhelper (>= 10), autotools-dev" + fi + +elif [ ${PRGMODE} = "RPMGROUP" ]; then + # [NOTE] + # Fedora rpm does not need "Group" key in spec file. + # If not fedora, returns "NEEDRPMGROUP", and you must replace this string in configure.ac + # + if [ -f /etc/fedora-release ]; then + RESULT="" + else + RESULT="NEEDRPMGROUP" + fi +fi + +# +# Output result +# +echo "${RESULT}" | tr -d '\n' | sed -e 's/[[:space:]]*$//g' + +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/buildutils/rpm_build.sh b/buildutils/rpm_build.sh new file mode 100755 index 0000000..0dbbcb5 --- /dev/null +++ b/buildutils/rpm_build.sh @@ -0,0 +1,194 @@ +#!/bin/sh +# +# Utility tools for building configure/packages by AntPickax +# +# Copyright 2018 Yahoo Japan Corporation. +# +# AntPickax provides utility tools for supporting autotools +# builds. +# +# These tools retrieve the necessary information from the +# repository and appropriately set the setting values of +# configure, Makefile, spec,etc file and so on. +# These tools were recreated to reduce the number of fixes and +# reduce the workload of developers when there is a change in +# the project configuration. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
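+#
+# [NOTE] (refers to buildutils/make_variables.sh above)
+# With a hypothetical RELEASE_VERSION of "1.2.3", that helper would print:
+#   -pkg_version          -> 1.2.3
+#   -major_number         -> 1
+#   -lib_version_info     -> 1:203:0   (revision = MID * 100 + LAST)
+#   -lib_version_for_link -> 1.0.203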
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Thu, Nov 22 2018 +# REVISION: +# + +# +# Autobuid for rpm package +# +func_usage() +{ + echo "" + echo "Usage: $1 [-buildnum ] [-product ] [-y]" + echo " -buildnum specify build number for packaging(default 1)" + echo " -product specify product name(use PACKAGE_NAME in Makefile s default)" + echo " -y runs no interactive mode." + echo " -h print help" + echo "" +} +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +MYSCRIPTDIR=$(cd "${MYSCRIPTDIR}" || exit 1; pwd) +SRCTOP=$(cd "${MYSCRIPTDIR}/.." || exit 1; pwd) +RPM_TOPDIR=${SRCTOP}/rpmbuild + +# +# Check options +# +IS_INTERACTIVE=1 +BUILD_NUMBER=1 +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + break + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X-buildnum" ]; then + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : -buildnum option needs parameter." 1>&2 + exit 1 + fi + BUILD_NUMBER=$1 + + elif [ "X$1" = "X-product" ]; then + shift + if [ $# -eq 0 ]; then + echo "[ERROR] ${PRGNAME} : -product option needs parameter." 1>&2 + exit 1 + fi + PACKAGE_NAME=$1 + + elif [ "X$1" = "X-y" ]; then + IS_INTERACTIVE=0 + + else + echo "[ERROR] ${PRGNAME} : unknown option $1." 1>&2 + exit 1 + fi + shift +done + +# +# Package name +# +if [ "X${PACKAGE_NAME}" = "X" ]; then + PACKAGE_NAME=$(grep "^PACKAGE_NAME" "${SRCTOP}/Makefile" 2>/dev/null | awk '{print $3}' 2>/dev/null) + if [ "X${PACKAGE_NAME}" = "X" ]; then + echo "[ERROR] ${PRGNAME} : no product name" 1>&2 + exit 1 + fi +fi + +# +# Welcome message and confirming for interactive mode +# +if [ ${IS_INTERACTIVE} -eq 1 ]; then + echo "---------------------------------------------------------------" + echo " Do you change these file and commit to github?" + echo " - ChangeLog modify / add changes like dch tool format" + echo " - Git TAG stamp git tag for release" + echo "---------------------------------------------------------------" + while true; do + echo "Confirm: [y/n] " | tr -d '\n' + read -r CONFIRM + + if [ "X${CONFIRM}" = "XY" ] || [ "X${CONFIRM}" = "Xy" ]; then + break; + elif [ "X${CONFIRM}" = "XN" ] || [ "X${CONFIRM}" = "Xn" ]; then + echo "Bye..." + exit 1 + fi + done + echo "" +fi + +# +# before building +# +cd "${SRCTOP}" || exit 1 + +# +# package version +# +PACKAGE_VERSION=$("${MYSCRIPTDIR}/make_variables.sh" -pkg_version) + +# +# copy spec file +# +cp "${SRCTOP}"/buildutils/*.spec "${SRCTOP}/" +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : could not find and copy spec files." 1>&2 + exit 1 +fi + +# +# create rpm top directory and etc +# +_SUB_RPM_DIRS="BUILD BUILDROOT RPM SOURCES SPECS SRPMS" +for _SUB_RPM_DIR in ${_SUB_RPM_DIRS}; do + mkdir -p "${RPM_TOPDIR}/${_SUB_RPM_DIR}" + if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : could not make ${RPM_TOPDIR}/${_SUB_RPM_DIR} directory." 1>&2 + exit 1 + fi +done + +# +# copy source tar.gz from git by archive +# +git archive HEAD --prefix="${PACKAGE_NAME}-${PACKAGE_VERSION}/" --output="${RPM_TOPDIR}/SOURCES/${PACKAGE_NAME}-${PACKAGE_VERSION}.tar.gz" +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : could not make source tar ball(${RPM_TOPDIR}/SOURCES/${PACKAGE_NAME}-${PACKAGE_VERSION}.tar.gz) from github repository." 
1>&2 + exit 1 +fi + +# +# rpm build +# +# shellcheck disable=SC2035 +rpmbuild -vv -ba --define "_topdir ${RPM_TOPDIR}" --define "_prefix /usr" --define "_mandir /usr/share/man" --define "_defaultdocdir /usr/share/doc" --define "package_revision ${BUILD_NUMBER}" --define "debug_package %{nil}" *.spec +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : failed to build rpm packages by rpmbuild." 1>&2 + exit 1 +fi + +# +# copy RPM files to package directory for uploading +# +cp "${SRCTOP}"/rpmbuild/RPMS/*/*.rpm "${SRCTOP}/" +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : failed to copy rpm files to ${SRCTOP} directory." 1>&2 + exit 1 +fi + +cp "${SRCTOP}"/rpmbuild/SRPMS/*.rpm "${SRCTOP}/" +if [ $? -ne 0 ]; then + echo "[ERROR] ${PRGNAME} : failed to copy source rpm files to ${SRCTOP} directory." 1>&2 + exit 1 +fi + +# +# finish +# +echo "" +echo "You can find ${PACKAGE_NAME} ${PACKAGE_VERSION}-${BUILD_NUMBER} version rpm package in ${SRCTOP} directory." +echo "" +exit 0 + +# +# VIM modelines +# +# vim:set ts=4 fenc=utf-8: +# diff --git a/buildutils/setup_k2hr3_cli_component.sh b/buildutils/setup_k2hr3_cli_component.sh new file mode 100755 index 0000000..260b95f --- /dev/null +++ b/buildutils/setup_k2hr3_cli_component.sh @@ -0,0 +1,306 @@ +#!/bin/sh +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# [NOTE] +# This script copies the required files and directories from the +# k2hr3_cli repository. +# Since the K2HR3 CLI PLUGIN has only the plugin elements, it +# requires the k2hr3_cli component under the current directory +# for testing and debugging. +# This script allows you to deploy the minimum required components. +# + +#-------------------------------------------------------------- +# Variables +#-------------------------------------------------------------- +PRGNAME=$(basename "$0") +MYSCRIPTDIR=$(dirname "$0") +SRCTOP=$(cd "${MYSCRIPTDIR}/.." || exit 1; pwd) + +# +# Git directory/file +# +GIT_DIR="${SRCTOP}/.git" +GIT_CONFIG_FILE="${GIT_DIR}/config" + +# +# k2hr3_cli default +# +if [ -z "${DEFAULT_GIT_DOMAIN}" ]; then + DEFAULT_GIT_DOMAIN="github.com" +fi +if [ -z "${DEFAULT_K2HR3_CLI_ORG}" ]; then + DEFAULT_K2HR3_CLI_ORG="yahoojapan" +fi +if [ -z "${K2HR3_CLI_REPO_NAME}" ]; then + K2HR3_CLI_REPO_NAME="k2hr3_cli" +fi + +# +# expand directory +# +EXPAND_TOP_DIR="/tmp/.k2hdkc_dbaas_cli_tmp" + +#-------------------------------------------------------------- +# Functions +#-------------------------------------------------------------- +func_usage() +{ + echo "" + echo "Usage: $1 [--clean(-c)] [--force_archive(-f)] [--k2hr3_cli_repo ] [--help(-h)]" + echo " --clean(-c) Cleanup directories and files" + echo " --force_archive(-f) Forcibly use the archive(default: not use archive)" + echo " --k2hr3_cli_repo Specify k2hr3_cli repository name(default: k2hr3_cli)" + echo " --help(-h) Display help." 
+ echo "" +} + +#-------------------------------------------------------------- +# Options +#-------------------------------------------------------------- +# +# Check options +# +IS_CLEANUP=0 +USE_ARCHIVE=0 +_TMP_K2HR3_CLI_REPO_NAME="" +while [ $# -ne 0 ]; do + if [ "X$1" = "X" ]; then + break; + + elif [ "X$1" = "X-h" ] || [ "X$1" = "X-help" ]; then + func_usage "${PRGNAME}" + exit 0 + + elif [ "X$1" = "X--clean" ] || [ "X$1" = "X--CLEAN" ] || [ "X$1" = "X-c" ] || [ "X$1" = "X-C" ]; then + if [ "${IS_CLEANUP}" -eq 1 ]; then + echo "[ERROR] ${PRGNAME} - Already specified \"$1\" option." 1>&2 + exit 1 + fi + IS_CLEANUP=1 + + elif [ "X$1" = "X--force_archive" ] || [ "X$1" = "X--FORCE_ARCHIVE" ] || [ "X$1" = "X-f" ] || [ "X$1" = "X-F" ]; then + if [ "${USE_ARCHIVE}" -eq 1 ]; then + echo "[ERROR] ${PRGNAME} - Already specified \"$1\" option." 1>&2 + exit 1 + fi + USE_ARCHIVE=1 + + elif [ "X$1" = "X--k2hr3_cli_repo" ] || [ "X$1" = "X--K2HR3_CLI_REPO" ]; then + if [ "X${_TMP_K2HR3_CLI_REPO_NAME}" != "X" ]; then + echo "[ERROR] ${PRGNAME} - Already specified \"$1\" option." 1>&2 + exit 1 + fi + shift + if [ $# -le 0 ]; then + echo "[ERROR] ${PRGNAME} - \"$1\" option needs parameter." 1>&2 + exit 1 + fi + _TMP_K2HR3_CLI_REPO_NAME="$1" + + else + echo "[ERROR] ${PRGNAME} - Unknown option \"$1\"" 1>&2 + echo "0" | tr -d '\n' + exit 1 + fi + shift +done +if [ "X${_TMP_K2HR3_CLI_REPO_NAME}" != "X" ]; then + K2HR3_CLI_REPO_NAME="${_TMP_K2HR3_CLI_REPO_NAME}" +fi + +#-------------------------------------------------------------- +# Main processing +#-------------------------------------------------------------- +# +# Cleanup +# + +# +# [TODO] ... Directories may still be added. +# +rm -f "${SRCTOP}/src/libexec/database/VERSION" +rm -f "${SRCTOP}/src/k2hr3" +rm -rf "${SRCTOP}/src/libexec/common" +rm -rf "${SRCTOP}/src/libexec/config" +rm -rf "${SRCTOP}/src/libexec/token" +rm -rf "${SRCTOP}/src/libexec/resource" +rm -rf "${SRCTOP}/src/libexec/policy" +rm -rf "${SRCTOP}/src/libexec/role" +rm -rf "${SRCTOP}/src/libexec/userdata" +rm -f "${SRCTOP}/test/util_test.sh" +rm -f "${SRCTOP}/test/util_request.sh" +if [ "${IS_CLEANUP}" -eq 1 ]; then + exit 0 +fi + +# +# Check .git/config file +# +USE_GIT_CONFIG=0 +if [ -d "${GIT_DIR}" ]; then + if [ -f "${GIT_CONFIG_FILE}" ]; then + USE_GIT_CONFIG=1 + fi +fi + +# +# Check git domain and organaization +# +if [ "${USE_GIT_CONFIG}" -eq 1 ]; then + # + # Check git information + # + echo "[INFO] ${PRGNAME} - Check .git/config for git domain and organaiztion" 1>&2 + + GIT_URL_THIS_REPO=$(grep '^[[:space:]]*url[[:space:]]*=[[:space:]]*' .git/config | grep '.git$' | head -1 | sed -e 's/^[[:space:]]*url[[:space:]]*=[[:space:]]*//g') + + if [ "X${GIT_URL_THIS_REPO}" != "X" ]; then + # + # Get git domain and organaization + # + GIT_DOMAIN_NAME=$(echo "${GIT_URL_THIS_REPO}" | sed -e 's/^git@//g' -e 's#^http[s]*://##g' -e 's/:/ /g' -e 's#/# #g' | awk '{print $1}') + GIT_ORG_NAME=$(echo "${GIT_URL_THIS_REPO}" | sed -e 's/^git@//g' -e 's#^http[s]*://##g' -e 's/:/ /g' -e 's#/# #g' | awk '{print $2}') + + if [ "X${GIT_DOMAIN_NAME}" = "X" ] || [ "X${GIT_ORG_NAME}" = "X" ]; then + echo "[WARNING] ${PRGNAME} - Unknown git dmain and organaization in .git/config" 1>&2 + USE_ARCHIVE=1 + GIT_DOMAIN_NAME=${DEFAULT_GIT_DOMAIN} + GIT_ORG_NAME=${DEFAULT_K2HR3_CLI_ORG} + fi + else + echo "[WARNING] ${PRGNAME} - Unknown git url in .git/config" 1>&2 + USE_ARCHIVE=1 + GIT_DOMAIN_NAME=${DEFAULT_GIT_DOMAIN} + GIT_ORG_NAME=${DEFAULT_K2HR3_CLI_ORG} + fi +else + echo "[INFO] ${PRGNAME} - .git/config is 
not existed." 1>&2 + USE_ARCHIVE=1 + GIT_DOMAIN_NAME=${DEFAULT_GIT_DOMAIN} + GIT_ORG_NAME=${DEFAULT_K2HR3_CLI_ORG} +fi + +# +# Git clone / Download archive +# +mkdir -p "${EXPAND_TOP_DIR}" + +K2HR3_CLI_EXPAND_DIR="" +if [ "${USE_ARCHIVE}" -ne 1 ]; then + # + # Git clone k2hr3_cli + # + echo "[INFO] ${PRGNAME} - Try git clone all ${K2HR3_CLI_REPO_NAME}" 1>&2 + + K2HR3_CLI_GIT_URI="https://${GIT_DOMAIN_NAME}/${GIT_ORG_NAME}/${K2HR3_CLI_REPO_NAME}.git" + + CURRENT_DIR=$(pwd) + cd "${EXPAND_TOP_DIR}" || exit 1 + git clone "${K2HR3_CLI_GIT_URI}" + if [ $? -eq 0 ]; then + if [ -d "${K2HR3_CLI_REPO_NAME}" ]; then + K2HR3_CLI_EXPAND_DIR="${EXPAND_TOP_DIR}/${K2HR3_CLI_REPO_NAME}" + else + echo "[ERROR] ${PRGNAME} - Not found ${K2HR3_CLI_REPO_NAME} directory." 1>&2 + fi + else + echo "[ERROR] ${PRGNAME} - Failed to clone ${K2HR3_CLI_REPO_NAME}" 1>&2 + fi +else + # + # Download k2hr3_cli archive + # + echo "[INFO] ${PRGNAME} - Try Download ${K2HR3_CLI_REPO_NAME} archive" 1>&2 + + K2HR3_CLI_ZIP_NAME="master.zip" + K2HR3_CLI_ZIP_URI="https://${GIT_DOMAIN_NAME}/${GIT_ORG_NAME}/${K2HR3_CLI_REPO_NAME}/archive/${K2HR3_CLI_ZIP_NAME}" + curl -s -L "${K2HR3_CLI_ZIP_URI}" --output "${EXPAND_TOP_DIR}/${K2HR3_CLI_ZIP_NAME}" + if [ $? -eq 0 ]; then + if [ -f "${EXPAND_TOP_DIR}/${K2HR3_CLI_ZIP_NAME}" ]; then + CURRENT_DIR=$(pwd) + cd "${EXPAND_TOP_DIR}" || exit 1 + + unzip "${K2HR3_CLI_ZIP_NAME}" >/dev/null + if [ $? -eq 0 ]; then + if [ -d "${K2HR3_CLI_REPO_NAME}-master" ]; then + mv "${K2HR3_CLI_REPO_NAME}-master" "${K2HR3_CLI_REPO_NAME}" + K2HR3_CLI_EXPAND_DIR="${EXPAND_TOP_DIR}/${K2HR3_CLI_REPO_NAME}" + else + echo "[ERROR] ${PRGNAME} - Not found ${EXPAND_TOP_DIR}/${K2HR3_CLI_REPO_NAME}-master" 1>&2 + fi + else + echo "[ERROR] ${PRGNAME} - Failed to unzip ${EXPAND_TOP_DIR}/${K2HR3_CLI_ZIP_NAME}" 1>&2 + fi + cd "${CURRENT_DIR}" || exit 1 + else + echo "[ERROR] ${PRGNAME} - Not found download file(${EXPAND_TOP_DIR}/${K2HR3_CLI_ZIP_NAME})" 1>&2 + fi + else + echo "[ERROR] ${PRGNAME} - Failed to download ${K2HR3_CLI_REPO_NAME} archive" 1>&2 + fi +fi + +# +# Check download +# +if [ "X${K2HR3_CLI_EXPAND_DIR}" = "X" ]; then + rm -rf "${EXPAND_TOP_DIR}" + exit 1 +fi +if [ ! -d "${K2HR3_CLI_EXPAND_DIR}" ]; then + rm -rf "${EXPAND_TOP_DIR}" + exit 1 +fi + +# +# Copy(mv) files/directories +# +echo "[INFO] ${PRGNAME} - Copy ${K2HR3_CLI_REPO_NAME} files/directories" 1>&2 + +# +# [TODO] ... Directories may still be added. 
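+# [NOTE]
+# After the copies below succeed, the working tree contains at least
+#   src/k2hr3
+#   src/libexec/{common,config,token,resource,policy,role,userdata}
+#   test/util_request.sh and test/util_test.sh
+# taken from k2hr3_cli. Running this script again with --clean removes them.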
+# +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/k2hr3" "${SRCTOP}/src" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/common" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/config" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/token" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/resource" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/policy" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/role" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/src/libexec/userdata" "${SRCTOP}/src/libexec" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/test/util_request.sh" "${SRCTOP}/test" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) +cp -r "${K2HR3_CLI_EXPAND_DIR}/test/util_test.sh" "${SRCTOP}/test" || (rm -rf "${EXPAND_TOP_DIR}"; exit 1) + +rm -rf "${EXPAND_TOP_DIR}" + +echo "[INFO] ${PRGNAME} - Finish" 1>&2 + +exit 0 + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..a344c6a --- /dev/null +++ b/configure.ac @@ -0,0 +1,90 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. +# +AC_PREREQ([2.63]) +AC_INIT(k2hdkc-dbaas-cli, m4_esyscmd([tr -d '\n' < $(pwd)/RELEASE_VERSION])) +AM_INIT_AUTOMAKE() + +# +# Checks for programs. 
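+#
+# [NOTE] (about AC_INIT above)
+# The package version is read verbatim from the RELEASE_VERSION file, which
+# autogen.sh keeps up to date via buildutils/make_release_version_file.sh.
+# For example, if RELEASE_VERSION contained "1.0.0" (a hypothetical value),
+# the generated configure would identify itself as k2hdkc-dbaas-cli 1.0.0.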
+# +AC_PROG_AWK +AC_PROG_INSTALL +AC_PROG_LN_S +AC_PROG_MAKE_SET + +# +# Symbols for buildutils +# +AC_SUBST([GIT_DOMAIN], "github.com") +AC_SUBST([GIT_ORG], "yahoojapan") +AC_SUBST([GIT_REPO], "k2hdkc_dbaas_cli") +AC_SUBST([DEV_EMAIL], "`echo ${DEBEMAIL:-antpickax-support@mail.yahoo.co.jp}`") +AC_SUBST([DEV_NAME], "`echo ${DEBFULLNAME:-K2HR3_DEVELOPER}`") + +AC_SUBST([RPMCHANGELOG], "`$(pwd)/buildutils/make_rpm_changelog.sh $(pwd)/ChangeLog`") +AC_SUBST([SHORTDESC], "K2HDKC DBaaS Command Line Interface(K2HR3 CLI Plugin)") +AC_SUBST([PKG_VERSION], "`$(pwd)/buildutils/make_variables.sh -pkg_version`") +AC_SUBST([PKGLICENSE], "`grep License COPYING | sed 's/ License//g'`") +AC_SUBST([DEBCOPYING], "`tail -n +5 COPYING | sed 's/^$/./g' | sed 's/^/ /g'`") +AC_SUBST([DEBHELPER_DEP], "`$(pwd)/buildutils/make_variables.sh -debhelper_dep`") +AC_SUBST([RPMPKG_GROUP], ["`$(pwd)/buildutils/make_variables.sh -rpmpkg_group | sed 's#NEEDRPMGROUP#Group: Applications/Other#g'`"]) +AC_SUBST([CONFIGUREWITHOPT], "") +AM_SUBST_NOTMAKE([CURRENTREV]) +AM_SUBST_NOTMAKE([RPMCHANGELOG]) +AM_SUBST_NOTMAKE([SHORTDESC]) +AM_SUBST_NOTMAKE([DEBCOPYING]) +AM_SUBST_NOTMAKE([DEBHELPER_DEP]) +AM_SUBST_NOTMAKE([RPMPKG_GROUP]) +AM_SUBST_NOTMAKE([CONFIGUREWITHOPT]) + +# +# pkg-config for old version +# +AC_PATH_PROG(PKG_CONFIG, pkg-config, no) +AS_IF([test "$PKG_CONFIG" = "no"], [AC_MSG_WARN(You have to install pkg-config to compile $PACKAGE_NAME v$PACKAGE_VERSION)]) + +# +# Config files +# +AC_CONFIG_FILES([Makefile + src/Makefile + src/libexec/Makefile + src/libexec/database/Makefile + test/Makefile + test/snapshots/Makefile + buildutils/Makefile + buildutils/control + buildutils/copyright + buildutils/k2hdkc-dbaas-cli.spec]) + +AC_OUTPUT + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/Makefile.am b/src/Makefile.am new file mode 100644 index 0000000..f3b43c0 --- /dev/null +++ b/src/Makefile.am @@ -0,0 +1,30 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +SUBDIRS = libexec + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/Makefile.am b/src/libexec/Makefile.am new file mode 100644 index 0000000..c7adeda --- /dev/null +++ b/src/libexec/Makefile.am @@ -0,0 +1,30 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. 
+# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +SUBDIRS = database + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/CREDIT b/src/libexec/database/CREDIT new file mode 100644 index 0000000..9290cc4 --- /dev/null +++ b/src/libexec/database/CREDIT @@ -0,0 +1,16 @@ +----------------------------------------------------------- +K2HDKC DBaaS Command Line Interface - VERSION_NUMBER + +Copyright 2021 Yahoo! Japan Corporation. + +The K2HDKC DBaaS CLI (Command Line Interface of Database as a +Service for K2HDKC) is a tool for building a K2HDKC cluster +in conjunction with K2HR3. +The Trove version of K2HDKC DBaaS is available, but this +K2HDKC DBaaS CLI allows you to build K2HDKC clusters without +the need for a Trove system. +With the basic components of OpenStack and the K2HR3 system +that works with it, you can easily build a K2HD KC cluster +using the K2HDKC DBaaS CLI. + +This software is released under the MIT License. diff --git a/src/libexec/database/Makefile.am b/src/libexec/database/Makefile.am new file mode 100644 index 0000000..696bbd5 --- /dev/null +++ b/src/libexec/database/Makefile.am @@ -0,0 +1,41 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +EXTRA_DIST = CREDIT \ + VERSION \ + help.sh \ + summary.sh \ + command.sh \ + variables.sh \ + options.sh \ + functions.sh \ + openstack.sh \ + k2hdkc_dbaas_resource.templ \ + k2hdkc_dbaas_resource_keys.config \ + k2hdkc_dbaas_create_host.templ + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/command.sh b/src/libexec/database/command.sh new file mode 100644 index 0000000..0634a97 --- /dev/null +++ b/src/libexec/database/command.sh @@ -0,0 +1,1122 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
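+#
+# [NOTE]
+# This file is loaded by the k2hr3_cli front-end (${BINDIR}/${BINNAME},
+# provided by the k2hr3-cli package) when the "database" mode is selected.
+# The argument layout parsed below follows roughly this sketch
+# (cluster/host names are placeholders, [options...] are the DBaaS options):
+#   <k2hr3 cli> database create <cluster name> [options...]
+#   <k2hr3 cli> database show <type> <target> <cluster name> [options...]
+#   <k2hr3 cli> database add <type> <target> <cluster name> <host name> [options...]
+#   <k2hr3 cli> database delete <type> <cluster name> [<host name>] [options...]
+#   <k2hr3 cli> database openstack <type> [options...]
+#   <k2hr3 cli> database list <type> [options...]
+# where <type> is one of the _DATABASE_COMMAND_TYPE_* values defined below,
+# <target> is "server" or "slave", and <host name> is read for delete only
+# when <type> is "host".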
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# Variables +#-------------------------------------------------------------- +# +# k2hr3 bin +# +K2HR3CLIBIN=${BINDIR}/${BINNAME} + +# +# Directry Path +# +# shellcheck disable=SC2034 +_DATABASE_CURRENT_DIR=${LIBEXECDIR}/${K2HR3CLI_MODE} + +# +# SubCommand(2'nd option) +# +_DATABASE_COMMAND_SUB_CREATE="create" +_DATABASE_COMMAND_SUB_SHOW="show" +_DATABASE_COMMAND_SUB_ADD="add" +_DATABASE_COMMAND_SUB_DELETE="delete" +_DATABASE_COMMAND_SUB_OPENSTACK="openstack" +_DATABASE_COMMAND_SUB_LIST="list" + +# +# option for type +# +_DATABASE_COMMAND_TYPE_HOST="host" +_DATABASE_COMMAND_TYPE_CONF="conf" +_DATABASE_COMMAND_TYPE_CONF_LONG="configuration" +_DATABASE_COMMAND_TYPE_CLUSTER="cluster" +_DATABASE_COMMAND_TYPE_OPUTOKEN="utoken" +_DATABASE_COMMAND_TYPE_OPTOKEN="token" +_DATABASE_COMMAND_TYPE_IMAGES="images" +_DATABASE_COMMAND_TYPE_FLAVORS="flavors" + +# +# option for target +# +_DATABASE_COMMAND_TARGET_SERVER="server" +_DATABASE_COMMAND_TARGET_SLAVE="slave" + +#-------------------------------------------------------------- +# Load Option name for DBaaS +#-------------------------------------------------------------- +# +# DBaaS option +# +if [ -f "${LIBEXECDIR}/database/options.sh" ]; then + . "${LIBEXECDIR}/database/options.sh" +fi + +# +# Utility functions +# +if [ -f "${LIBEXECDIR}/database/functions.sh" ]; then + . "${LIBEXECDIR}/database/functions.sh" +fi + +# +# OpenStack utility +# +if [ -f "${LIBEXECDIR}/database/openstack.sh" ]; then + . "${LIBEXECDIR}/database/openstack.sh" +fi + +# +# Check dbaas options +# +parse_dbaas_option "$@" +if [ $? -ne 0 ]; then + exit 1 +else + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} +fi + +#-------------------------------------------------------------- +# Parse arguments +#-------------------------------------------------------------- +# +# Sub Command +# +parse_noprefix_option "$@" +if [ $? -ne 0 ]; then + exit 1 +fi +if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_SUBCOMMAND="" +else + # + # Always using lower case + # + K2HR3CLI_SUBCOMMAND=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") +fi +# shellcheck disable=SC2086 +set -- ${K2HR3CLI_OPTION_PARSER_REST} + +# +# After sub command +# +if [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_CREATE}" ]; then + # + # Create Cluster(cluster name) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_CLUSTER_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_CLUSTER_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_SHOW}" ]; then + # + # Show host/configuration(type) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_SHOW_TYPE="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_SHOW_TYPE=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Show host/configuration(target) + # + parse_noprefix_option "$@" + if [ $? 
-ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_SHOW_TARGET="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_SHOW_TARGET=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Show host/configuration(cluster name) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_CLUSTER_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_CLUSTER_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_ADD}" ]; then + # + # Add host(type) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_ADD_TYPE="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_ADD_TYPE=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Add host(target) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_ADD_TARGET="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_ADD_TARGET=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Add host(cluster name) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_CLUSTER_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_CLUSTER_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Add host(hostname) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_HOST_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_HOST_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_DELETE}" ]; then + # + # Delete host/cluster(type) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_DELETE_TYPE="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_DELETE_TYPE=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + # + # Delete host/cluster(cluster name) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_CLUSTER_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_CLUSTER_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + if [ "X${K2HR3CLI_DBAAS_DELETE_TYPE}" = "X${_DATABASE_COMMAND_TYPE_HOST}" ]; then + # + # Delete host(hostname) + # + parse_noprefix_option "$@" + if [ $? 
-ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_HOST_NAME="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_HOST_NAME=${K2HR3CLI_OPTION_NOPREFIX} + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + fi + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_OPENSTACK}" ]; then + # + # OpenStack (type) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_OPENSTACK_TYPE="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_OPENSTACK_TYPE=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} + + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_LIST}" ]; then + # + # List (type) + # + parse_noprefix_option "$@" + if [ $? -ne 0 ]; then + exit 1 + fi + if [ "X${K2HR3CLI_OPTION_NOPREFIX}" = "X" ]; then + K2HR3CLI_DBAAS_LIST_TYPE="" + else + # + # Always using lower case + # + K2HR3CLI_DBAAS_LIST_TYPE=$(to_lower "${K2HR3CLI_OPTION_NOPREFIX}") + fi + # shellcheck disable=SC2086 + set -- ${K2HR3CLI_OPTION_PARSER_REST} +fi + +#-------------------------------------------------------------- +# Processing +#-------------------------------------------------------------- +# +# Check URI +# +if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" = "X" ]; then + prn_warn "The URI for OpenStack(Identity) is not specified, some commands require this. Please specify with the ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option, K2HR3CLI_OPENSTACK_IDENTITY_URI environment variable, or configuration." +fi + +# +# Check Cluster name parameter +# +if [ "X${K2HR3CLI_SUBCOMMAND}" != "X${_DATABASE_COMMAND_SUB_OPENSTACK}" ] && [ "X${K2HR3CLI_SUBCOMMAND}" != "X${_DATABASE_COMMAND_SUB_LIST}" ] && [ "X${K2HR3CLI_DBAAS_CLUSTER_NAME}" = "X" ]; then + prn_err "Cluster name is not specified, please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 +fi + +# +# Main +# +if [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_CREATE}" ]; then + # + # DATABASE CREATE + # + + # + # Get Scoped Token + # + complement_scoped_token + if [ $? -ne 0 ]; then + exit 1 + fi + prn_dbg "${K2HR3CLI_SCOPED_TOKEN}" + + # + # Create new token + # + complement_op_token + if [ $? -ne 0 ]; then + prn_err "Failed to create OpenStack Scoped Token" + exit 1 + fi + + # + # Get resource template file path + # + dbaas_get_resource_filepath + if [ $? -ne 0 ]; then + exit 1 + fi + + # + # Get Tenant name from Scoped Token + # + _DATABASE_TENANT_NAME=$(dbaas_get_current_tenant) + if [ $? 
-ne 0 ]; then + exit 1 + fi + + #------------------------------------------------------ + # Create Main Resource + #------------------------------------------------------ + # + # Make String Resource/Keys Paramter + # + _DATABASE_CONFIG_FILE_TMP="/tmp/.k2hdkc_dbaas_resource_$$.templ" + _DATABASE_RESOURCE_DATE=$(date) + sed -e "s/__K2HDKC_DBAAS_CLI_DATE__/${_DATABASE_RESOURCE_DATE}/g" -e "s/__K2HDKC_DBAAS_CLI_TENANT_NAME__/${_DATABASE_TENANT_NAME}/g" -e "s/__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/${K2HR3CLI_DBAAS_CLUSTER_NAME}/g" "${_DATABASE_CONFIG_FILE}" > "${_DATABASE_CONFIG_FILE_TMP}" 2>/dev/null + + # + # Make resource keys data + # + _DATABASE_RESOURCE_KEYS_RUN_USER="" + if [ "X${K2HR3CLI_OPT_DBAAS_RUN_USER}" != "X" ]; then + _DATABASE_RESOURCE_KEYS_RUN_USER=",\"k2hdkc-dbaas-proc-user\":\"${K2HR3CLI_OPT_DBAAS_RUN_USER}\"" + fi + if [ "X${K2HR3CLI_OPT_DBAAS_CREATE_USER}" = "X1" ]; then + _DATABASE_RESOURCE_KEYS_RUN_USER="${_DATABASE_RESOURCE_KEYS_RUN_USER},\"k2hdkc-dbaas-add-user\":1" + fi + _DATABASE_RESOURCE_KEYS="{\"cluster-name\":\"${K2HR3CLI_DBAAS_CLUSTER_NAME}\",\"chmpx-server-port\":${K2HR3CLI_OPT_DBAAS_SERVER_PORT},\"chmpx-server-ctlport\":${K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT},\"chmpx-slave-ctlport\":${K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT}${_DATABASE_RESOURCE_KEYS_RUN_USER}}" + + # + # Run k2hr3 + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource create "${K2HR3CLI_DBAAS_CLUSTER_NAME}" -type string --datafile "${_DATABASE_CONFIG_FILE_TMP}" --keys "${_DATABASE_RESOURCE_KEYS}" > /dev/null + + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\"" + rm -f "${_DATABASE_CONFIG_FILE_TMP}" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" Resource" + rm -f "${_DATABASE_CONFIG_FILE_TMP}" + + #------------------------------------------------------ + # Create Sub Server/Slave Resource + #------------------------------------------------------ + # + # Load keys data + # + dbaas_load_resource_keys + if [ $? -ne 0 ]; then + exit 1 + fi + + # + # Make Keys Paramter for server + # + _DATABASE_RESOURCE_SERVER_KEYS="{\"chmpx-mode\":\"SERVER\",\"k2hr3-init-packages\":\"${DATABASE_SERVER_KEY_INI_PKG}\",\"k2hr3-init-packagecloud-packages\":\"${DATABASE_SERVER_KEY_INI_PCPKG}\",\"k2hr3-init-systemd-packages\":\"${DATABASE_SERVER_KEY_INI_SYSPKG}\"}" + + # + # Run k2hr3 for server resource + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource create "${K2HR3CLI_DBAAS_CLUSTER_NAME}/server" --keys "${_DATABASE_RESOURCE_SERVER_KEYS}" > /dev/null + + if [ $? 
-ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/server\" Resource" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/server\" Resource" + + # + # Make Keys Paramter for slave + # + _DATABASE_RESOURCE_SLAVE_KEYS="{\"chmpx-mode\":\"SLAVE\",\"k2hr3-init-packages\":\"${DATABASE_SLAVE_KEY_INI_PKG}\",\"k2hr3-init-packagecloud-packages\":\"${DATABASE_SLAVE_KEY_INI_PCPKG}\",\"k2hr3-init-systemd-packages\":\"${DATABASE_SLAVE_KEY_INI_SYSPKG}\"}" + + # + # Run k2hr3 for server resource + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource create "${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave" --keys "${_DATABASE_RESOURCE_SLAVE_KEYS}" > /dev/null + + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave\" Resource" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave\" Resource" + + #------------------------------------------------------ + # Create Main Policy + #------------------------------------------------------ + # + # Make Resources Paramter + # + _DATABASE_POLICY_RESOURCES="[\"yrn:yahoo:::${_DATABASE_TENANT_NAME}:resource:${K2HR3CLI_DBAAS_CLUSTER_NAME}/server\",\"yrn:yahoo:::${_DATABASE_TENANT_NAME}:resource:${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave\"]" + + # + # Run k2hr3 + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" policy create "${K2HR3CLI_DBAAS_CLUSTER_NAME}" --effect 'allow' --action 'yrn:yahoo::::action:read' --resource "${_DATABASE_POLICY_RESOURCES}" >/dev/null + + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" Policy" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" Policy" + + #------------------------------------------------------ + # Create Main Role + #------------------------------------------------------ + # + # Make Policies Paramter + # + _DATABASE_ROLE_POLICIES="yrn:yahoo:::${_DATABASE_TENANT_NAME}:policy:${K2HR3CLI_DBAAS_CLUSTER_NAME}" + + # + # Run k2hr3 + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role create "${K2HR3CLI_DBAAS_CLUSTER_NAME}" --policies "${_DATABASE_ROLE_POLICIES}" >/dev/null + + if [ $? 
-ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" Role" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" Role" + + #------------------------------------------------------ + # Create Server/Slave Role + #------------------------------------------------------ + # + # Run k2hr3 for server + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role create "${K2HR3CLI_DBAAS_CLUSTER_NAME}/server" >/dev/null + + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/server\" Role" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/server\" Role" + + # + # Run k2hr3 for slave + # + K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role create "${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave" >/dev/null + + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave\" Role" + exit 1 + fi + prn_msg "${CGRN}Succeed${CDEF} : Phase : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}/slave\" Role" + + #------------------------------------------------------ + # Create Security Group on OpenStack + #------------------------------------------------------ + # + # Create security group + # + if [ "${K2HR3CLI_OPENSTACK_NO_SECGRP}" -ne 1 ]; then + if ! check_op_security_group "${K2HR3CLI_DBAAS_CLUSTER_NAME}"; then + # + # Security group for server + # + create_op_security_group "${K2HR3CLI_DBAAS_CLUSTER_NAME}" 0 + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\"cluster : Could not create security group for server." + exit 1 + fi + + # + # Security group for slave + # + create_op_security_group "${K2HR3CLI_DBAAS_CLUSTER_NAME}" 1 + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Create \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\"cluster : Could not create security group for slave." + exit 1 + fi + fi + fi + + # + # Finished + # + prn_msg "${CGRN}Succeed${CDEF} : Registration of cluster \"${K2HR3CLI_DBAAS_CLUSTER_NAME}\" with K2HR3 is complete" + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_SHOW}" ]; then + # + # DATABASE SHOW + # + + # + # Get Scoped Token + # + complement_scoped_token + if [ $? 
-ne 0 ]; then + exit 1 + fi + prn_dbg "${K2HR3CLI_SCOPED_TOKEN}" + + if [ "X${K2HR3CLI_DBAAS_SHOW_TYPE}" = "X${_DATABASE_COMMAND_TYPE_HOST}" ]; then + # + # DATABASE SHOW HOST + # + if [ "X${K2HR3CLI_DBAAS_SHOW_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SERVER}" ] && [ "X${K2HR3CLI_DBAAS_SHOW_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SLAVE}" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND} ${K2HR3CLI_DBAAS_SHOW_TYPE}\" must also specify the (${_DATABASE_COMMAND_TARGET_SERVER} or ${_DATABASE_COMMAND_TARGET_SLAVE}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + + # + # Run k2hr3 for host in role + # + _DATABSE_SHOW_BACKUP_OPT_JSON=${K2HR3CLI_OPT_JSON} + K2HR3CLI_OPT_JSON=0 + + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_OPT_JSON="${K2HR3CLI_OPT_JSON}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role show "${K2HR3CLI_DBAAS_CLUSTER_NAME}/${K2HR3CLI_DBAAS_SHOW_TARGET}") + + K2HR3CLI_OPT_JSON=${_DATABSE_SHOW_BACKUP_OPT_JSON} + + # + # Check Result + # + if [ $? -ne 0 ]; then + if [ "X${_DATABASE_RESULT}" = "X" ]; then + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Failed Sub Process" + else + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Sub Process Result(${_DATABASE_RESULT})" + fi + exit 1 + fi + + # + # Parse Result + # + jsonparser_parse_json_string "${_DATABASE_RESULT}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Failed to parse result" + exit 1 + fi + + # + # Parse Result + # + dbaas_show_all_hosts "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster" + rm -f "${JP_PAERSED_FILE}" + exit 1 + fi + rm -f "${JP_PAERSED_FILE}" + + # + # Print result + # + jsonparser_dump_string "${DATABSE_HOST_LIST}" + if [ "X${K2HR3CLI_OPT_JSON}" != "X1" ]; then + pecho "" + fi + + elif [ "X${K2HR3CLI_DBAAS_SHOW_TYPE}" = "X${_DATABASE_COMMAND_TYPE_CONF}" ] || [ "X${K2HR3CLI_DBAAS_SHOW_TYPE}" = "X${_DATABASE_COMMAND_TYPE_CONF_LONG}" ]; then + # + # DATABASE SHOW CONFIGURATION + # + if [ "X${K2HR3CLI_DBAAS_SHOW_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SERVER}" ] && [ "X${K2HR3CLI_DBAAS_SHOW_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SLAVE}" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND} ${K2HR3CLI_DBAAS_SHOW_TYPE}\" must also specify the (${_DATABASE_COMMAND_TARGET_SERVER} or ${_DATABASE_COMMAND_TARGET_SLAVE}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." 
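+			#
+			# [MEMO] A valid form, as parsed above, is:
+			#          <cli> database show configuration server <cluster name>
+			#          <cli> database show configuration slave <cluster name>
+			#        where <cli> stands for ${BINNAME} (the installed k2hr3 CLI
+			#        command) and "database" is assumed to be the value of
+			#        ${K2HR3CLI_MODE} for this plugin.
+			#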
+ exit 1 + fi + + # + # Run k2hr3 for server resource + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_OPT_JSON="${K2HR3CLI_OPT_JSON}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource show "${K2HR3CLI_DBAAS_CLUSTER_NAME}/${K2HR3CLI_DBAAS_SHOW_TARGET}" --expand) + + # + # Check Result + # + if [ $? -ne 0 ]; then + if [ "X${_DATABASE_RESULT}" = "X" ]; then + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} configuration for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Failed Sub Process" + else + prn_msg "${CRED}Failed${CDEF} : Show ${K2HR3CLI_DBAAS_SHOW_TARGET} configuration for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Sub Process Result(${_DATABASE_RESULT})" + fi + exit 1 + fi + + # + # Display Result + # + pecho "${_DATABASE_RESULT}" + + else + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the type(${_DATABASE_COMMAND_TYPE_HOST} or ${_DATABASE_COMMAND_TYPE_CONF_LONG}(${_DATABASE_COMMAND_TYPE_CONF})), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_ADD}" ]; then + # + # DATABASE ADD + # + if [ "X${K2HR3CLI_DBAAS_ADD_TYPE}" != "X${_DATABASE_COMMAND_TYPE_HOST}" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the (${_DATABASE_COMMAND_TYPE_HOST}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + if [ "X${K2HR3CLI_DBAAS_ADD_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SERVER}" ] && [ "X${K2HR3CLI_DBAAS_ADD_TARGET}" != "X${_DATABASE_COMMAND_TARGET_SLAVE}" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the (${_DATABASE_COMMAND_TARGET_SERVER} or ${_DATABASE_COMMAND_TARGET_SLAVE}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + if [ "X${K2HR3CLI_DBAAS_HOST_NAME}" = "X" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must specify the host name, please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + + # + # Get Scoped Token + # + complement_scoped_token + if [ $? -ne 0 ]; then + exit 1 + fi + prn_dbg "${K2HR3CLI_SCOPED_TOKEN}" + + # + # Get OpenStack Scoped Token + # + complement_op_token + if [ $? -ne 0 ]; then + prn_err "Failed to get OpenStack Scoped Token" + exit 1 + fi + + # + # Role name + # + _DATABASE_ADD_HOST_CLUSTER=${K2HR3CLI_DBAAS_CLUSTER_NAME}/${K2HR3CLI_DBAAS_ADD_TARGET} + + # + # Check Existed Role Token + # + _DATABASE_ADD_HOST_ROLETOKEN="" + _DATABASE_ADD_HOST_REGISTERPATH="" + if [ "X${K2HR3CLI_OPT_DBAAS_CREATE_ROLETOKEN}" != "X1" ]; then + dbaas_get_existed_role_token "${_DATABASE_ADD_HOST_CLUSTER}" + if [ $? 
-eq 0 ]; then + _DATABASE_ADD_HOST_ROLETOKEN=${DBAAS_FOUND_ROLETOKEN} + _DATABASE_ADD_HOST_REGISTERPATH=${DBAAS_FOUND_REGISTERPATH} + fi + fi + + # + # Create New Role Token + # + if [ "X${_DATABASE_ADD_HOST_ROLETOKEN}" = "X" ]; then + dbaas_create_role_token "${_DATABASE_ADD_HOST_CLUSTER}" + if [ $? -eq 0 ]; then + _DATABASE_ADD_HOST_ROLETOKEN=${DBAAS_NEW_ROLETOKEN} + _DATABASE_ADD_HOST_REGISTERPATH=${DBAAS_NEW_REGISTERPATH} + fi + fi + + # + # Check Role Token + # + if [ "${_DATABASE_ADD_HOST_ROLETOKEN}" = "X" ] || [ "${_DATABASE_ADD_HOST_REGISTERPATH}" = "X" ]; then + prn_msg "${CRED}Failed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Could not create(find) Role Token." + exit 1 + fi + + # + # Set User Data Script + # + # [MEMO] + # #include + # ${K2HR3CLI_API_URI}/v1/userdata/${_DATABASE_ADD_HOST_REGISTERPATH} + # + _DATABASE_ADD_HOST_USD="#include\n${K2HR3CLI_API_URI}/v1/userdata/${_DATABASE_ADD_HOST_REGISTERPATH}" + _DATABASE_ADD_HOST_USD64=$(pecho -n "${_DATABASE_ADD_HOST_USD}" | sed 's/\\n/\n/g' | base64 | tr -d '\n') + + # + # Check security group + # + _DATABASE_ADD_HOST_SECGRP="" + if check_op_security_group "${K2HR3CLI_DBAAS_CLUSTER_NAME}"; then + if [ "X${K2HR3CLI_DBAAS_ADD_TARGET}" = "X${_DATABASE_COMMAND_TARGET_SERVER}" ]; then + _DATABASE_ADD_HOST_SECGRP=$(get_op_security_group_name "${K2HR3CLI_DBAAS_CLUSTER_NAME}" 0) + else + _DATABASE_ADD_HOST_SECGRP=$(get_op_security_group_name "${K2HR3CLI_DBAAS_CLUSTER_NAME}" 1) + fi + fi + + # + # Check Keypair + # + if [ "X${K2HR3CLI_OPENSTACK_KEYPAIR}" != "X" ]; then + check_op_keypair "${K2HR3CLI_OPENSTACK_KEYPAIR}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : Could not find keypair(${K2HR3CLI_OPENSTACK_KEYPAIR})." + exit 1 + fi + fi + + # + # Check image + # + if [ "X${K2HR3CLI_OPENSTACK_IMAGE_ID}" = "X" ]; then + if ! check_op_image "${K2HR3CLI_OPENSTACK_IMAGE}"; then + prn_msg "${CRED}Failed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : The OS image name is not specified or wrong image name(${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG} option)." + exit 1 + fi + fi + + # + # Check flavor + # + if [ "X${K2HR3CLI_OPENSTACK_FLAVOR_ID}" = "X" ]; then + if ! check_op_flavor "${K2HR3CLI_OPENSTACK_FLAVOR}"; then + prn_msg "${CRED}Failed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster : The flavor name is not specified or flavor name(${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG} option)." + exit 1 + fi + fi + + # + # Make create host post data + # + _DATABASE_ADD_HOST_POST_DATA=$(dbaas_get_openstack_launch_post_data \ + "${K2HR3CLI_DBAAS_HOST_NAME}" \ + "${K2HR3CLI_OPENSTACK_IMAGE_ID}" \ + "${K2HR3CLI_OPENSTACK_FLAVOR_ID}" \ + "${_DATABASE_ADD_HOST_USD64}" \ + "${K2HR3CLI_OPENSTACK_KEYPAIR}" \ + "${_DATABASE_ADD_HOST_SECGRP}") + + # + # Create Virtual Machine + # + create_op_host "${_DATABASE_ADD_HOST_POST_DATA}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host(${K2HR3CLI_DBAAS_HOST_NAME}) for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster." + exit 1 + fi + + prn_msg "${CGRN}Succeed${CDEF} : Add ${K2HR3CLI_DBAAS_ADD_TARGET} host(${K2HR3CLI_DBAAS_HOST_NAME} - \"${K2HR3CLI_OPENSTACK_CREATED_SERVER_ID}\") for ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster." 
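+
+	#
+	# [MEMO]
+	# The launched instance should register itself into the
+	# "${K2HR3CLI_DBAAS_CLUSTER_NAME}/${K2HR3CLI_DBAAS_ADD_TARGET}" role through
+	# the user data script set above. An illustrative way to confirm it afterwards
+	# (assuming ${BINNAME} is the k2hr3 CLI command and this plugin runs as the
+	# "database" mode) is:
+	#   ${BINNAME} database show host ${K2HR3CLI_DBAAS_ADD_TARGET} ${K2HR3CLI_DBAAS_CLUSTER_NAME}
+	#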
+ +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_DELETE}" ]; then + # + # DATABASE DELETE + # + + # + # Get Scoped Token + # + complement_scoped_token + if [ $? -ne 0 ]; then + exit 1 + fi + prn_dbg "${K2HR3CLI_SCOPED_TOKEN}" + + # + # Get OpenStack Scoped Token + # + complement_op_token + if [ $? -ne 0 ]; then + prn_err "Failed to get OpenStack Scoped Token" + exit 1 + fi + + if [ "X${K2HR3CLI_DBAAS_DELETE_TYPE}" = "X${_DATABASE_COMMAND_TYPE_HOST}" ]; then + # + # HOST DELETE + # + if [ "X${K2HR3CLI_DBAAS_HOST_NAME}" = "X" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND} ${K2HR3CLI_DBAAS_DELETE_TYPE}\" must also specify the hostname, please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + + # + # Search host in roles + # + _DBAAS_FIND_HOST_ROLE="" + dbaas_find_role_host "${K2HR3CLI_DBAAS_CLUSTER_NAME}/${_DATABASE_COMMAND_TARGET_SERVER}" "${K2HR3CLI_DBAAS_HOST_NAME}" + if [ $? -ne 0 ]; then + dbaas_find_role_host "${K2HR3CLI_DBAAS_CLUSTER_NAME}/${_DATABASE_COMMAND_TARGET_SLAVE}" "${K2HR3CLI_DBAAS_HOST_NAME}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Not found ${K2HR3CLI_DBAAS_HOST_NAME} in ${K2HR3CLI_DBAAS_CLUSTER_NAME} role." + exit 1 + else + _DBAAS_FIND_HOST_ROLE="${K2HR3CLI_DBAAS_CLUSTER_NAME}/${_DATABASE_COMMAND_TARGET_SLAVE}" + fi + else + _DBAAS_FIND_HOST_ROLE="${K2HR3CLI_DBAAS_CLUSTER_NAME}/${_DATABASE_COMMAND_TARGET_SERVER}" + fi + + # + # Delete host in OpenStack + # + if [ "X${DBAAS_FIND_ROLE_HOST_CUK}" != "X" ]; then + delete_op_host "${DBAAS_FIND_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Delete ${K2HR3CLI_DBAAS_HOST_NAME} from OpenStack." + exit 1 + fi + else + prn_warn "Found ${K2HR3CLI_DBAAS_HOST_NAME} host in ${K2HR3CLI_DBAAS_CLUSTER_NAME} role, but it does not have Host id for opensteck. Then could not delete it from OpenStack." + fi + + # + # Delete host from role + # + dbaas_delete_role_host "${_DBAAS_FIND_HOST_ROLE}" "${DBAAS_FIND_ROLE_HOST_NAME}" "${DBAAS_FIND_ROLE_HOST_PORT}" "${DBAAS_FIND_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Delete ${K2HR3CLI_DBAAS_HOST_NAME} from ${K2HR3CLI_DBAAS_CLUSTER_NAME} role" + exit 1 + fi + + prn_msg "${CGRN}Succeed${CDEF} : Delete host ${K2HR3CLI_DBAAS_HOST_NAME} from ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster(OpenStack and K2HR3)." + + elif [ "X${K2HR3CLI_DBAAS_DELETE_TYPE}" = "X${_DATABASE_COMMAND_TYPE_CLUSTER}" ]; then + # + # CLUSTER DELETE + # + + # + # Special Message and need confirm + # + if [ "X${K2HR3CLI_OPENSTACK_CONFIRM_YES}" != "X1" ]; then + _OLD_K2HR3CLI_OPT_INTERACTIVE=${K2HR3CLI_OPT_INTERACTIVE} + K2HR3CLI_OPT_INTERACTIVE=1 + + completion_variable_auto "_DBAAS_DELETE_CONFIRM" "${CRED}[IMPORTANT CONFIRM]${CDEF} You will lose all data/server in your cluster, Do you still want to run it? (y/n) " 0 + if [ "X${_DBAAS_DELETE_CONFIRM}" != "Xy" ] && [ "X${_DBAAS_DELETE_CONFIRM}" != "Xyes" ] && [ "X${_DBAAS_DELETE_CONFIRM}" != "XY" ] && [ "X${_DBAAS_DELETE_CONFIRM}" != "XYES" ]; then + exit 0 + fi + K2HR3CLI_OPT_INTERACTIVE=${_OLD_K2HR3CLI_OPT_INTERACTIVE} + fi + prn_msg "${CRED}[NOTICE] Delete all of the cluster configuration, data, cluster hosts, and so on.${CDEF}" + + # + # Delete all host from OpenStack and K2HR3 + # + dbaas_delete_role_host_all "${K2HR3CLI_DBAAS_CLUSTER_NAME}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Delete ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster, because could not delele a host." 
+ exit 1 + fi + + # + # Delete Security Group + # + delete_op_security_groups "${K2HR3CLI_DBAAS_CLUSTER_NAME}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Delete ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster, because could not delele security groups." + exit 1 + fi + + # + # Delete all in K2HR3 + # + dbaas_delete_all_k2hr3 "${K2HR3CLI_DBAAS_CLUSTER_NAME}" + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : Delete ${K2HR3CLI_DBAAS_CLUSTER_NAME} in K2HR3." + exit 1 + fi + + prn_msg "${CGRN}Succeed${CDEF} : Delete all ${K2HR3CLI_DBAAS_CLUSTER_NAME} cluster(OpenStack and K2HR3)." + + else + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the (${_DATABASE_COMMAND_TYPE_HOST} or ${_DATABASE_COMMAND_TYPE_CLUSTER}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_OPENSTACK}" ]; then + # + # OPENSTACK TOKEN + # + if [ "X${K2HR3CLI_DBAAS_OPENSTACK_TYPE}" = "X${_DATABASE_COMMAND_TYPE_OPUTOKEN}" ]; then + # + # CREATE UNSCOPED TOKEN + # + + # + # Clear current openstack token + # + K2HR3CLI_OPENSTACK_TOKEN= + + # + # Create new token + # + complement_op_utoken + if [ $? -ne 0 ]; then + prn_err "Failed to create OpenStack Unscoped Token" + exit 1 + fi + + # + # Save + # + if [ "X${K2HR3CLI_OPT_SAVE}" = "X1" ]; then + config_default_set_key "K2HR3CLI_OPENSTACK_TOKEN" "${K2HR3CLI_OPENSTACK_TOKEN}" + if [ $? -ne 0 ]; then + prn_err "Created OpenStack Unscoped Token, but failed to save it to configuration." + exit 1 + fi + fi + prn_msg "${K2HR3CLI_OPENSTACK_TOKEN}" + + elif [ "X${K2HR3CLI_DBAAS_OPENSTACK_TYPE}" = "X${_DATABASE_COMMAND_TYPE_OPTOKEN}" ]; then + # + # CREATE SCOPED TOKEN + # + + # + # Create new token + # + complement_op_token + if [ $? -ne 0 ]; then + prn_err "Failed to create OpenStack Scoped Token" + exit 1 + fi + + # + # Save + # + if [ "X${K2HR3CLI_OPT_SAVE}" = "X1" ]; then + config_default_set_key "K2HR3CLI_OPENSTACK_TOKEN" "${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + if [ $? -ne 0 ]; then + prn_err "Created OpenStack Scoped Token, but failed to save it to configuration." + exit 1 + fi + fi + prn_msg "${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + + else + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the token type(${_DATABASE_COMMAND_TYPE_OPUTOKEN} or ${_DATABASE_COMMAND_TYPE_OPTOKEN}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_LIST}" ]; then + # + # LIST IMAGES/FLAVORS + # + + # + # Get OpenStack Scoped Token + # + complement_op_token + if [ $? -ne 0 ]; then + prn_err "Failed to get OpenStack Scoped Token" + exit 1 + fi + + if [ "X${K2HR3CLI_DBAAS_LIST_TYPE}" = "X${_DATABASE_COMMAND_TYPE_IMAGES}" ]; then + # + # List images + # + display_op_image_list + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : List OpenStack Images." + return 1 + fi + + elif [ "X${K2HR3CLI_DBAAS_LIST_TYPE}" = "X${_DATABASE_COMMAND_TYPE_FLAVORS}" ]; then + # + # List flavors + # + + # + # Get tenant id + # + complement_op_tenant + if [ $? -ne 0 ]; then + prn_err "Failed to get OpenStack Tenant" + return 1 + fi + + display_op_flavor_list + if [ $? -ne 0 ]; then + prn_msg "${CRED}Failed${CDEF} : List OpenStack Flavors." 
+ return 1 + fi + else + prn_err "\"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_SUBCOMMAND}\" must also specify the list type(${_DATABASE_COMMAND_TYPE_IMAGES} or ${_DATABASE_COMMAND_TYPE_FLAVORS}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 + fi + +elif [ "X${K2HR3CLI_SUBCOMMAND}" = "X" ]; then + prn_err "\"${BINNAME} ${K2HR3CLI_MODE}\" must also specify the subcommand(${_DATABASE_COMMAND_SUB_CREATE}, ${_DATABASE_COMMAND_SUB_SHOW}, ${_DATABASE_COMMAND_SUB_ADD} or ${_DATABASE_COMMAND_SUB_DELETE}), please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 +else + prn_err "Unknown subcommand(\"${K2HR3CLI_SUBCOMMAND}\") is specified, please run \"${BINNAME} ${K2HR3CLI_MODE} ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})\" for confirmation." + exit 1 +fi + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/functions.sh b/src/libexec/database/functions.sh new file mode 100644 index 0000000..652f437 --- /dev/null +++ b/src/libexec/database/functions.sh @@ -0,0 +1,1480 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# Variables +#-------------------------------------------------------------- +# +# DBaaS Resoruce Template file name +# +_DATABASE_DEFAULT_CONFIG_FILENAME="k2hdkc_dbaas_resource.templ" +_DATABASE_DEFAULT_KEYS_FILENAME="k2hdkc_dbaas_resource_keys.config" + +# +# The template for OpenStack Nova +# +_DATABASE_DEFAULT_CREATE_HOST_FILENAME="k2hdkc_dbaas_create_host.templ" + +#-------------------------------------------------------------- +# Functions +#-------------------------------------------------------------- +# +# Get DBaaS Resoruce template file path +# +# $? : result +# Output +# _DATABASE_CONFIG_FILE : Configuration file for resource template +# +dbaas_get_resource_filepath() +{ + _DATABASE_CONFIG_FILE="" + + if [ "X${K2HR3CLI_DBAAS_CONFIG}" != "X" ]; then + # + # Specified custom dbaas configuration directory + # + if [ -d "${K2HR3CLI_DBAAS_CONFIG}" ]; then + if [ -f "${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" ]; then + _DATABASE_CONFIG_FILE="${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" + else + prn_err "Specified K2HDKC DBaaS CLI Configuration(${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CONFIG_FILENAME}) is not existed." + return 1 + fi + else + prn_err "Specified K2HDKC DBaaS CLI Configuration directory(${K2HR3CLI_DBAAS_CONFIG}) is not existed." 
+ return 1 + fi + else + # + # Check user home dbaas configuration + # + _DATABASE_USER_CONFIG_DIR=$(config_get_default_user_dir) + if [ -d "${_DATABASE_USER_CONFIG_DIR}" ]; then + if [ -f "${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" ]; then + _DATABASE_CONFIG_FILE="${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" + fi + fi + + if [ "X${_DATABASE_CONFIG_FILE}" = "X" ]; then + # + # Default dbaas configuration + # + if [ -d "${_DATABASE_CURRENT_DIR}" ]; then + if [ -f "${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" ]; then + _DATABASE_CONFIG_FILE="${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CONFIG_FILENAME}" + else + prn_err "Default K2HDKC DBaaS CLI Configuration(${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CONFIG_FILENAME}) is not existed." + return 1 + fi + else + prn_err "Default K2HDKC DBaaS CLI Directory(${_DATABASE_CURRENT_DIR}) is not existed." + return 1 + fi + fi + fi + return 0 +} + +# +# Load DBaaS Resoruce keys +# +# $? : result +# Output +# DATABASE_SERVER_KEY_INI_PKG : for "k2hr3-init-packages" key +# DATABASE_SERVER_KEY_INI_PCPKG : for "k2hr3-init-packagecloud-packages" key +# DATABASE_SERVER_KEY_INI_SYSPKG : for "k2hr3-init-systemd-packages" key +# +# DATABASE_SLAVE_KEY_INI_PKG : for "k2hr3-init-packages" key +# DATABASE_SLAVE_KEY_INI_PCPKG : for "k2hr3-init-packagecloud-packages" key +# DATABASE_SLAVE_KEY_INI_SYSPKG : for "k2hr3-init-systemd-packages" key +# +dbaas_load_resource_keys() +{ + _DATABASE_KEYS_FILE="" + + if [ "X${K2HR3CLI_DBAAS_CONFIG}" != "X" ]; then + # + # Specified custom dbaas configuration directory + # + if [ -d "${K2HR3CLI_DBAAS_CONFIG}" ]; then + if [ -f "${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_KEYS_FILENAME}" ]; then + _DATABASE_KEYS_FILE="${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_KEYS_FILENAME}" + else + prn_err "Specified K2HDKC DBaaS CLI Configuration(${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_KEYS_FILENAME}) is not existed." + return 1 + fi + else + prn_err "Specified K2HDKC DBaaS CLI Configuration directory(${K2HR3CLI_DBAAS_CONFIG}) is not existed." + return 1 + fi + else + # + # Check user home dbaas configuration + # + _DATABASE_USER_CONFIG_DIR=$(config_get_default_user_dir) + if [ -d "${_DATABASE_USER_CONFIG_DIR}" ]; then + if [ -f "${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_KEYS_FILENAME}" ]; then + _DATABASE_KEYS_FILE="${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_KEYS_FILENAME}" + fi + fi + + if [ "X${_DATABASE_KEYS_FILE}" = "X" ]; then + # + # Default dbaas configuration + # + if [ -d "${_DATABASE_CURRENT_DIR}" ]; then + if [ -f "${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_KEYS_FILENAME}" ]; then + _DATABASE_KEYS_FILE="${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_KEYS_FILENAME}" + else + prn_warn "Default K2HDKC DBaaS CLI Configuration(${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_KEYS_FILENAME}) is not existed." + fi + else + prn_warn "Default K2HDKC DBaaS CLI Directory(${_DATABASE_CURRENT_DIR}) is not existed." + fi + fi + fi + + # + # Load values + # + if [ "X${_DATABASE_KEYS_FILE}" != "X" ]; then + . 
"${_DATABASE_KEYS_FILE}" + else + # + # File is not found, set default + # + DATABASE_SERVER_KEY_INI_PKG="" + DATABASE_SERVER_KEY_INI_PCPKG="k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc" + DATABASE_SERVER_KEY_INI_SYSPKG="chmpx.service,k2hdkc.service,k2hr3-get-resource.timer" + DATABASE_SLAVE_KEY_INI_PKG="" + DATABASE_SLAVE_KEY_INI_PCPKG="k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx" + DATABASE_SLAVE_KEY_INI_SYSPKG="chmpx.service,k2hr3-get-resource.timer" + fi + + # + # Check values(cut space) + # + DATABASE_SERVER_KEY_INI_PKG=$(pecho -n "${DATABASE_SERVER_KEY_INI_PKG}" | sed -e 's/ //g') + DATABASE_SERVER_KEY_INI_PCPKG=$(pecho -n "${DATABASE_SERVER_KEY_INI_PCPKG}" | sed -e 's/ //g') + DATABASE_SERVER_KEY_INI_SYSPKG=$(pecho -n "${DATABASE_SERVER_KEY_INI_SYSPKG}" | sed -e 's/ //g') + DATABASE_SLAVE_KEY_INI_PKG=$(pecho -n "${DATABASE_SLAVE_KEY_INI_PKG}" | sed -e 's/ //g') + DATABASE_SLAVE_KEY_INI_PCPKG=$(pecho -n "${DATABASE_SLAVE_KEY_INI_PCPKG}" | sed -e 's/ //g') + DATABASE_SLAVE_KEY_INI_SYSPKG=$(pecho -n "${DATABASE_SLAVE_KEY_INI_SYSPKG}" | sed -e 's/ //g') + + return 0 +} + +# +# Get Current Tenant from Scoped Token +# +# $? : result +# Output : Tenant name +# +dbaas_get_current_tenant() +{ + if [ "X${K2HR3CLI_SCOPED_TOKEN}" = "X" ]; then + return 1 + fi + + # + # Run k2hr3 for token show + # + _DATABASE_TOKEN_INFO=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" token show token --scopedtoken "${K2HR3CLI_SCOPED_TOKEN}") + if [ $? -ne 0 ]; then + prn_err "Failed to get scoped token information." + return 1 + fi + + # + # Parse Result + # + jsonparser_parse_json_string "${_DATABASE_TOKEN_INFO}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse scoped token information." + return 1 + fi + + # + # Top element + # + jsonparser_get_key_value '%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse scoped token information." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_ARR}" ]; then + prn_err "Scoped token information is not array." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL}" = "X" ]; then + prn_err "Scoped token information is empty array." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + # + # Use only first element + # + _DATABASE_TOKEN_INFO_POS=$(pecho -n "${JSONPARSER_FIND_KEY_VAL}" | awk '{print $1}') + _DATABASE_TOKEN_INFO_POS_RAW=$(pecho -n "${_DATABASE_TOKEN_INFO_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%${_DATABASE_TOKEN_INFO_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse scoped token information(element does not have \"name\")." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_STR}" ]; then + prn_err "Failed to parse scoped token information(\"name\" is not string)." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X" ]; then + prn_err "Failed to parse scoped token information(\"name\" value is empty)." 
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + + prn_dbg "(dbaas_get_current_tenant) Scoped Token Tenant is \"${JSONPARSER_FIND_STR_VAL}\"" + pecho -n "${JSONPARSER_FIND_STR_VAL}" + + return 0 +} + +# +# Search Role Token(Maximum expiration date) +# +# $1 : role name +# $? : result +# Output +# DBAAS_FOUND_ROLETOKEN : found existed role token string +# DBAAS_FOUND_REGISTERPATH : found existed role token's registerpath +# +dbaas_get_existed_role_token() +{ + DBAAS_FOUND_ROLETOKEN="" + DBAAS_FOUND_REGISTERPATH="" + + if [ $# -lt 1 ]; then + return 1 + fi + _DATABASE_GET_RTOKEN_ROLE=$1 + + # + # (1) Get Role Token list + # + # [MEMO] + # ["49963578ddfe93dfa214e509426eb59f2fddfb4778bd47972d3fad2fe9c3a434","fdc09c83575df90e103d70ee9acb64d2085c96e425d296342dc7029b4abd091c"] + # + _DATABASE_GET_RTOKEN_ARR_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role token show "${_DATABASE_GET_RTOKEN_ROLE}") + + # + # Check Result + # + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) Role token for ${_DATABASE_GET_RTOKEN_ROLE} is not existed or failed to get those." + return 1 + fi + + # + # Parse Result + # + jsonparser_parse_json_string "${_DATABASE_GET_RTOKEN_ARR_RESULT}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) Failed to parse Role token for ${_DATABASE_GET_RTOKEN_ROLE}." + return 1 + fi + jsonparser_get_key_value '%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) Failed to parse Role token for ${_DATABASE_GET_RTOKEN_ROLE}." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_ARR}" ]; then + prn_dbg "(dbaas_get_existed_role_token) Role token for ${_DATABASE_GET_RTOKEN_ROLE} is not array." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL}" = "X" ]; then + prn_dbg "(dbaas_get_existed_role_token) Role token for ${_DATABASE_GET_RTOKEN_ROLE} is empty." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DATABASE_GET_RTOKEN_LIST_FILE=${JP_PAERSED_FILE} + _DATABASE_GET_RTOKEN_POS_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # (2) Get Role Token Details + # + # [MEMO] + # { + # "....TOKEN STRING....": { + # "date": "2021-01-01T00:00:00.000Z", + # "expire": "2031-01-01T00:00:00.000Z", + # "user": "user", + # "hostname": null, + # "ip": null, + # "port": 0, + # "cuk": null, + # "registerpath": ".... path ...." + # }, + # {...} + # } + # + _DATABASE_GET_RTOKEN_OBJ_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role token show "${_DATABASE_GET_RTOKEN_ROLE}" --expand) + + # + # Check Result + # + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) Role token for ${_DATABASE_GET_RTOKEN_ROLE} is not existed or failed to get those." + rm -f "${_DATABASE_GET_RTOKEN_LIST_FILE}" + return 1 + fi + + # + # Parse Result + # + jsonparser_parse_json_string "${_DATABASE_GET_RTOKEN_OBJ_RESULT}" + if [ $? 
-ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) Failed to parse Role token detail for ${_DATABASE_GET_RTOKEN_ROLE}." + return 1 + fi + _DATABASE_GET_RTOKEN_OBJ_FILE="${JP_PAERSED_FILE}" + + # + # Loop - Role Token List + # + _DATABASE_GET_RTOKEN_MAX_TOKEN="" + _DATABASE_GET_RTOKEN_MAX_EXPIRE=0 + _DATABASE_GET_RTOKEN_MAX_REGPATH="" + for _DATABASE_GET_RTOKEN_POS in ${_DATABASE_GET_RTOKEN_POS_LIST}; do + # + # Get token string from array + # + _DATABASE_GET_RTOKEN_POS_RAW=$(pecho -n "${_DATABASE_GET_RTOKEN_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%${_DATABASE_GET_RTOKEN_POS_RAW}%" "${_DATABASE_GET_RTOKEN_LIST_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse scoped token information at \"%${_DATABASE_GET_RTOKEN_POS_RAW}%\"." + rm -f "${_DATABASE_GET_RTOKEN_OBJ_FILE}" + rm -f "${_DATABASE_GET_RTOKEN_LIST_FILE}" + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_STR}" ]; then + prn_err "Failed to parse scoped token information at \"%${_DATABASE_GET_RTOKEN_POS_RAW}%\" is not string." + continue + fi + if [ "X${JSONPARSER_FIND_VAL}" = "X" ]; then + prn_err "Failed to parse scoped token information at \"%${_DATABASE_TOKEN_INFO_POS}%\" is empty string." + continue + fi + _DATABASE_GET_RTOKEN_STR=${JSONPARSER_FIND_STR_VAL} + _DATABASE_GET_RTOKEN_KEY=${JSONPARSER_FIND_VAL} + + # + # Search token in object(registerpath) + # + jsonparser_get_key_value "%${_DATABASE_GET_RTOKEN_KEY}%\"registerpath\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) ${_DATABASE_GET_RTOKEN_TOKENSTR} role token does not have registerpath key." + continue + fi + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X" ]; then + prn_dbg "(dbaas_get_existed_role_token) ${_DATABASE_GET_RTOKEN_TOKENSTR} role token registerpath is empty." + continue + fi + _DATABASE_GET_RTOKEN_REGISTERPATH=${JSONPARSER_FIND_STR_VAL} + + # + # Search token in object(expire) + # + jsonparser_get_key_value "%${_DATABASE_GET_RTOKEN_KEY}%\"expire\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_get_existed_role_token) ${_DATABASE_GET_RTOKEN_TOKENSTR} role token does not have expire key." + continue + fi + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X" ]; then + prn_dbg "(dbaas_get_existed_role_token) ${_DATABASE_GET_RTOKEN_TOKENSTR} role token expire is empty." + continue + fi + _DATABASE_GET_RTOKEN_EXPIRE=${JSONPARSER_FIND_STR_VAL} + + # + # Make expire number string + # + _DATABASE_GET_RTOKEN_EXPIRE_NUM=$(pecho -n "${_DATABASE_GET_RTOKEN_EXPIRE}" | sed -e 's/[.].*$//g' -e s'/[:]//g' -e s'/[-|+|T]//g') + _DATABASE_GET_RTOKEN_MAX_EXPIRE_NUM=$(pecho -n "${_DATABASE_GET_RTOKEN_MAX_EXPIRE}" | sed -e 's/[.].*$//g' -e s'/[:]//g' -e s'/[-|+|T]//g') + + # + # Compare + # + if [ "${_DATABASE_GET_RTOKEN_EXPIRE_NUM}" -gt "${_DATABASE_GET_RTOKEN_MAX_EXPIRE_NUM}" ]; then + # + # Detected role token with a longer expiration date + # + _DATABASE_GET_RTOKEN_MAX_TOKEN=$(pecho -n "${_DATABASE_GET_RTOKEN_STR}") + _DATABASE_GET_RTOKEN_MAX_EXPIRE=$(pecho -n "${_DATABASE_GET_RTOKEN_EXPIRE}") + _DATABASE_GET_RTOKEN_MAX_REGPATH=$(pecho -n "${_DATABASE_GET_RTOKEN_REGISTERPATH}") + fi + done + + rm -f "${_DATABASE_GET_RTOKEN_OBJ_FILE}" + rm -f "${_DATABASE_GET_RTOKEN_LIST_FILE}" + + if [ "X${_DATABASE_GET_RTOKEN_MAX_TOKEN}" = "X" ] || [ "X${_DATABASE_GET_RTOKEN_MAX_REGPATH}" = "X" ]; then + prn_dbg "(dbaas_get_existed_role_token) Not found existed Role token." 
+ return 1 + fi + + DBAAS_FOUND_ROLETOKEN=${_DATABASE_GET_RTOKEN_MAX_TOKEN} + DBAAS_FOUND_REGISTERPATH=${_DATABASE_GET_RTOKEN_MAX_REGPATH} + + prn_dbg "(dbaas_get_existed_role_token) Found existed Role token : ${DBAAS_FOUND_ROLETOKEN}" + prn_dbg "(dbaas_get_existed_role_token) Found existed Role token Registerpath : ${DBAAS_FOUND_REGISTERPATH}" + + return 0 +} + +# +# Create New Role Token +# +# $1 : role name +# $? : result +# Output +# DBAAS_NEW_ROLETOKEN : role token string +# DBAAS_NEW_REGISTERPATH : role token's registerpath +# +dbaas_create_role_token() +{ + if [ $# -lt 1 ]; then + return 1 + fi + _DATABASE_CREATE_RTOKEN_ROLE=$1 + + # + # (1) Get Role Token list + # + # [MEMO] + # Succeed : ROLE TOKEN=...... + # REGISTERPATH=...... + # + _DATABASE_CREATE_RTOKEN_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role token create "${_DATABASE_CREATE_RTOKEN_ROLE}" --expire "0") + + # + # Check Result + # + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_create_role_token) Failed to create new Role token for ${_DATABASE_GET_RTOKEN_ROLE}." + return 1 + fi + if [ "X${_DATABASE_CREATE_RTOKEN_RESULT}" = "X" ]; then + prn_dbg "(dbaas_create_role_token) Failed to create new Role token for ${_DATABASE_GET_RTOKEN_ROLE}(result is empty)" + return 1 + fi + + # + # Parse Result + # + jsonparser_parse_json_string "${_DATABASE_CREATE_RTOKEN_RESULT}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_create_role_token) Failed to parse new Role token for ${_DATABASE_GET_RTOKEN_ROLE}" + return 1 + fi + + jsonparser_get_key_value '%"token"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_create_role_token) Failed to create new Role token for ${_DATABASE_GET_RTOKEN_ROLE}(result token is wrong format)" + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DATABASE_CREATE_RTOKEN=${JSONPARSER_FIND_STR_VAL} + + jsonparser_get_key_value '%"registerpath"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_create_role_token) Failed to create new Role token for ${_DATABASE_GET_RTOKEN_ROLE}(result registerpath is wrong format)" + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DATABASE_CREATE_REGPATH=${JSONPARSER_FIND_STR_VAL} + + rm -f "${JP_PAERSED_FILE}" + + if [ "X${_DATABASE_CREATE_RTOKEN}" = "X" ] || [ "X${_DATABASE_CREATE_REGPATH}" = "X" ]; then + prn_dbg "(dbaas_create_role_token) Failed to create new Role token for ${_DATABASE_GET_RTOKEN_ROLE}(result is something wrong)" + return 1 + fi + + DBAAS_NEW_ROLETOKEN=${_DATABASE_CREATE_RTOKEN} + DBAAS_NEW_REGISTERPATH=${_DATABASE_CREATE_REGPATH} + + prn_dbg "(dbaas_create_role_token) Created Role token : ${DBAAS_NEW_ROLETOKEN}" + prn_dbg "(dbaas_create_role_token) Created Role token Registerpath : ${DBAAS_NEW_REGISTERPATH}" + + return 0 +} + +# +# Get OpenStack Nova template +# +# $1 : server name +# $2 : image id +# $3 : flavor id +# $4 : user data +# $5 : keypair name(allow empty) +# $6 : security group name(allow empty) +# $? : result +# Output : json post data for launching host +# +dbaas_get_openstack_launch_post_data() +{ + if [ $# -lt 4 ]; then + prn_dbg "(dbaas_get_openstack_launch_post_data) Parameter wrong." 
+ pecho -n "" + return 1 + fi + if [ "X$1" = "X" ] || [ "X$2" = "X" ] || [ "X$3" = "X" ] || [ "X$4" = "X" ]; then + prn_dbg "(dbaas_get_openstack_launch_post_data) Parameters($1, $2, $3, $4) wrong." + pecho -n "" + return 1 + fi + _DBAAS_LAUNCH_DATA_SERVER_NAME=$1 + _DBAAS_LAUNCH_DATA_IMAGE_ID=$2 + _DBAAS_LAUNCH_DATA_FLAVOR_ID=$3 + _DBAAS_LAUNCH_DATA_USERDATA=$4 + if [ "X$5" = "X" ]; then + _DBAAS_LAUNCH_DATA_KEYPAIR="" + else + _DBAAS_LAUNCH_DATA_KEYPAIR=$5 + fi + if [ "X$6" = "X" ]; then + _DBAAS_LAUNCH_DATA_SECGRP="" + else + _DBAAS_LAUNCH_DATA_SECGRP=$6 + fi + + # + # Check template file + # + _DATABASE_CREATE_HOST_FILE="" + if [ "X${K2HR3CLI_DBAAS_CONFIG}" != "X" ]; then + # + # Specified custom dbaas configuration directory + # + if [ -d "${K2HR3CLI_DBAAS_CONFIG}" ]; then + if [ -f "${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" ]; then + _DATABASE_CREATE_HOST_FILE="${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" + else + prn_err "Specified K2HDKC DBaaS CLI Configuration(${K2HR3CLI_DBAAS_CONFIG}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}) is not existed." + pecho -n "" + return 1 + fi + else + prn_err "Specified K2HDKC DBaaS CLI Configuration directory(${K2HR3CLI_DBAAS_CONFIG}) is not existed." + pecho -n "" + return 1 + fi + else + # + # Check user home dbaas configuration + # + _DATABASE_USER_CONFIG_DIR=$(config_get_default_user_dir) + if [ -d "${_DATABASE_USER_CONFIG_DIR}" ]; then + if [ -f "${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" ]; then + _DATABASE_CREATE_HOST_FILE="${_DATABASE_USER_CONFIG_DIR}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" + fi + fi + + if [ "X${_DATABASE_CREATE_HOST_FILE}" = "X" ]; then + # + # Default dbaas configuration + # + if [ -d "${_DATABASE_CURRENT_DIR}" ]; then + if [ -f "${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" ]; then + _DATABASE_CREATE_HOST_FILE="${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}" + else + prn_warn "Default K2HDKC DBaaS CLI Configuration(${_DATABASE_CURRENT_DIR}/${_DATABASE_DEFAULT_CREATE_HOST_FILENAME}) is not existed." + fi + else + prn_warn "Default K2HDKC DBaaS CLI Directory(${_DATABASE_CURRENT_DIR}) is not existed." + fi + fi + fi + + # + # Load template file to string + # + _DATABASE_CREATE_HOST_DATA=$(sed -e 's/#.*$//g' -e 's/^[[:space:]]\+//g' -e 's/[[:space:]]\+$//g' "${_DATABASE_CREATE_HOST_FILE}" | tr -d '\n') + if [ $? -ne 0 ]; then + prn_err "Could load the template file for launching host." 
+ pecho -n "" + return 1 + fi + + # + # Replace keyword + # + if [ "X${_DBAAS_LAUNCH_DATA_SECGRP}" != "X" ]; then + # + # Set Security Group + # "security_groups": [ + # { + # "name":"default" + # }, + # { + # "name":"<...security group...>" + # } + # ], + # + _DBAAS_LAUNCH_DATA_SECGRP="\"security_groups\":[{\"name\":\"default\"},{\"name\":\"${_DBAAS_LAUNCH_DATA_SECGRP}\"}]," + fi + if [ "X${_DBAAS_LAUNCH_DATA_KEYPAIR}" != "X" ]; then + # + # Set Keypair + # "key_name":"<...name...>", + # + _DBAAS_LAUNCH_DATA_KEYPAIR="\"key_name\":\"${_DBAAS_LAUNCH_DATA_KEYPAIR}\"," + fi + + _DATABASE_CREATE_HOST_DATA=$(pecho -n "${_DATABASE_CREATE_HOST_DATA}" | sed \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_SECGRP_SET__|${_DBAAS_LAUNCH_DATA_SECGRP}|" \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_KEYPAIR_SET__|${_DBAAS_LAUNCH_DATA_KEYPAIR}|" \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_SERVER_NAME__|${_DBAAS_LAUNCH_DATA_SERVER_NAME}|" \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_IMAGE_ID__|${_DBAAS_LAUNCH_DATA_IMAGE_ID}|" \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_FLAVOR_ID__|${_DBAAS_LAUNCH_DATA_FLAVOR_ID}|" \ + -e "s|__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_USER_DATA__|${_DBAAS_LAUNCH_DATA_USERDATA}|") + + if [ "X${_DATABASE_CREATE_HOST_DATA}" = "X" ]; then + prn_err "Could load the template file for launching host." + pecho -n "" + return 1 + fi + + pecho -n "${_DATABASE_CREATE_HOST_DATA}" + + return 0 +} + +# +# Parse one k2hr3 host information +# +# $1 : one host information(space separator) +# $? : result +# +# Output Variables +# DATABASE_PARSE_K2HR3_HOSTNAME : first part +# DATABASE_PARSE_K2HR3_PORT : 2'nd part +# DATABASE_PARSE_K2HR3_CUK : 3'rd part +# DATABASE_PARSE_K2HR3_EXTRA : 4'th part +# DATABASE_PARSE_K2HR3_TAG : last part +# +dbaas_parse_k2hr3_host_info() +{ + DATABASE_PARSE_K2HR3_HOSTNAME="" + DATABASE_PARSE_K2HR3_PORT=0 + DATABASE_PARSE_K2HR3_CUK="" + DATABASE_PARSE_K2HR3_EXTRA="" + DATABASE_PARSE_K2HR3_TAG="" + + _DATABASE_PARSE_K2HR3_REMAINING="$1" + + # + # First part(hostname or ip address) + # + DATABASE_PARSE_K2HR3_HOSTNAME=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | awk '{print $1}') + _DATABASE_TMP_NEXT_POS=$((${#DATABASE_PARSE_K2HR3_HOSTNAME} + 2)) + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c "${_DATABASE_TMP_NEXT_POS}"-) + + # + # 2'nd part + # + _DATABASE_PARSE_K2HR3_CHAR=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -b 1) + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" = "X" ];then + # + # No more data + # + return 0 + fi + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" != "X " ]; then + # + # 2'nd part is existed + # + DATABASE_PARSE_K2HR3_PORT=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | awk '{print $1}') + + # + # Next + # + _DATABASE_TMP_NEXT_POS=$((${#DATABASE_PARSE_K2HR3_PORT} + 2)) + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c "${_DATABASE_TMP_NEXT_POS}"-) + + if [ "X${DATABASE_PARSE_K2HR3_PORT}" = "X" ] || [ "X${DATABASE_PARSE_K2HR3_PORT}" = "X*" ]; then + DATABASE_PARSE_K2HR3_PORT=0 + fi + else + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c 2-) + fi + + # + # 3'rd part + # + _DATABASE_PARSE_K2HR3_CHAR=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -b 1) + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" = "X" ];then + # + # No more data + # + return 0 + fi + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" != "X " ]; then + # + # 3'rd part is existed + # + DATABASE_PARSE_K2HR3_CUK=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | awk '{print $1}') + + # + 
# Next + # + _DATABASE_TMP_NEXT_POS=$((${#DATABASE_PARSE_K2HR3_CUK} + 2)) + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c "${_DATABASE_TMP_NEXT_POS}"-) + else + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c 2-) + fi + + # + # 4'th part + # + _DATABASE_PARSE_K2HR3_CHAR=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -b 1) + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" = "X" ];then + # + # No more data + # + return 0 + fi + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" != "X " ]; then + # + # 4'th part is existed + # + DATABASE_PARSE_K2HR3_EXTRA=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | awk '{print $1}') + + # + # Next + # + _DATABASE_TMP_NEXT_POS=$((${#DATABASE_PARSE_K2HR3_EXTRA} + 2)) + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c "${_DATABASE_TMP_NEXT_POS}"-) + else + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c 2-) + fi + + # + # Last part + # + _DATABASE_PARSE_K2HR3_CHAR=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -b 1) + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" = "X" ];then + # + # No more data + # + return 0 + fi + if [ "X${_DATABASE_PARSE_K2HR3_CHAR}" != "X " ]; then + # + # Last part is existed + # + DATABASE_PARSE_K2HR3_TAG=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | awk '{print $1}') + + # + # Next + # + _DATABASE_TMP_NEXT_POS=$((${#DATABASE_PARSE_K2HR3_TAG} + 2)) + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c "${_DATABASE_TMP_NEXT_POS}"-) + else + _DATABASE_PARSE_K2HR3_REMAINING=$(pecho -n "${_DATABASE_PARSE_K2HR3_REMAINING}" | cut -c 2-) + fi + + return 0 +} + +# +# Search host in role +# +# $1 : role path +# $2 : host +# $? : result +# Output +# DBAAS_FIND_ROLE_HOST_NAME : hostname or ip +# DBAAS_FIND_ROLE_HOST_PORT : port(* to 0) +# DBAAS_FIND_ROLE_HOST_CUK : cuk +# +dbaas_find_role_host() +{ + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_NAME="" + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_PORT=0 + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_CUK="" + + if [ "X$1" = "X" ] || [ "X$2" = "X" ]; then + prn_dbg "(dbaas_find_role_host) Parameter is wrong." + return 1 + fi + _DBAAS_DEL_ROLE_PATH=$1 + + # + # Get host list(run k2hr3) + # + # [MEMO] + # Host is " " + # { + # "policies": [], + # "aliases": [], + # "hosts": { + # "hostnames": [ + # "hostname * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx openstack-auto-v1 localhostname" + # ], + # "ips": [ + # "10.0.0.1 * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx openstack-auto-v1 localhostname" + # ] + # } + # } + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role show "${_DBAAS_DEL_ROLE_PATH}") + + # + # Parse + # + jsonparser_parse_json_string "${_DATABASE_RESULT}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_find_role_host) Failed to parse host list." + return 1 + fi + + # + # Search in hosts->hostnames + # + jsonparser_get_key_value '%"hosts"%"hostnames"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_find_role_host) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames, thus skip this role." 
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_HOSTNAME_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop hostnames + # + for _DATABASE_RESULT_HOSTNAME_POS in ${_DATABASE_RESULT_HOSTNAME_LIST}; do + _DATABASE_RESULT_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_RESULT_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"hostnames\"%${_DATABASE_RESULT_HOSTNAME_POS_RAW}%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_find_role_host) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames[${_DATABASE_RESULT_HOSTNAME_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + if [ "X${DATABASE_PARSE_K2HR3_HOSTNAME}" = "X$2" ] || [ "X${DATABASE_PARSE_K2HR3_TAG}" = "X$2" ]; then + # + # Found (The TAG may have a hostname and the HOSTNAME may be an IP address) + # + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + return 0 + fi + done + + # + # Search in hosts->ips + # + jsonparser_get_key_value '%"hosts"%"ips"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_find_role_host) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips, thus skip this role." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_IP_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop ips + # + for _DATABASE_RESULT_IP_POS in ${_DATABASE_RESULT_IP_LIST}; do + _DATABASE_RESULT_IP_POS_RAW=$(pecho -n "${_DATABASE_RESULT_IP_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"ips\"%${_DATABASE_RESULT_IP_POS_RAW}%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_find_role_host) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips[${_DATABASE_RESULT_IP_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + if [ "X${DATABASE_PARSE_K2HR3_HOSTNAME}" = "X$2" ] || [ "X${DATABASE_PARSE_K2HR3_TAG}" = "X$2" ]; then + # + # Found (The TAG may have a hostname and the HOSTNAME may be an IP address) + # + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + # shellcheck disable=SC2034 + DBAAS_FIND_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + return 0 + fi + done + + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +# +# show all hosts +# +# $1 : json parsed file +# $? : result +# +# Output Variables +# DATABSE_HOST_LIST +# +dbaas_show_all_hosts() +{ + if [ "X$1" = "X" ]; then + return 1 + fi + if [ ! -f "$1" ]; then + return 1 + fi + _DATABASE_HOST_PAERSED_FILE=$1 + _DATABSE_HOST_ISSET=0 + DATABSE_HOST_LIST="[" + + # + # "hostnames" key + # + jsonparser_get_key_value '%"hosts"%"hostnames"%' "${_DATABASE_HOST_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\" key does not have \"hostnames\" element." + else + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_ARR}" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"hostnames\" key is not array." 
+ else + _DATABASE_HOST_HOSTNAMES=${JSONPARSER_FIND_KEY_VAL} + for _DATABASE_HOST_HOSTNAME_POS in ${_DATABASE_HOST_HOSTNAMES}; do + _DATABASE_HOST_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_HOST_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"hostnames\"%${_DATABASE_HOST_HOSTNAME_POS_RAW}%" "${_DATABASE_HOST_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"hostnames[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is not found." + continue + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_STR}" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"hostnames[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is not string type." + continue + fi + # + # Parse host information + # + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + if [ "X${DATABASE_PARSE_K2HR3_HOSTNAME}" = "X" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"hostnames[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is something wrong." + continue + fi + _DATABSE_HOST_ONE_HOST="{\"name\":\"${DATABASE_PARSE_K2HR3_TAG}\",\"id\":\"${DATABASE_PARSE_K2HR3_CUK}\",\"hostname\":\"${DATABASE_PARSE_K2HR3_HOSTNAME}\"}" + + if [ "${_DATABSE_HOST_ISSET}" -eq 0 ]; then + DATABSE_HOST_LIST="${DATABSE_HOST_LIST}${_DATABSE_HOST_ONE_HOST}" + else + DATABSE_HOST_LIST="${DATABSE_HOST_LIST},${_DATABSE_HOST_ONE_HOST}" + fi + done + fi + fi + + + # + # "ips" key + # + jsonparser_get_key_value '%"hosts"%"ips"%' "${_DATABASE_HOST_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\" key does not have \"ips\" element." + else + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_ARR}" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"ips\" key is not array." + else + _DATABASE_HOST_IPS=${JSONPARSER_FIND_KEY_VAL} + for _DATABASE_HOST_IPS_POS in ${_DATABASE_HOST_IPS}; do + _DATABASE_HOST_IPS_POS_RAW=$(pecho -n "${_DATABASE_HOST_IPS_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"ips\"%${_DATABASE_HOST_IPS_POS_RAW}%" "${_DATABASE_HOST_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"ips[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is not found." + continue + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_STR}" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"ips[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is not string type." + continue + fi + # + # Parse host information + # + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + if [ "X${DATABASE_PARSE_K2HR3_HOSTNAME}" = "X" ]; then + prn_warn "(dbaas_show_all_hosts) The result \"hosts\"->\"hostnames[${_DATABASE_HOST_HOSTNAME_POS_RAW}]\" is something wrong." + continue + fi + _DATABSE_HOST_ONE_HOST="{\"name\":\"${DATABASE_PARSE_K2HR3_TAG}\",\"id\":\"${DATABASE_PARSE_K2HR3_CUK}\",\"ip\":\"${DATABASE_PARSE_K2HR3_HOSTNAME}\"}" + + if [ "${_DATABSE_HOST_ISSET}" -eq 0 ]; then + DATABSE_HOST_LIST="${DATABSE_HOST_LIST}${_DATABSE_HOST_ONE_HOST}" + else + DATABSE_HOST_LIST="${DATABSE_HOST_LIST},${_DATABSE_HOST_ONE_HOST}" + fi + done + fi + fi + + DATABSE_HOST_LIST="${DATABSE_HOST_LIST}]" + return 0 +} + +# +# Delete host in role +# +# $1 : role path +# $2 : host name +# $3 : port +# $4 : cuk +# $? : result +# +dbaas_delete_role_host() +{ + if [ "X$1" = "X" ] || [ "X$2" = "X" ]; then + prn_dbg "(dbaas_delete_role_host) Parameter is wrong." 
+ return 1 + fi + _DBAAS_DEL_ROLE_PATH=$1 + _DBAAS_DEL_ROLE_HOST_NAME=$2 + + if [ "X$3" != "X" ]; then + _DBAAS_DEL_ROLE_HOST_PORT=$3 + else + _DBAAS_DEL_ROLE_HOST_PORT=0 + fi + + if [ "X$4" != "X" ]; then + _DBAAS_DEL_ROLE_HOST_CUK=$4 + _DBAAS_DEL_ROLE_HOST_CUK_OPT="--cuk" + else + _DBAAS_DEL_ROLE_HOST_CUK="" + _DBAAS_DEL_ROLE_HOST_CUK_OPT="" + fi + + # + # Delete host from role(Run k2hr3) + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role host delete "${_DBAAS_DEL_ROLE_PATH}" --host "${_DBAAS_DEL_ROLE_HOST_NAME}" --port "${_DBAAS_DEL_ROLE_HOST_PORT}" "${_DBAAS_DEL_ROLE_HOST_CUK_OPT}" "${_DBAAS_DEL_ROLE_HOST_CUK}") + + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host) Failed to delete ${_DBAAS_DEL_ROLE_HOST_NAME} from ${_DBAAS_DEL_ROLE_PATH} role : ${_DATABASE_RESULT}" + return 1 + fi + + prn_dbg "(dbaas_delete_role_host) Deleted host ${_DBAAS_DEL_ROLE_HOST_NAME} from ${_DBAAS_DEL_ROLE_PATH} role." + return 0 +} + +# +# delete all host in role/openstack +# +# $1 : root role path +# $? : result +# +# [NOTE] +# This function calls openstack function. +# +dbaas_delete_role_host_all() +{ + if [ "X$1" = "X" ]; then + prn_dbg "(dbaas_delete_role_host_all) Parameter is wrong." + return 1 + fi + _DBAAS_DELALL_ROLE_PATH=$1 + + #------------------------------------------------------ + # Loop server host list(run k2hr3) + #------------------------------------------------------ + # [MEMO] + # Host is " " + # { + # "policies": [], + # "aliases": [], + # "hosts": { + # "hostnames": [ + # "hostname * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx openstack-auto-v1 localhostname" + # ], + # "ips": [ + # "10.0.0.1 * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx openstack-auto-v1 localhostname" + # ] + # } + # } + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role show "${_DBAAS_DELALL_ROLE_PATH}/server") + + # + # Parse + # + jsonparser_parse_json_string "${_DATABASE_RESULT}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to parse result." + return 1 + fi + _DATABASE_HOSTS_PAERSED_FILE=${JP_PAERSED_FILE} + + # + # Search in server hosts->hostnames + # + jsonparser_get_key_value '%"hosts"%"hostnames"%' "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames, thus skip this role." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_HOSTNAME_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop server hosts->hostnames + # + for _DATABASE_RESULT_HOSTNAME_POS in ${_DATABASE_RESULT_HOSTNAME_LIST}; do + _DATABASE_RESULT_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_RESULT_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"hostnames\"%${_DATABASE_RESULT_HOSTNAME_POS_RAW}%" "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? 
-ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames[${_DATABASE_RESULT_HOSTNAME_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + _DATABASE_TMP_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + _DATABASE_TMP_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + _DATABASE_TMP_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + + # + # Delete host from openstack + # + if [ "X${_DATABASE_TMP_ROLE_HOST_CUK}" != "X" ]; then + delete_op_host "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from OpenStack." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + else + prn_dbg "Found ${_DATABASE_TMP_ROLE_HOST_NAME} host in ${_DBAAS_DELALL_ROLE_PATH}/server role, but it does not have Host id for opensteck." + fi + + # + # Delete host from k2hr3 + # + dbaas_delete_role_host "${_DBAAS_DELALL_ROLE_PATH}/server" "${_DATABASE_TMP_ROLE_HOST_NAME}" "${_DATABASE_TMP_ROLE_HOST_PORT}" "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from ${_DBAAS_DELALL_ROLE_PATH}/server role" + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + done + + # + # Search in server hosts->ips + # + jsonparser_get_key_value '%"hosts"%"ips"%' "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips, thus skip this role." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_HOSTNAME_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop server hosts->ips + # + for _DATABASE_RESULT_HOSTNAME_POS in ${_DATABASE_RESULT_HOSTNAME_LIST}; do + _DATABASE_RESULT_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_RESULT_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"ips\"%${_DATABASE_RESULT_HOSTNAME_POS_RAW}%" "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips[${_DATABASE_RESULT_HOSTNAME_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + _DATABASE_TMP_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + _DATABASE_TMP_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + _DATABASE_TMP_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + + # + # Delete host from openstack + # + if [ "X${_DATABASE_TMP_ROLE_HOST_CUK}" != "X" ]; then + delete_op_host "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from OpenStack." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + else + prn_dbg "Found ${_DATABASE_TMP_ROLE_HOST_NAME} host in ${_DBAAS_DELALL_ROLE_PATH}/server role, but it does not have Host id for opensteck." + fi + + # + # Delete host from k2hr3 + # + dbaas_delete_role_host "${_DBAAS_DELALL_ROLE_PATH}/server" "${_DATABASE_TMP_ROLE_HOST_NAME}" "${_DATABASE_TMP_ROLE_HOST_PORT}" "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? 
-ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from ${_DBAAS_DELALL_ROLE_PATH}/server role" + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + done + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + + #------------------------------------------------------ + # Loop slave host list(run k2hr3) + #------------------------------------------------------ + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role show "${_DBAAS_DELALL_ROLE_PATH}/slave") + + # + # Parse + # + jsonparser_parse_json_string "${_DATABASE_RESULT}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to parse result." + return 1 + fi + _DATABASE_HOSTS_PAERSED_FILE=${JP_PAERSED_FILE} + + # + # Search in slave hosts->hostnames + # + jsonparser_get_key_value '%"hosts"%"hostnames"%' "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames, thus skip this role." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_HOSTNAME_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop slave hosts->hostnames + # + for _DATABASE_RESULT_HOSTNAME_POS in ${_DATABASE_RESULT_HOSTNAME_LIST}; do + _DATABASE_RESULT_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_RESULT_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"hostnames\"%${_DATABASE_RESULT_HOSTNAME_POS_RAW}%" "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->hostnames[${_DATABASE_RESULT_HOSTNAME_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + _DATABASE_TMP_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + _DATABASE_TMP_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + _DATABASE_TMP_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + + # + # Delete host from openstack + # + if [ "X${_DATABASE_TMP_ROLE_HOST_CUK}" != "X" ]; then + delete_op_host "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from OpenStack." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + else + prn_dbg "Found ${_DATABASE_TMP_ROLE_HOST_NAME} host in ${_DBAAS_DELALL_ROLE_PATH}/slave role, but it does not have Host id for opensteck." + fi + + # + # Delete host from k2hr3 + # + dbaas_delete_role_host "${_DBAAS_DELALL_ROLE_PATH}/slave" "${_DATABASE_TMP_ROLE_HOST_NAME}" "${_DATABASE_TMP_ROLE_HOST_PORT}" "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from ${_DBAAS_DELALL_ROLE_PATH}/slave role" + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + done + + # + # Search in slave hosts->ips + # + jsonparser_get_key_value '%"hosts"%"ips"%' "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips, thus skip this role." 
+ rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + _DATABASE_RESULT_HOSTNAME_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop slave hosts->ips + # + for _DATABASE_RESULT_HOSTNAME_POS in ${_DATABASE_RESULT_HOSTNAME_LIST}; do + _DATABASE_RESULT_HOSTNAME_POS_RAW=$(pecho -n "${_DATABASE_RESULT_HOSTNAME_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"hosts\"%\"ips\"%${_DATABASE_RESULT_HOSTNAME_POS_RAW}%" "${_DATABASE_HOSTS_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_role_host_all) Failed to get ${_DBAAS_DEL_ROLE_PATH} hosts->ips[${_DATABASE_RESULT_HOSTNAME_POS_RAW}], thus skip this role." + continue + fi + + dbaas_parse_k2hr3_host_info "${JSONPARSER_FIND_STR_VAL}" + _DATABASE_TMP_ROLE_HOST_NAME=${DATABASE_PARSE_K2HR3_HOSTNAME} + _DATABASE_TMP_ROLE_HOST_PORT=${DATABASE_PARSE_K2HR3_PORT} + _DATABASE_TMP_ROLE_HOST_CUK=${DATABASE_PARSE_K2HR3_CUK} + + # + # Delete host from openstack + # + if [ "X${_DATABASE_TMP_ROLE_HOST_CUK}" != "X" ]; then + delete_op_host "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from OpenStack." + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + else + prn_dbg "Found ${_DATABASE_TMP_ROLE_HOST_NAME} host in ${_DBAAS_DELALL_ROLE_PATH}/slave role, but it does not have Host id for opensteck." + fi + + # + # Delete host from k2hr3 + # + dbaas_delete_role_host "${_DBAAS_DELALL_ROLE_PATH}/slave" "${_DATABASE_TMP_ROLE_HOST_NAME}" "${_DATABASE_TMP_ROLE_HOST_PORT}" "${_DATABASE_TMP_ROLE_HOST_CUK}" + if [ $? -ne 0 ]; then + prn_err "Failed to delete ${_DATABASE_TMP_ROLE_HOST_NAME} from ${_DBAAS_DELALL_ROLE_PATH}/slave role" + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + return 1 + fi + done + rm -f "${_DATABASE_HOSTS_PAERSED_FILE}" + + return 0 +} + +# +# delete all role/policy/resource +# +# $1 : root role path +# $? : result +# +dbaas_delete_all_k2hr3() +{ + if [ "X$1" = "X" ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Parameter is wrong." + return 1 + fi + _DBAAS_DEL_CLUSTER_NAME=$1 + _DBAAS_DEL_CLUSTER_SERVER="${_DBAAS_DEL_CLUSTER_NAME}/server" + _DBAAS_DEL_CLUSTER_SLAVE="${_DBAAS_DEL_CLUSTER_NAME}/slave" + + # + # Delete Slave Role + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role delete "${_DBAAS_DEL_CLUSTER_SLAVE}") + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_SLAVE} role : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Server Role + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role delete "${_DBAAS_DEL_CLUSTER_SERVER}") + if [ $? 
-ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_SERVER} role : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Top Role + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" role delete "${_DBAAS_DEL_CLUSTER_NAME}") + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_NAME} role : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Policy + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" policy delete "${_DBAAS_DEL_CLUSTER_NAME}") + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_NAME} policy : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Slave Resource + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource delete "${_DBAAS_DEL_CLUSTER_SLAVE}") + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_SLAVE} resource : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Server Resource + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource delete "${_DBAAS_DEL_CLUSTER_SERVER}") + if [ $? -ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_SERVER} resource : ${_DATABASE_RESULT}" + return 1 + fi + + # + # Delete Top Resource + # + _DATABASE_RESULT=$(K2HR3CLI_API_URI="${K2HR3CLI_API_URI}" K2HR3CLI_OPT_CONFIG="${K2HR3CLI_OPT_CONFIG}" K2HR3CLI_MSGLEVEL="${K2HR3CLI_MSGLEVEL_VALUE}" K2HR3CLI_OPT_CURLDBG="${K2HR3CLI_OPT_CURLDBG}" K2HR3CLI_OPT_CURLBODY="${K2HR3CLI_OPT_CURLBODY}" K2HR3CLI_SCOPED_TOKEN="${K2HR3CLI_SCOPED_TOKEN}" K2HR3CLI_SCOPED_TOKEN_VERIFIED="${K2HR3CLI_SCOPED_TOKEN_VERIFIED}" \ + "${K2HR3CLIBIN}" resource delete "${_DBAAS_DEL_CLUSTER_NAME}") + if [ $? 
-ne 0 ]; then + prn_dbg "(dbaas_delete_all_k2hr3) Failed to delete ${_DBAAS_DEL_CLUSTER_NAME} resource : ${_DATABASE_RESULT}" + return 1 + fi + + return 0 +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/help.sh b/src/libexec/database/help.sh new file mode 100644 index 0000000..060c577 --- /dev/null +++ b/src/libexec/database/help.sh @@ -0,0 +1,296 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# Load Option name for DBaaS +#-------------------------------------------------------------- +if [ -f "${LIBEXECDIR}/database/options.sh" ]; then + . "${LIBEXECDIR}/database/options.sh" +fi + +#--------------------------------------------------------------------- +# Put Help +#--------------------------------------------------------------------- +# [NOTE] +# Adjust the start and end positions of the characters according to the +# scale below, and arrange the lines. +# +# +-- start position(ex. title) +# | +-- indent for description +# | | +# v v +# +---+----+----+----+----+----+----+----+----+----+----+----+----| +# +echo "" +echo "K2HDKC DBaaS CLI command - K2HDKC Cluster" +echo " See https://dbaas.k2hdkc.antpick.ax/" +echo "" +echo "${K2HR3CLI_MODE} is a command that operates for K2HDKC DBaaS with" +echo "K2HR3 system." +echo "${K2HR3CLI_MODE} has the \"create\", \"show\", \"add\", \"delete\"" +echo "and \"list\" subcommands." +echo "" +echo "CREATE DATABASE: Create K2HDKC Cluster" +echo " This command is used to create K2HDKC Cluster data." +echo "" +echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} create " +echo "" +echo " ${BINNAME} ${K2HR3CLI_MODE} create " +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG} " +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG} " +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG} " +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG} " +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_USER_LONG}" +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NO_SECGRP_LONG}" +echo "" +echo "DELETE DATABASE: Delete K2HDKC Cluster" +echo " This command is used to delete K2HDKC Cluster." +echo "" +echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} delete cluster " +echo "" +echo " ${BINNAME} ${K2HR3CLI_MODE} delete cluster " +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_SHORT}) " +echo "" +echo "SHOW DATABASE: Show Host/Resource" +echo " This command is used to show all hosts in K2HDKC Cluster or" +echo " the RESOURCE data for K2HDKC Cluster." 
+echo ""
+echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} show [host|configuration(conf)] [server|slave] "
+echo ""
+echo " ${BINNAME} ${K2HR3CLI_MODE} show host [server|slave] "
+echo ""
+echo " ${BINNAME} ${K2HR3CLI_MODE} show configuration(conf) [server|slave] "
+echo ""
+echo "ADD HOST: Add Host to Cluster"
+echo " This command is used to add hosts to K2HDKC Cluster."
+echo ""
+echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} add host [server|slave] "
+echo ""
+echo " ${BINNAME} ${K2HR3CLI_MODE} add host [server|slave] "
+echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_ROLETOKEN_LONG}"
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_SHORT}) "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG} (Optional)"
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG} "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG} "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG} "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG} "
+echo ""
+echo "DELETE HOST: Delete Host from Cluster"
+echo " This command is used to delete hosts from K2HDKC Cluster."
+echo ""
+echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} delete host "
+echo ""
+echo " ${BINNAME} ${K2HR3CLI_MODE} delete host "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_SHORT}) "
+echo ""
+echo "OPENSTACK TOKEN: Create OpenStack Token"
+echo " This command is used to create the Scoped OpenStack Token."
+echo " The OpenStack Scoped Token is required to execute the \"add\""
+echo " and \"delete\" Sub-Commands of \"database\"."
+echo " This command is for getting the Token of the OpenStack"
+echo " system that works with the K2HR3 system."
+echo ""
+echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} openstack [utoken|token] "
+echo ""
+echo " ${BINNAME} ${K2HR3CLI_MODE} openstack utoken "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} "
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} "
+echo " ${K2HR3CLI_COMMON_OPT_SAVE_LONG}(${K2HR3CLI_COMMON_OPT_SAVE_SHORT})"
+echo ""
+echo " *) If ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} / ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} / ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} are"
+echo " omitted, the username / passphrase / tenant used to"
+echo " get the token to K2HR3 will be used."
+echo " If these options are not specified, Configuration("
+echo " K2HR3CLI_OPENSTACK_USER / K2HR3CLI_OPENSTACK_PASS /"
+echo " K2HR3CLI_OPENSTACK_TENANT) or the value of the"
+echo " corresponding environment variable is used."
+echo " Then, if the value is not found, it will try to use"
+echo " the value specified by ${K2HR3CLI_COMMAND_OPT_USER_LONG}(${K2HR3CLI_COMMAND_OPT_USER_SHORT}) / ${K2HR3CLI_COMMAND_OPT_PASS_LONG}(${K2HR3CLI_COMMAND_OPT_PASS_SHORT}) /"
+echo " ${K2HR3CLI_COMMAND_OPT_TENANT_LONG}(${K2HR3CLI_COMMAND_OPT_TENANT_SHORT}) options."
+echo " And if the value is still not found at the end, the"
+echo " value in the Configuration or the value set in the"
+echo " environment variable is used."
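+#
+# [NOTE]
+# The EXAMPLE lines below use placeholder values only; "demo",
+# "demopass" and "demoproject" are illustrative and are not defaults
+# of this command.
+#
+echo ""
+echo " EXAMPLE(illustrative values):"
+echo " ${BINNAME} ${K2HR3CLI_MODE} openstack utoken"
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} demo"
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} demopass"
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} demoproject"
+echo " ${K2HR3CLI_COMMON_OPT_SAVE_LONG}"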
+echo "" +echo " ${BINNAME} ${K2HR3CLI_MODE} openstack token " +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_SHORT}) " +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} " +echo " ${K2HR3CLI_COMMON_OPT_SAVE_LONG}(${K2HR3CLI_COMMON_OPT_SAVE_SHORT})" +echo "" +echo " *) If ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} and ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} are omitted," +echo " the OpenStack Token(specified in the configuration," +echo " environment variables or options) is used." +echo "" +echo "LIST OPENSTACK RESOURCE: List OpenStack resources(images/flavors)" +echo " This command is used to list information about OpenStack" +echo " resources (images, flavors), it lists the names and IDs of" +echo " images and flavors." +echo " If you specify the name of the image and flavor with the" +echo " \"add host\" command, the performance may not be good depending" +echo " on the environment. In that case, specify the ID of the image" +echo " and flavor for better performance." +echo " This command is used to get the ID of the image and flavor." +echo "" +echo " USAGE: ${BINNAME} ${K2HR3CLI_MODE} list [images|flavors] " +echo "" +echo " ${BINNAME} ${K2HR3CLI_MODE} list [images|flavors] " +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} " +echo "" +# +---+----+----+----+----+----+----+----+----+----+----+----+----| +echo "COMMON OPTION:" +echo " The following options are available for all commands." +echo " Options related to URIs can be stored in configuration." +echo " Then, even if the URI of Nova(compute), Glance(image) and" +echo " Neutron(network) is not set in the corresponding option and" +echo " configuration, the appropriate URI is automatically set from" +echo " the generation(information) of Scoped Token." +echo " If these are set automatically, performance will suffer." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG}" +echo " Specifies the DBaaS configuration directory path." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG}" +echo " Specifies the OpenStack Identity URI." +echo " This URI is saved in the configuration and does not" +echo " need to be specified." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NOVA_URI_LONG}" +echo " Specifies the OpenStack Nova(compute) URI." +echo " This URI is saved in the configuration and does not" +echo " need to be specified." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_GLANCE_URI_LONG}" +echo " Specifies the OpenStack Glance(image) URI." +echo " This URI is saved in the configuration and does not" +echo " need to be specified." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NEUTRON_URI_LONG}" +echo " Specifies the OpenStack Neutron(network) URI." +echo " This URI is saved in the configuration and does not" +echo " need to be specified." +echo "" +echo "OPTION:" +echo " ${K2HR3CLI_COMMON_OPT_HELP_LONG}(${K2HR3CLI_COMMON_OPT_HELP_SHORT})" +echo " Display ${K2HR3CLI_MODE} command help." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG}" +echo " Specifies the chmpx port on the K2HDKC DBaaS server node." +echo " If this option is omitted or 0 specified, the default" +echo " port 8020 will be used." +echo "" +echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG}" +echo " Specifies the chmpx control port on the K2HDKC DBaaS" +echo " server node." +echo " If this option is omitted or 0 specified, the default" +echo " port 8021 will be used." 
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG}"
+echo " Specifies the chmpx control port on the K2HDKC DBaaS"
+echo " slave node."
+echo " If this option is omitted or 0 specified, the default"
+echo " port 8031 will be used."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG}"
+echo " Specifies the execution user for the k2hdkc and chmpx"
+echo " processes. If omitted, the default is the k2hdkc user."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_USER_LONG}"
+echo " Specify this option if the execution user for the k2hdkc"
+echo " and chmpx processes (default k2hdkc) does not exist and"
+echo " you want to create that user at startup."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NO_SECGRP_LONG}"
+echo " Specifying this option prevents the security group from"
+echo " being created in the OpenStack network settings."
+echo " By default, when you create a cluster, a corresponding"
+echo " security group is created."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_ROLETOKEN_LONG}"
+echo " Normally, if a role token already exists, that role"
+echo " token is used."
+echo " If this option is specified, a new role token will be"
+echo " created even if an existing role token exists."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG}"
+echo " Specify the name of the Keypair to be set for the host"
+echo " to be started. If this option is omitted, Keypair will"
+echo " not be set."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG}"
+echo " Specifies the name of the Flavor that starts the host."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG}"
+echo " Specifies the id of the Flavor that starts the host."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG}"
+echo " Specifies the name of the OS Image that boots the host."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG}"
+echo " Specifies the id of the OS Image that boots the host."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG}"
+echo " Specifies the OpenStack user name for credential"
+echo " authentication."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG}"
+echo " Specify the OpenStack passphrase for credential"
+echo " authentication."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG}"
+echo " Specify the OpenStack tenant(project) for"
+echo " credential authentication."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_TOKEN_SHORT})"
+echo " Specify the Token(Unscoped or Scoped) issued by the"
+echo " OpenStack system to obtain the Unscoped and Scoped"
+echo " Token."
+echo " The \"K2HR3CLI_OPENSTACK_TOKEN\" environment variable or a"
+echo " variable in the configuration file can be used instead of"
+echo " this option."
+echo ""
+echo " ${K2HR3CLI_COMMAND_OPT_TENANT_LONG}(${K2HR3CLI_COMMAND_OPT_TENANT_SHORT})"
+echo " Specify the OpenStack tenant(project) to get the Scoped"
+echo " Token."
+echo ""
+echo " Options other than the above"
+echo " The Token command uses the following options."
+echo " You can run \"${BINNAME} --help(-h)\" to find"
+echo " out more about these options."
+echo "" +echo " ${K2HR3CLI_COMMAND_OPT_USER_LONG}(${K2HR3CLI_COMMAND_OPT_USER_SHORT}) " +echo " ${K2HR3CLI_COMMAND_OPT_PASS_LONG}(${K2HR3CLI_COMMAND_OPT_PASS_SHORT}) " +echo " ${K2HR3CLI_COMMAND_OPT_UNSCOPED_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_UNSCOPED_TOKEN_SHORT}) " +echo " ${K2HR3CLI_COMMAND_OPT_SCOPED_TOKEN_LONG}(${K2HR3CLI_COMMAND_OPT_SCOPED_TOKEN_SHORT}) " +echo " ${K2HR3CLI_COMMON_OPT_MSGLEVEL_LONG}(${K2HR3CLI_COMMON_OPT_MSGLEVEL_SHORT})" +echo "" +echo " You can pass values in environment variables equivalent" +echo " to them without the options mentioned above." +echo " And you can also define variables in the configuration" +echo " file instead of environment variables." +echo "" + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/k2hdkc_dbaas_create_host.templ b/src/libexec/database/k2hdkc_dbaas_create_host.templ new file mode 100644 index 0000000..2bc8a31 --- /dev/null +++ b/src/libexec/database/k2hdkc_dbaas_create_host.templ @@ -0,0 +1,57 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#--------------------------------------------------------------------- +# Post data template for launching +#--------------------------------------------------------------------- +# +# The K2HDKC DBAAS CLI uses the OpenStack (Nova) Compute API to launch +# a Virtual Machine. +# +# This file sets the POST data template to use when calling OpenStack +# Compute API. +# By default, only the "server name", "image id", "flavor id", +# "user data", "security group name", and "Keypair name" are specified. +# If you want to pass any other value, you can change the following +# JSON data. +# However, the value set by default cannot be described. +# (Do not change special keywords.) +# Be sure to pass them as options. +# + +{ + "server":{ + __K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_SECGRP_SET__ + __K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_KEYPAIR_SET__ + "name":"__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_SERVER_NAME__", + "imageRef":"__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_IMAGE_ID__", + "flavorRef":"__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_FLAVOR_ID__", + "user_data":"__K2HDKC_DBAAS_LAUNCH_VM_TEMPLATE_USER_DATA__" + } +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts=4 fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/src/libexec/database/k2hdkc_dbaas_resource.templ b/src/libexec/database/k2hdkc_dbaas_resource.templ new file mode 100644 index 0000000..c882621 --- /dev/null +++ b/src/libexec/database/k2hdkc_dbaas_resource.templ @@ -0,0 +1,132 @@ +{{#!k2hr3template }} +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. 
+# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# +# GLOBAL SECTION +# +[GLOBAL] +FILEVERSION = 1 +DATE = __K2HDKC_DBAAS_CLI_DATE__ +GROUP = {{= %cluster-name% }} +MODE = {{= %chmpx-mode% }} +CHMPXIDTYPE = CUSTOM +DELIVERMODE = hash +MAXCHMPX = 256 +REPLICA = 1 +MAXMQSERVER = 8 +MAXMQCLIENT = 128 +MQPERATTACH = 8 +MAXQPERSERVERMQ = 8 +MAXQPERCLIENTMQ = 8 +MAXMQPERCLIENT = 16 +MAXHISTLOG = 0 +{{ if 'SERVER' == %chmpx-mode% }} +PORT = {{= %chmpx-server-port% }} +CTLPORT = {{= %chmpx-server-ctlport% }} +SELFCTLPORT = {{= %chmpx-server-ctlport% }} +{{ else }} +CTLPORT = {{= %chmpx-slave-ctlport% }} +SELFCTLPORT = {{= %chmpx-slave-ctlport% }} +{{ endif }} +SELFCUK = __SELF_INSTANCE_ID__ +RWTIMEOUT = 10000 +RETRYCNT = 500 +CONTIMEOUT = 1000 +MQRWTIMEOUT = 500 +MQRETRYCNT = 10000 +MQACK = no +AUTOMERGE = on +DOMERGE = on +MERGETIMEOUT = 0 +SOCKTHREADCNT = 8 +MQTHREADCNT = 8 +MAXSOCKPOOL = 16 +SOCKPOOLTIMEOUT = 0 +SSL = no +K2HFULLMAP = on +K2HMASKBIT = 4 +K2HCMASKBIT = 8 +K2HMAXELE = 16 + +# +# SERVER NODES SECTION +# +{{ foreach %host_key% in %yrn:yahoo:::__K2HDKC_DBAAS_CLI_TENANT_NAME__:role:__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/server/hosts/ip% }} + {{ %one_host% = %yrn:yahoo:::__K2HDKC_DBAAS_CLI_TENANT_NAME__:role:__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/server/hosts/ip%{%host_key%} }} +[SVRNODE] +NAME = {{= %one_host%{'host'} }} +CUK = {{= %one_host%{'cuk'} }} +PORT = {{= %chmpx-server-port% }} +CTLPORT = {{= %chmpx-server-ctlport% }} +CUSTOM_ID_SEED = {{= %one_host%{'tag'} }} +SSL = no +{{ done }} + +# +# SLAVE NODES SECTION +# +{{ if 0 < %yrn:yahoo:::__K2HDKC_DBAAS_CLI_TENANT_NAME__:role:__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/slave/hosts/ip%.length }} + {{ foreach %host_key% in %yrn:yahoo:::__K2HDKC_DBAAS_CLI_TENANT_NAME__:role:__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/slave/hosts/ip% }} + {{ %one_host% = %yrn:yahoo:::__K2HDKC_DBAAS_CLI_TENANT_NAME__:role:__K2HDKC_DBAAS_CLI_CLUSTER_NAME__/slave/hosts/ip%{%host_key%} }} +[SLVNODE] +NAME = {{= %one_host%{'host'} }} +CUK = {{= %one_host%{'cuk'} }} +CTLPORT = {{= %chmpx-slave-ctlport% }} +CUSTOM_ID_SEED = {{= %one_host%{'tag'} }} +SSL = no + {{ done }} +{{ else }} +# +# This is output as a dummy slave node when there are no slave nodes. +# If the slave node definition does not exist, CHMPX will not start. +# To avoid this, register only one localhost as a dummy. +# +[SLVNODE] +NAME = 127.0.0.1 +CUK = dummy_cuk +CTLPORT = {{= %chmpx-slave-ctlport% }} +SSL = no +{{ endif }} + +{{ if 'SERVER' == %chmpx-mode% }} +# +# K2HDKC SECTION +# +[K2HDKC] +K2HTYPE = file +K2HFILE = /var/lib/k2hdkc/data/k2hdkc.k2h +K2HFULLMAP = on +K2HINIT = no +K2HMASKBIT = 8 +K2HCMASKBIT = 8 +K2HMAXELE = 32 +K2HPAGESIZE = 512 +MAXTHREAD = 20 +{{ endif }} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts=4 fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/src/libexec/database/k2hdkc_dbaas_resource_keys.config b/src/libexec/database/k2hdkc_dbaas_resource_keys.config new file mode 100644 index 0000000..28d22ba --- /dev/null +++ b/src/libexec/database/k2hdkc_dbaas_resource_keys.config @@ -0,0 +1,60 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! 
Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#--------------------------------------------------------------------- +# Sub RESOURCE Keys for K2HDKC DBAAS CLI +#--------------------------------------------------------------------- +# +# Configuration for values that can be set for the K2HDKC DBAAS CLI. +# The format is a format that can be read by a shell script. +# = +# (please do not put a space before and after the equal.) +# + +# +# Server RESOURCE Keys +# +# DATABASE_SERVER_KEY_INI_PKG : for "k2hr3-init-packages" key +# DATABASE_SERVER_KEY_INI_PCPKG : for "k2hr3-init-packagecloud-packages" key +# DATABASE_SERVER_KEY_INI_SYSPKG : for "k2hr3-init-systemd-packages" key +# +DATABASE_SERVER_KEY_INI_PKG="" +DATABASE_SERVER_KEY_INI_PCPKG="k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc" +DATABASE_SERVER_KEY_INI_SYSPKG="chmpx.service,k2hdkc.service,k2hr3-get-resource.timer" + +# +# Slave RESOURCE Keys +# +# DATABASE_SLAVE_KEY_INI_PKG : for "k2hr3-init-packages" key +# DATABASE_SLAVE_KEY_INI_PCPKG : for "k2hr3-init-packagecloud-packages" key +# DATABASE_SLAVE_KEY_INI_SYSPKG : for "k2hr3-init-systemd-packages" key +# +DATABASE_SLAVE_KEY_INI_PKG="" +DATABASE_SLAVE_KEY_INI_PCPKG="k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx" +DATABASE_SLAVE_KEY_INI_SYSPKG="chmpx.service,k2hr3-get-resource.timer" + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: expandtab sw=4 ts=4 fdm=marker +# vim<600: expandtab sw=4 ts=4 +# diff --git a/src/libexec/database/openstack.sh b/src/libexec/database/openstack.sh new file mode 100644 index 0000000..14b4f06 --- /dev/null +++ b/src/libexec/database/openstack.sh @@ -0,0 +1,2707 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# Variables +#-------------------------------------------------------------- +#TEST +# # +# # Endpoints for openstack services +# # +# # These values are set when the Token is verified or the Scoped Token is issued. 
+# # +# K2HR3CLI_OPENSTACK_NOVA_URI="" +# K2HR3CLI_OPENSTACK_GLANCE_URI="" +# K2HR3CLI_OPENSTACK_NEUTRON_URI="" + +# +# Security Group Name +# +K2HR3CLI_OPENSTACK_SERVER_SECGRP_SUFFIX="-k2hdkc-server-sec" +K2HR3CLI_OPENSTACK_SLAVE_SECGRP_SUFFIX="-k2hdkc-slave-sec" + +#-------------------------------------------------------------- +# Functions for OpenStack +#-------------------------------------------------------------- +# +# Complement and Set OpenStack user name +# +# $? : result +# +# Access and Change Environment +# K2HR3CLI_OPENSTACK_USER +# K2HR3CLI_OPENSTACK_USER_ID +# K2HR3CLI_OPENSTACK_PASS +# K2HR3CLI_OPENSTACK_TOKEN +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN +# K2HR3CLI_USER +# +complement_op_user_name() +{ + # + # Check alternative values + # + if [ "X${K2HR3CLI_OPENSTACK_USER}" = "X" ]; then + # + # Reset user id / passphrase / tokens + # + K2HR3CLI_OPENSTACK_USER_ID="" + K2HR3CLI_OPENSTACK_PASS="" + K2HR3CLI_OPENSTACK_TOKEN="" + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="" + + if [ "X${K2HR3CLI_USER}" != "X" ]; then + K2HR3CLI_OPENSTACK_USER=${K2HR3CLI_USER} + fi + fi + + # + # Interacvive input + # + completion_variable_auto "K2HR3CLI_OPENSTACK_USER" "OpenStack User name: " 1 + _TOKEN_LIB_RESULT_TMP=$? + prn_dbg "(complement_op_user_name) OpenStack User name = \"${K2HR3CLI_OPENSTACK_USER}\"." + return ${_TOKEN_LIB_RESULT_TMP} +} + +# +# Complement and Set OpenStack user passphrase +# +# $? : result +# +# Access and Change Environment +# K2HR3CLI_OPENSTACK_PASS +# K2HR3CLI_PASS +# +complement_op_user_passphrase() +{ + # + # Check alternative values + # + if [ "X${K2HR3CLI_OPENSTACK_PASS}" = "X" ]; then + # + # Reset tokens + # + K2HR3CLI_OPENSTACK_TOKEN="" + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="" + + if [ "X${K2HR3CLI_PASS}" != "X" ]; then + K2HR3CLI_OPENSTACK_PASS=${K2HR3CLI_PASS} + fi + fi + + # + # Interacvive input + # + completion_variable_auto "K2HR3CLI_OPENSTACK_PASS" "OpenStack User passphrase: " 1 1 + _TOKEN_LIB_RESULT_TMP=$? + prn_dbg "(complement_op_user_passphrase) OpenStack User passphrase = \"*****(${#K2HR3CLI_OPENSTACK_PASS})\"." + return ${_TOKEN_LIB_RESULT_TMP} +} + +# +# Complement and Set OpenStack Tenant +# +# $? : result +# +# Access and Change Environment +# K2HR3CLI_OPENSTACK_TENANT +# K2HR3CLI_OPENSTACK_TENANT_ID +# K2HR3CLI_TENANT +# +complement_op_tenant() +{ + # + # Check alternative values + # + if [ "X${K2HR3CLI_OPENSTACK_TENANT}" = "X" ]; then + # + # Reset tenant id / tokens + # + K2HR3CLI_OPENSTACK_TENANT_ID="" + K2HR3CLI_OPENSTACK_TOKEN="" + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="" + + if [ "X${K2HR3CLI_TENANT}" != "X" ]; then + K2HR3CLI_OPENSTACK_TENANT=${K2HR3CLI_TENANT} + fi + fi + # + # Interacvive input + # + completion_variable_auto "K2HR3CLI_OPENSTACK_TENANT" "OpenStack Project(tenant) name: " 1 + _TOKEN_LIB_RESULT_TMP=$? + prn_dbg "(complement_op_tenant) OpenStack Project(tenant) name = \"${K2HR3CLI_OPENSTACK_TENANT}\"." + return ${_TOKEN_LIB_RESULT_TMP} +} + +# +# Complement and Set OpenStack unscoped token +# +# $? : result +# +# Set Variables +# K2HR3CLI_OPENSTACK_USER : user name +# K2HR3CLI_OPENSTACK_USER_ID : user id +# K2HR3CLI_OPENSTACK_TOKEN : valid token (unscoped token) +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN +# +complement_op_utoken() +{ + # + # Reset Tokens + # + K2HR3CLI_OPENSTACK_TOKEN="" + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="" + + # + # Get unscoped token + # + complement_op_user_name + if [ $? -ne 0 ]; then + return 1 + fi + complement_op_user_passphrase + if [ $? 
-ne 0 ]; then + return 1 + fi + + # + # Create Unscoped Token + # + get_op_utoken "${K2HR3CLI_OPENSTACK_USER}" "${K2HR3CLI_OPENSTACK_PASS}" + if [ $? -ne 0 ]; then + return 1 + fi + return 0 +} + +# +# Complement and Set OpenStack Scoped token +# +# $? : result +# +# Using and Set Variables +# K2HR3CLI_OPENSTACK_TOKEN : valid token (may be scoped token) +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN : valid scoped token +# K2HR3CLI_OPENSTACK_USER : user name +# K2HR3CLI_OPENSTACK_USER_ID : user id +# K2HR3CLI_OPENSTACK_PASS : user name +# K2HR3CLI_OPENSTACK_TENANT : tenant(scoped token) +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# K2HR3CLI_OPENSTACK_NOVA_URI : endpoint uri for nova +# K2HR3CLI_OPENSTACK_GLANCE_URI : endpoint uri for glance +# K2HR3CLI_OPENSTACK_NEUTRON_URI : endpoint uri for neutron +# +complement_op_token() +{ + # + # Reset Tokens + # + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="" + + # + # Check existed openstack token + # + if [ "X${K2HR3CLI_OPENSTACK_TOKEN}" != "X" ]; then + get_op_token_info "${K2HR3CLI_OPENSTACK_TOKEN}" + if [ $? -eq 0 ]; then + if [ "X${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" != "X" ]; then + # + # Valid token which is scoped token, so nothing to do + # + return 0 + fi + else + K2HR3CLI_OPENSTACK_TOKEN="" + fi + fi + + # + # Get unscoped token + # + if [ "X${K2HR3CLI_OPENSTACK_TOKEN}" = "X" ]; then + # + # No unscoped token, then create it + # + complement_op_user_name + if [ $? -ne 0 ]; then + return 1 + fi + complement_op_user_passphrase + if [ $? -ne 0 ]; then + return 1 + fi + + # + # Create Unscoped Token + # + get_op_utoken "${K2HR3CLI_OPENSTACK_USER}" "${K2HR3CLI_OPENSTACK_PASS}" + if [ $? -ne 0 ]; then + return 1 + fi + fi + + # + # Get tenant id + # + complement_op_tenant + if [ $? -ne 0 ]; then + return 1 + fi + get_op_tenant_id + if [ $? -ne 0 ]; then + return 1 + fi + + # + # Get Scoped Token + # + get_op_token + if [ $? -ne 0 ]; then + return 1 + fi + + return 0 +} + +#-------------------------------------------------------------- +# Functions for API Requests +#-------------------------------------------------------------- +# +# Get Service endpoint in catalog +# +# $1 : type(string) +# $2 : name(string) +# $3 : parsed json file(catalog response) +# $? : result +# +# [NOTE] catalog response +# { +# "catalog": [ +# { +# "endpoints": [ +# { +# "id": "...", +# "interface": "public", +# "region": "RegionOne", +# "url": "http://..." +# }, +# { +# "id": "...", +# "interface": "internal", +# "region": "RegionOne", +# "url": "http://..." +# }, +# ... +# ], +# "id": "...", +# "type": "identity", +# "name": "keystone" +# }, +# ... +# ], +# "links": { +# "self": "https://.../identity/v3/catalog", +# "previous": null, +# "next": null +# } +# } +# +# Set Variables +# DBAAS_OP_FOUND_SERVICE_EP_URI : endpoint(ex. https://XXX.XXX.XXX.XXX/...) +# +get_op_service_ep() +{ + DBAAS_OP_FOUND_SERVICE_EP_URI="" + + if [ "X$1" = "X" ] || [ "X$2" = "X" ] || [ "X$3" = "X" ]; then + return 1 + fi + _DBAAS_OP_SERVICE_TYPE=$1 + _DBAAS_OP_SERVICE_NAME=$2 + _DBAAS_OP_PARSED_FILE=$3 + + # + # Get catalog array + # + jsonparser_get_key_value '%"catalog"%' "$3" + if [ $? -ne 0 ]; then + prn_dbg "(get_op_service_ep) Failed to parse for \"catalog\"." + return 1 + fi + if [ "X${JSONPARSER_FIND_VAL_TYPE}" != "X${JP_TYPE_ARR}" ]; then + prn_dbg "(get_op_service_ep) \"catalog\" is not array." 
+ return 1 + fi + _DATABASE_RESULT_CATALOG_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Loop catalog + # + for _DATABASE_RESULT_CATALOG_POS in ${_DATABASE_RESULT_CATALOG_LIST}; do + # + # catalog[x]->name + # + _DATABASE_RESULT_CATALOG_POS_RAW=$(pecho -n "${_DATABASE_RESULT_CATALOG_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"catalog\"%${_DATABASE_RESULT_CATALOG_POS_RAW}%\"name\"%" "${_DBAAS_OP_PARSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(get_op_service_ep) Failed to get ${_DBAAS_DEL_ROLE_PATH} catalog[${_DATABASE_RESULT_CATALOG_POS_RAW}]->name." + continue + fi + _DBAAS_OP_CATALOG_NAME=${JSONPARSER_FIND_STR_VAL} + + # + # catalog[x]->type + # + jsonparser_get_key_value "%\"catalog\"%${_DATABASE_RESULT_CATALOG_POS_RAW}%\"type\"%" "${_DBAAS_OP_PARSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(get_op_service_ep) Failed to get ${_DBAAS_DEL_ROLE_PATH} catalog[${_DATABASE_RESULT_CATALOG_POS_RAW}]->type." + continue + fi + _DBAAS_OP_CATALOG_TYPE=${JSONPARSER_FIND_STR_VAL} + + # + # Compare + # + if [ "X${_DBAAS_OP_CATALOG_NAME}" = "X${_DBAAS_OP_SERVICE_NAME}" ] && [ "X${_DBAAS_OP_CATALOG_TYPE}" = "X${_DBAAS_OP_SERVICE_TYPE}" ]; then + # + # Found, get endpoints for service + # + jsonparser_get_key_value "%\"catalog\"%${_DATABASE_RESULT_CATALOG_POS_RAW}%\"endpoints\"%" "${_DBAAS_OP_PARSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(get_op_service_ep) Failed to get ${_DBAAS_DEL_ROLE_PATH} catalog[${_DATABASE_RESULT_CATALOG_POS_RAW}]->endpoints." + continue + fi + _DATABASE_RESULT_EP_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DATABASE_RESULT_EP_POS in ${_DATABASE_RESULT_EP_LIST}; do + # + # catalog[x]->endpoints[x]->url + # + _DATABASE_RESULT_EP_POS_RAW=$(pecho -n "${_DATABASE_RESULT_EP_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"catalog\"%${_DATABASE_RESULT_CATALOG_POS_RAW}%\"endpoints\"%${_DATABASE_RESULT_EP_POS_RAW}%\"url\"%" "${_DBAAS_OP_PARSED_FILE}" + if [ $? -eq 0 ]; then + # + # Cut last word if it is '/' and space + # + DBAAS_OP_FOUND_SERVICE_EP_URI=$(pecho -n "${JSONPARSER_FIND_STR_VAL}" | sed -e 's/[[:space:]]+//g' -e 's#/$##g') + return 0 + else + prn_dbg "(get_op_service_ep) Failed to get ${_DBAAS_DEL_ROLE_PATH} catalog[${_DATABASE_RESULT_CATALOG_POS_RAW}]->endpoints[${_DATABASE_RESULT_EP_POS_RAW}]->url." + fi + done + fi + done + + # + # Not found + # + return 1 +} + +# +# Set OpenStack services endpoints +# +# $1 : openstack scoped token +# $? 
: result +# +# Check and Set Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : endpoint uri for nova +# K2HR3CLI_OPENSTACK_GLANCE_URI : endpoint uri for glance +# K2HR3CLI_OPENSTACK_NEUTRON_URI : endpoint uri for neutron +# +get_op_service_eps() +{ + if [ "X$1" = "X" ]; then + return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" != "X" ] && [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" != "X" ] && [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" != "X" ]; then + # + # All endpoints is set, nothing to do + # + prn_dbg "(get_op_service_eps) OpenStack Nova Endpoint = ${K2HR3CLI_OPENSTACK_NOVA_URI}" + prn_dbg "(get_op_service_eps) OpenStack Glance Endpoint = ${K2HR3CLI_OPENSTACK_GLANCE_URI}" + prn_dbg "(get_op_service_eps) OpenStack Neutron Endpoint = ${K2HR3CLI_OPENSTACK_NEUTRON_URI}" + return 0 + fi + + #------------------------------------------------------ + # Get Endpoints(Catalog) + #------------------------------------------------------ + # [MEMO] + # GET http:///v3/auth/catalog + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_IDENTITY_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:$1" + _DBAAS_OP_URL_PATH="/v3/auth/catalog" + + get_request "${_DBAAS_OP_URL_PATH}" 1 "${_DBAAS_OP_AUTH_HEADER}" "${_DBAAS_OP_TOKEN_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_err "Failed to send the request to get catalog inforamtion." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Get endpoints in catalog + #------------------------------------------------------ + # [MEMO] + # { + # "catalog": [ + # { + # "endpoints": [ + # { + # "id": "...", + # "interface": "public", + # "region": "RegionOne", + # "url": "http://..." + # }, + # { + # "id": "...", + # "interface": "internal", + # "region": "RegionOne", + # "url": "http://..." + # }, + # ... + # ], + # "id": "...", + # "type": "identity", + # "name": "keystone" + # }, + # ... + # ], + # "links": { + # "self": "https://.../identity/v3/catalog", + # "previous": null, + # "next": null + # } + # } + # + _DBAAS_OP_SERVICE_EPS_RESULT=0 + + # + # Get Nova Uri + # + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + get_op_service_ep "compute" "nova" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "OpenStack Nova endpoint is not found in catalog information." + _DBAAS_OP_SERVICE_EPS_RESULT=1 + else + K2HR3CLI_OPENSTACK_NOVA_URI=${DBAAS_OP_FOUND_SERVICE_EP_URI} + add_config_update_var "K2HR3CLI_OPENSTACK_NOVA_URI" + prn_dbg "(get_op_service_eps) OpenStack Nova Endpoint = ${K2HR3CLI_OPENSTACK_NOVA_URI}" + fi + fi + + # + # Get Glance Uri + # + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" = "X" ]; then + get_op_service_ep "image" "glance" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "OpenStack Glance endpoint is not found in catalog information." 
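# [NOTE]
# A minimal caller-side sketch for this function, assuming the rest of the
# k2hr3 CLI libraries (request/jsonparser/config helpers) are already loaded
# and a scoped token has been obtained; shown for illustration only:
#
#   if get_op_service_eps "${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}"; then
#       pecho "nova    : ${K2HR3CLI_OPENSTACK_NOVA_URI}"
#       pecho "glance  : ${K2HR3CLI_OPENSTACK_GLANCE_URI}"
#       pecho "neutron : ${K2HR3CLI_OPENSTACK_NEUTRON_URI}"
#   fi
#
# A non-zero return only means that at least one of the three endpoints
# could not be resolved; any endpoints that were found are still set and
# recorded via add_config_update_var.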
+ _DBAAS_OP_SERVICE_EPS_RESULT=1 + else + K2HR3CLI_OPENSTACK_GLANCE_URI=${DBAAS_OP_FOUND_SERVICE_EP_URI} + add_config_update_var "K2HR3CLI_OPENSTACK_GLANCE_URI" + prn_dbg "(get_op_service_eps) OpenStack Glance Endpoint = ${K2HR3CLI_OPENSTACK_GLANCE_URI}" + fi + fi + + # + # Get Neutron Uri + # + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" = "X" ]; then + get_op_service_ep "network" "neutron" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "OpenStack Neutron endpoint is not found in catalog information." + _DBAAS_OP_SERVICE_EPS_RESULT=1 + else + K2HR3CLI_OPENSTACK_NEUTRON_URI=${DBAAS_OP_FOUND_SERVICE_EP_URI} + add_config_update_var "K2HR3CLI_OPENSTACK_NEUTRON_URI" + prn_dbg "(get_op_service_eps) OpenStack Neutron Endpoint = ${K2HR3CLI_OPENSTACK_NEUTRON_URI}" + fi + fi + rm -f "${JP_PAERSED_FILE}" + + return "${_DBAAS_OP_SERVICE_EPS_RESULT}" +} + +# +# Check OpenStack (Un)scoped Token +# +# $1 : openstack token(unscoped/scoped) +# $? : result +# +# Set Variables +# K2HR3CLI_OPENSTACK_TOKEN : valid token (may be scoped token) +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN : valid scoped token +# K2HR3CLI_OPENSTACK_USER : user name +# K2HR3CLI_OPENSTACK_USER_ID : user id +# K2HR3CLI_OPENSTACK_TENANT : tenant(scoped token) +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# K2HR3CLI_OPENSTACK_NOVA_URI : endpoint uri for nova +# K2HR3CLI_OPENSTACK_GLANCE_URI : endpoint uri for glance +# K2HR3CLI_OPENSTACK_NEUTRON_URI : endpoint uri for neutron +# +get_op_token_info() +{ + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" = "X" ]; then + prn_err "OpenStack(Identity) URI is not specified. Please specify with the ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option, K2HR3CLI_OPENSTACK_IDENTITY_URI environment variable, or configuration." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + + #------------------------------------------------------ + # Get token information + #------------------------------------------------------ + # [MEMO] + # GET http:///v3/auth/tokens?nocatalog + # + # ("nocatalog" argument is supported after Havana) + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_IDENTITY_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:$1" + _DBAAS_OP_TOKEN_HEADER="X-Subject-Token:$1" + _DBAAS_OP_URL_PATH="/v3/auth/tokens?nocatalog" + + get_request "${_DBAAS_OP_URL_PATH}" 1 "${_DBAAS_OP_AUTH_HEADER}" "${_DBAAS_OP_TOKEN_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + _DBAAS_OP_TOKEN_PAERSED_FILE=${JP_PAERSED_FILE} + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${_DBAAS_OP_TOKEN_PAERSED_FILE}" "200" 1 2>/dev/null + if [ $? -ne 0 ]; then + prn_info "Failed to send the request to get Token inforamtion." + rm -f "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Get user id in result(parse result) + #------------------------------------------------------ + # [MEMO] + # { + # ... + # ... 
+ # "token": { + # "methods": ["password"], + # "user": { + # "domain": { + # "id": "default", + # "name": "Default" + # }, + # "id": "", + # "name": "", + # "password_expires_at": null + # }, + # "audit_ids": ["...."], + # "expires_at": "2021-01-01T00:00:00.000000Z", + # "issued_at": "2021-01-01T00:00:00.000000Z" + # } + # } + # + _DBAAS_OP_USER= + _DBAAS_OP_USER_ID= + + # + # user id + # + jsonparser_get_key_value '%"token"%"user"%"id"%' "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"token\"->\"user\"->\"id\" key in response body." + rm -f "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_USER_ID=${JSONPARSER_FIND_STR_VAL} + + # + # user name + # + jsonparser_get_key_value '%"token"%"user"%"name"%' "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"token\"->\"user\"->\"name\" key in response body." + rm -f "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_USER=${JSONPARSER_FIND_STR_VAL} + + #------------------------------------------------------ + # Check Scoped Token and Tenant + #------------------------------------------------------ + # [MEMO] + # { + # ... + # ... + # "token": { + # "project": { + # "domain": { + # "id": "default", + # "name": "Default" + # }, + # "id": "", + # "name": "" + # } + # } + # } + # + _DBAAS_OP_TENANT= + _DBAAS_OP_TENANT_ID= + + # + # project -> Scoped Token + # + jsonparser_get_key_value '%"token"%"project"%' "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + if [ $? -eq 0 ]; then + # + # tenant id + # + jsonparser_get_key_value '%"token"%"project"%"id"%' "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + if [ $? -eq 0 ]; then + _DBAAS_OP_TENANT_ID=${JSONPARSER_FIND_STR_VAL} + + # + # tenant name + # + jsonparser_get_key_value '%"token"%"project"%"name"%' "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + if [ $? -eq 0 ]; then + _DBAAS_OP_TENANT=${JSONPARSER_FIND_STR_VAL} + else + prn_warn "OpenStack token is scoped token, but there is no tenant name" + _DBAAS_OP_TENANT_ID= + fi + else + prn_warn "OpenStack token is scoped token, but there is no tenant id" + fi + else + prn_dbg "(get_op_token_info) OpenStack token is unscoped token.." + fi + rm -f "${_DBAAS_OP_TOKEN_PAERSED_FILE}" + + #------------------------------------------------------ + # Get Urls when scoped token + #------------------------------------------------------ + if [ "X${_DBAAS_OP_TENANT}" != "X" ] && [ "X${_DBAAS_OP_TENANT_ID}" != "X" ]; then + get_op_service_eps "$1" + if [ $? -ne 0 ]; then + prn_warn "Failed to set(get) OpenStack some service endpoints." + fi + fi + + # + # Set variables + # + K2HR3CLI_OPENSTACK_USER=${_DBAAS_OP_USER} + K2HR3CLI_OPENSTACK_USER_ID=${_DBAAS_OP_USER_ID} + K2HR3CLI_OPENSTACK_TOKEN="$1" + + add_config_update_var "K2HR3CLI_OPENSTACK_USER" + add_config_update_var "K2HR3CLI_OPENSTACK_USER_ID" + add_config_update_var "K2HR3CLI_OPENSTACK_TOKEN" + + prn_dbg "(get_op_token_info) OpenStack Unscoped Token = \"${K2HR3CLI_OPENSTACK_TOKEN}\"." + prn_dbg "(get_op_token_info) OpenStack User = \"${K2HR3CLI_OPENSTACK_USER}\"." + prn_dbg "(get_op_token_info) OpenStack User Id = \"${K2HR3CLI_OPENSTACK_USER_ID}\"." 
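# [NOTE]
# The token introspection request issued above corresponds to the following
# stand-alone sketch (OS_TOKEN and IDENTITY_URI are placeholders, not CLI
# variables). A 200 response whose body contains a "token"->"project"
# member indicates a scoped token; without it the token is unscoped:
#
#   curl -s -o token.json -w '%{http_code}\n' \
#        -H "X-Auth-Token: ${OS_TOKEN}" \
#        -H "X-Subject-Token: ${OS_TOKEN}" \
#        "${IDENTITY_URI}/v3/auth/tokens?nocatalog"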
+ + if [ "X${_DBAAS_OP_TENANT}" != "X" ] && [ "X${_DBAAS_OP_TENANT_ID}" != "X" ]; then + K2HR3CLI_OPENSTACK_TENANT=${_DBAAS_OP_TENANT} + K2HR3CLI_OPENSTACK_TENANT_ID=${_DBAAS_OP_TENANT_ID} + K2HR3CLI_OPENSTACK_SCOPED_TOKEN="$1" + + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT" + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT_ID" + add_config_update_var "K2HR3CLI_OPENSTACK_SCOPED_TOKEN" + + prn_dbg "(get_op_token_info) OpenStack Tenant = \"${K2HR3CLI_OPENSTACK_TENANT}\"." + prn_dbg "(get_op_token_info) OpenStack Tenant Id = \"${K2HR3CLI_OPENSTACK_TENANT_ID}\"." + prn_dbg "(get_op_token_info) OpenStack Scoped Token = \"${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}\"." + fi + + return 0 +} + +# +# Get OpenStack Unscoped Token from Credential +# +# $1 : openstack user name +# $2 : openstack user passphrase +# $? : result +# +# Set Variables +# K2HR3CLI_OPENSTACK_USER : user name +# K2HR3CLI_OPENSTACK_USER_ID : user id +# K2HR3CLI_OPENSTACK_PASS : user id +# K2HR3CLI_OPENSTACK_TOKEN : valid token (unscoped token) +# +get_op_utoken() +{ + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" = "X" ]; then + prn_err "OpenStack(Identity) URI is not specified. Please specify with the ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option, K2HR3CLI_OPENSTACK_IDENTITY_URI environment variable, or configuration." + return 1 + fi + if [ "X$1" = "X" ] || [ "X$2" = "X" ]; then + return 1 + fi + + #------------------------------------------------------ + # Send request for get unscoped token + #------------------------------------------------------ + # [MEMO] + # http:///v3/auth/tokens?nocatalog + # + # ("nocatalog" argument is supported after Havana) + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_IDENTITY_URI} + # shellcheck disable=SC2034 + K2HR3CLI_CURL_RESHEADER=1 + _DBAAS_OP_REQUEST_BODY="{\"auth\":{\"identity\":{\"password\":{\"user\":{\"domain\":{\"id\":\"default\"},\"password\":\"$2\",\"name\":\"$1\"}},\"methods\":[\"password\"]}}}" + _DBAAS_OP_URL_PATH="/v3/auth/tokens?nocatalog" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + # shellcheck disable=SC2034 + K2HR3CLI_CURL_RESHEADER=0 + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(get_op_utoken) Could not get unscoped token from existed token for openstack." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Parse uscoped token + #------------------------------------------------------ + # [MEMO] + # X-Subject-Token: + # + _DBAAS_OP_UTOKEN=$(grep '^X-Subject-Token:' "${K2HR3CLI_REQUEST_RESHEADER_FILE}" | sed -e 's/X-Subject-Token:[ ]*//g' | tr -d '\r' | tr -d '\n') + if [ $? -ne 0 ]; then + prn_warn "Failed to get unscoped token for OpenStack." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + if [ "X${_DBAAS_OP_UTOKEN}" = "X" ]; then + prn_warn "Got unscoped token for OpenStack is empty." 
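# [NOTE]
# The password authentication performed by this function is equivalent to
# the sketch below (placeholders OS_USER / OS_PASS / IDENTITY_URI, domain
# fixed to "default" exactly as in the request body above). The unscoped
# token is returned in the X-Subject-Token response header, not in the body,
# and a 201 status is expected on success:
#
#   curl -si -X POST -H "Content-Type: application/json" \
#        -d "{\"auth\":{\"identity\":{\"password\":{\"user\":{\"domain\":{\"id\":\"default\"},\"password\":\"${OS_PASS}\",\"name\":\"${OS_USER}\"}},\"methods\":[\"password\"]}}}" \
#        "${IDENTITY_URI}/v3/auth/tokens?nocatalog" \
#     | grep -i '^X-Subject-Token:' | sed -e 's/^[^:]*:[[:space:]]*//' | tr -d '\r'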
+ rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + + #------------------------------------------------------ + # Get user id in result(parse result) + #------------------------------------------------------ + # [MEMO] + # { + # "token": { + # "methods": ["password"], + # "user": { + # "domain": { + # "id": "default", + # "name": "Default" + # }, + # "id": "", + # "name": "", + # "password_expires_at": null + # }, + # "audit_ids": ["...."], + # "expires_at": "2021-01-01T00:00:00.000000Z", + # "issued_at": "2021-01-01T00:00:00.000000Z" + # } + # } + # + jsonparser_get_key_value '%"token"%"user"%"name"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"token\"->\"user\"->\"name\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_USER=${JSONPARSER_FIND_STR_VAL} + + jsonparser_get_key_value '%"token"%"user"%"id"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"token\"->\"user\"->\"id\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_USER_ID=${JSONPARSER_FIND_STR_VAL} + + rm -f "${JP_PAERSED_FILE}" + + # + # Success + # + K2HR3CLI_OPENSTACK_USER=${_DBAAS_OP_USER} + K2HR3CLI_OPENSTACK_USER_ID=${_DBAAS_OP_USER_ID} + K2HR3CLI_OPENSTACK_TOKEN=${_DBAAS_OP_UTOKEN} + K2HR3CLI_OPENSTACK_PASS="$2" + + add_config_update_var "K2HR3CLI_OPENSTACK_USER" + add_config_update_var "K2HR3CLI_OPENSTACK_USER_ID" + add_config_update_var "K2HR3CLI_OPENSTACK_TOKEN" + if [ "X${K2HR3CLI_OPT_SAVE_PASS}" = "X1" ] && [ "X${K2HR3CLI_PASS}" != "X" ]; then + add_config_update_var "K2HR3CLI_OPENSTACK_PASS" + fi + + prn_dbg "(get_op_utoken) OpenStack User = \"${K2HR3CLI_OPENSTACK_USER}\"." + prn_dbg "(get_op_utoken) OpenStack User ID = \"${K2HR3CLI_OPENSTACK_USER_ID}\"." + prn_dbg "(get_op_utoken) OpenStack Passphrase = \"********(${#K2HR3CLI_OPENSTACK_PASS})\"." + prn_dbg "(get_op_utoken) OpenStack Unscoped Token = \"${K2HR3CLI_OPENSTACK_TOKEN}\"." + + return 0 +} + +# +# Get OpenStack Scoped Token +# +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_TOKEN : unscoped token +# K2HR3CLI_OPENSTACK_TENANT : tenant +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +# Set Variables +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN : valid scoped token +# K2HR3CLI_OPENSTACK_NOVA_URI : endpoint uri for nova +# K2HR3CLI_OPENSTACK_GLANCE_URI : endpoint uri for glance +# K2HR3CLI_OPENSTACK_NEUTRON_URI : endpoint uri for neutron +# +get_op_token() +{ + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" = "X" ]; then + prn_err "OpenStack(Identity) URI is not specified. Please specify with the ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option, K2HR3CLI_OPENSTACK_IDENTITY_URI environment variable, or configuration." 
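# [NOTE]
# The request sent below exchanges an unscoped token for a project-scoped
# one. As a stand-alone sketch (UNSCOPED_TOKEN / TENANT_ID / IDENTITY_URI
# are placeholders), the same exchange looks like this, again returning the
# new token in the X-Subject-Token response header with a 201 status:
#
#   curl -si -X POST -H "Content-Type: application/json" \
#        -d "{\"auth\":{\"identity\":{\"methods\":[\"token\"],\"token\":{\"id\":\"${UNSCOPED_TOKEN}\"}},\"scope\":{\"project\":{\"id\":\"${TENANT_ID}\"}}}}" \
#        "${IDENTITY_URI}/v3/auth/tokens?nocatalog" \
#     | grep -i '^X-Subject-Token:' | sed -e 's/^[^:]*:[[:space:]]*//' | tr -d '\r'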
+ return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TOKEN}" = "X" ] || [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" = "X" ]; then + return 1 + fi + + #------------------------------------------------------ + # Send request for get scoped token + #------------------------------------------------------ + # [MEMO] + # http:///v3/auth/tokens?nocatalog + # + # ("nocatalog" argument is supported after Havana) + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_IDENTITY_URI} + # shellcheck disable=SC2034 + K2HR3CLI_CURL_RESHEADER=1 + _DBAAS_OP_REQUEST_BODY="{\"auth\":{\"identity\":{\"methods\":[\"token\"],\"token\":{\"id\":\"${K2HR3CLI_OPENSTACK_TOKEN}\"}},\"scope\":{\"project\":{\"id\":\"${K2HR3CLI_OPENSTACK_TENANT_ID}\"}}}}" + _DBAAS_OP_URL_PATH="/v3/auth/tokens?nocatalog" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + # shellcheck disable=SC2034 + K2HR3CLI_CURL_RESHEADER=0 + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + _DBAAS_PAERSED_FILE="${JP_PAERSED_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${_DBAAS_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(get_op_token) Could not get scoped token from unscoped token." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${_DBAAS_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Parse scoped token + #------------------------------------------------------ + # [MEMO] + # X-Subject-Token: + # + _DBAAS_OP_TOKEN=$(grep '^X-Subject-Token:' "${K2HR3CLI_REQUEST_RESHEADER_FILE}" | sed -e 's/X-Subject-Token:[ ]*//g' | tr -d '\r' | tr -d '\n') + if [ $? -ne 0 ]; then + prn_warn "Failed to get scoped token for OpenStack." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${_DBAAS_PAERSED_FILE}" + return 1 + fi + if [ "X${_DBAAS_OP_TOKEN}" = "X" ]; then + prn_warn "Got scoped token for OpenStack is empty." + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${_DBAAS_PAERSED_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + rm -f "${_DBAAS_PAERSED_FILE}" + + #------------------------------------------------------ + # Check and Set service endpoints + #------------------------------------------------------ + get_op_service_eps "${_DBAAS_OP_TOKEN}" + + # + # Success + # + K2HR3CLI_OPENSTACK_SCOPED_TOKEN=${_DBAAS_OP_TOKEN} + + add_config_update_var "K2HR3CLI_OPENSTACK_SCOPED_TOKEN" + + prn_dbg "(get_op_token) OpenStack Scoped Token = \"${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}\"." + + return 0 +} + +# +# Get OpenStack tenant id +# +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_TOKEN : unscoped token +# K2HR3CLI_OPENSTACK_USER_ID : user id +# +# Set Variables +# K2HR3CLI_OPENSTACK_TENANT : tenant(scoped token) +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +get_op_tenant_id() +{ + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" = "X" ]; then + prn_err "OpenStack(Identity) URI is not specified. Please specify with the ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option, K2HR3CLI_OPENSTACK_IDENTITY_URI environment variable, or configuration." 
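# [NOTE]
# The project listing request issued below can be reproduced with this
# sketch (placeholders OS_TOKEN / USER_ID / IDENTITY_URI; "mytenant" is just
# an example tenant name; jq is used for illustration only). It shows how a
# tenant name is mapped to its id, which is what the loop further down does
# with the jsonparser_* helpers:
#
#   curl -s -H "X-Auth-Token: ${OS_TOKEN}" \
#        "${IDENTITY_URI}/v3/users/${USER_ID}/projects" \
#     | jq -r '.projects[] | select(.name == "mytenant") | .id'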
+ return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT}" != "X" ] && [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" != "X" ]; then + return 0 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT}" = "X" ]; then + return 1 + fi + + #------------------------------------------------------ + # Send request for get project(tenant) list + #------------------------------------------------------ + # [MEMO] + # http:///v3/users//projects + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_IDENTITY_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_TOKEN}" + _DBAAS_OP_URL_PATH="/v3/users/${K2HR3CLI_OPENSTACK_USER_ID}/projects" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_dbg "(get_op_tenant_id) Could not get tenant(project) list." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search tenant id by name + #------------------------------------------------------ + # [MEMO] + # { + # "projects": [ + # { + # "id": "", + # "name": "", + # "domain_id": "default", + # "description": "", + # "enabled": true, + # "parent_id": "default", + # "is_domain": false, + # "tags": [], + # "options": {}, + # "links": { + # "self": "https://..." + # } + # }, + # {...} + # ] + # } + # + jsonparser_get_key_value '%"projects"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"projects\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_PROJECT_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DBAAS_OP_PROJECT_POS in ${_DBAAS_OP_PROJECT_LIST}; do + # + # Check tenant name + # + _DBAAS_OP_PROJECT_POS_RAW=$(pecho -n "${_DBAAS_OP_PROJECT_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"projects\"%${_DBAAS_OP_PROJECT_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + continue + fi + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${K2HR3CLI_OPENSTACK_TENANT}" ]; then + # + # Found same tenant name + # + jsonparser_get_key_value "%\"projects\"%${_DBAAS_OP_PROJECT_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + continue + fi + K2HR3CLI_OPENSTACK_TENANT_ID=${JSONPARSER_FIND_STR_VAL} + + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT" + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT_ID" + + prn_dbg "(get_op_tenant_id) OpenStack Tenant = \"${K2HR3CLI_OPENSTACK_TENANT}\"." + prn_dbg "(get_op_tenant_id) OpenStack Tenant Id = \"${K2HR3CLI_OPENSTACK_TENANT_ID}\"." + + rm -f "${JP_PAERSED_FILE}" + return 0 + else + # + # Maybe K2HR3CLI_OPENSTACK_TENANT is an id, so check the id + # + _DBAAS_OP_PROJECT_TENANT_TMP=${JSONPARSER_FIND_STR_VAL} + + jsonparser_get_key_value "%\"projects\"%${_DBAAS_OP_PROJECT_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? 
-eq 0 ]; then + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${K2HR3CLI_OPENSTACK_TENANT}" ]; then + # + # Found same tenant id + # + K2HR3CLI_OPENSTACK_TENANT=${_DBAAS_OP_PROJECT_TENANT_TMP} + K2HR3CLI_OPENSTACK_TENANT_ID=${JSONPARSER_FIND_STR_VAL} + + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT" + add_config_update_var "K2HR3CLI_OPENSTACK_TENANT_ID" + + prn_dbg "(get_op_tenant_id) OpenStack Tenant = \"${K2HR3CLI_OPENSTACK_TENANT}\"." + prn_dbg "(get_op_tenant_id) OpenStack Tenant Id = \"${K2HR3CLI_OPENSTACK_TENANT_ID}\"." + + rm -f "${JP_PAERSED_FILE}" + return 0 + fi + fi + fi + done + + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +#-------------------------------------------------------------- +# Functions for OpenStack Neutron +#-------------------------------------------------------------- +# +# Get security group name +# +# $1 : cluster name +# $2 : server(0: default)/slave(1) +# $? : result +# +get_op_security_group_name() +{ + if [ "X$1" = "X" ]; then + pecho -n "" + fi + if [ "X$2" = "X1" ]; then + _DBAAS_OP_SECGRP_NAME="$1${K2HR3CLI_OPENSTACK_SLAVE_SECGRP_SUFFIX}" + else + _DBAAS_OP_SECGRP_NAME="$1${K2HR3CLI_OPENSTACK_SERVER_SECGRP_SUFFIX}" + fi + pecho -n "${_DBAAS_OP_SECGRP_NAME}" +} + +# +# Check security group +# +# $1 : cluster name +# $2 : server(0: default)/slave(1) +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NEUTRON_URI : neutron uri +# +# Set Variables +# K2HR3CLI_OPENSTACK_FIND_SECGRP_ID : security group id +# +check_op_security_group() +{ + K2HR3CLI_OPENSTACK_FIND_SECGRP_ID= + + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" = "X" ]; then + prn_err "OpenStack(Neutron) URI is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + _DBAAS_OP_SECGRP_NAME=$(get_op_security_group_name "$1" "$2") + + #------------------------------------------------------ + # Send request for get security group + #------------------------------------------------------ + # [MEMO] + # http:///v2.0/security-groups?name=&fields=id&fields=name + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/v2.0/security-groups?name=${_DBAAS_OP_SECGRP_NAME}&fields=id&fields=name" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_dbg "(check_security_group) Could not get security group list." 
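# [NOTE]
# Naming convention used by these helpers: the cluster name plus the
# server/slave suffix variables used above
# (K2HR3CLI_OPENSTACK_SERVER_SECGRP_SUFFIX / K2HR3CLI_OPENSTACK_SLAVE_SECGRP_SUFFIX),
# which for a cluster named "mycluster" typically yields
# "mycluster-k2hdkc-server-sec" and "mycluster-k2hdkc-slave-sec".
# A minimal usage sketch, illustration only ("mycluster" is an example):
#
#   if check_op_security_group "mycluster" 0; then
#       pecho "server security group id: ${K2HR3CLI_OPENSTACK_FIND_SECGRP_ID}"
#   else
#       create_op_security_group "mycluster" 0
#   fi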
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search segurity group + #------------------------------------------------------ + # [MEMO] + # { + # "security_groups": [ + # { + # "created_at": "2021-01-01T00:00:00Z", + # "description": "security group for k2hr3 server node", + # "id": "8fd53eb5-adaf-48ef-88f5-a61970bb03f5", + # "name": "mycluster-k2hdkc-server-sec", <---- Check this + # "project_id": "a0b790a86c5544b7bb8c5acf53e59e0a", + # "revision_number": 1, + # "security_group_rules": [ + # { + # "created_at": "2021-01-01T00:00:00Z", + # "description": "k2hdkc/chmpx server node port", + # "direction": "ingress", + # "ethertype": "IPv4", + # "id": "dac59a32-dd05-40ea-a208-7fc4bc9c68f2", + # "port_range_max": 8020, + # "port_range_min": 8020, + # "project_id": "a0b790a86c5544b7bb8c5acf53e59e0a", + # "protocol": "tcp", + # "remote_group_id": null, + # "remote_ip_prefix": "0.0.0.0/0", + # "revision_number": 0, + # "security_group_id": "8fd53eb5-adaf-48ef-88f5-a61970bb03f5", + # "tags": [], + # "tenant_id": "......", + # "updated_at": "2021-01-01T00:00:00Z" + # }, + # {...} + # ], + # "stateful": true, + # "tags": [], + # "tenant_id": ".....", + # "updated_at": "2021-01-01T00:00:00Z" + # } + # ] + # } + # + jsonparser_get_key_value '%"security_groups"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"security_groups\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_SECGRP_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DBAAS_OP_SECGRP_POS in ${_DBAAS_OP_SECGRP_LIST}; do + # + # Check security groups name + # + _DBAAS_OP_SECGRP_POS_RAW=$(pecho -n "${_DBAAS_OP_SECGRP_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"security_groups\"%${_DBAAS_OP_SECGRP_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${_DBAAS_OP_SECGRP_NAME}" ]; then + # + # Found same security group name + # + jsonparser_get_key_value "%\"security_groups\"%${_DBAAS_OP_SECGRP_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "\"security_groups\"->\"id\" value in response body is somthing wrong." + return 1 + fi + K2HR3CLI_OPENSTACK_FIND_SECGRP_ID=${JSONPARSER_FIND_STR_VAL} + + prn_dbg "(check_security_group) Found secury group." + rm -f "${JP_PAERSED_FILE}" + return 0 + fi + fi + done + + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +# +# Create security group(if not exists) +# +# $1 : cluster name +# $2 : server(0: default)/slave(1) +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NEUTRON_URI : neutron uri +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +# Set Variables +# K2HR3CLI_OPENSTACK_SERVER_SECGRP : security group name for server +# K2HR3CLI_OPENSTACK_SLAVE_SECGRP : security group name for slave +# +# [NOTE] +# This function does not check existing security groups. +# Please check before calling. +# +create_op_security_group() +{ + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" = "X" ]; then + prn_err "OpenStack(Neutron) URI is not specified." + return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" = "X" ]; then + prn_err "OpenStack Project(tenant) id is not specified." 
+ return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + if [ "X$2" = "X1" ]; then + _DBAAS_OP_SECGRP_TYPE="slave" + else + _DBAAS_OP_SECGRP_TYPE="server" + fi + + # + # Secutiry Group Name + # + _DBAAS_OP_SECGRP_NAME=$(get_op_security_group_name "$1" "$2") + + #------------------------------------------------------ + # Send request for create security group + #------------------------------------------------------ + # [MEMO] + # http:///v2.0/security-groups + # + # { + # "security_groups": [ + # { + # "name": "-k2hdkc-[server|slave]-sec", + # "description": "security group for k2hdkc [server|slave] node", + # "stateful": true, <---- old version openstack(neutron) don't understand this. + # "project_id": "........" + # } + # ] + # } + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_REQUEST_BODY="{\"security_groups\":[{\"name\":\"${_DBAAS_OP_SECGRP_NAME}\",\"description\":\"security group for k2hdkc $1 ${_DBAAS_OP_SECGRP_TYPE} node\",\"project_id\":\"${K2HR3CLI_OPENSTACK_TENANT_ID}\"}]}" + _DBAAS_OP_URL_PATH="/v2.0/security-groups" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(create_op_security_group) Failed to create security group." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search segurity group id + #------------------------------------------------------ + # [MEMO] + # { + # "security_groups": [ + # { + # "created_at": "2021-01-01T00:00:00Z", + # "description": "security group for k2hdkc [server|slave] node", + # *** "id": "............", + # "name": "-k2hdkc-[server|slave]-sec", + # "project_id": "......", + # "revision_number": 1, + # "security_group_rules": [ + # {...} + # ], + # "stateful": true, + # "tags": [], + # "tenant_id": "......", + # "updated_at": "2021-01-01T00:00:00Z" + # } + # ] + # } + # + jsonparser_get_key_value '%"security_groups"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"security_groups\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_SECGRP_LIST=${JSONPARSER_FIND_KEY_VAL} + _DBAAS_OP_SECGRP_ID="" + + for _DBAAS_OP_SECGRP_POS in ${_DBAAS_OP_SECGRP_LIST}; do + # + # Check security groups id + # + _DBAAS_OP_SECGRP_POS_RAW=$(pecho -n "${_DBAAS_OP_SECGRP_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"security_groups\"%${_DBAAS_OP_SECGRP_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + # + # Found id + # + _DBAAS_OP_SECGRP_ID=${JSONPARSER_FIND_STR_VAL} + break + fi + done + if [ "X${_DBAAS_OP_SECGRP_ID}" = "X" ]; then + prn_warn "Not found \"security_groups\"->\"id\" key in response body." 
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + + #------------------------------------------------------ + # Create security rules for security group + #------------------------------------------------------ + # [MEMO] + # http:///v2.0/security-group-rules + # + # { + # "security_group_rule": { + # "description": "k2hdkc/chmpx [server|slave] node (control) port", + # "protocol": "tcp", + # "direction": "ingress", + # "ethertype": "IPv4", + # "port_range_max": , + # "port_range_min": , + # "remote_group_id": null, + # "security_group_id": "......" + # } + # } + # + if [ "X${_DBAAS_OP_SECGRP_TYPE}" = "Xserver" ]; then + # + # Send request for create security rule for server port + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_REQUEST_BODY="{\"security_group_rule\":{\"description\":\"k2hdkc/chmpx ${_DBAAS_OP_SECGRP_TYPE} node port\",\"protocol\":\"tcp\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"port_range_max\":${K2HR3CLI_OPT_DBAAS_SERVER_PORT},\"port_range_min\":${K2HR3CLI_OPT_DBAAS_SERVER_PORT},\"remote_group_id\":null,\"security_group_id\":\"${_DBAAS_OP_SECGRP_ID}\"}}" + _DBAAS_OP_URL_PATH="/v2.0/security-group-rules" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(create_op_security_group) Failed to create security rule for server port." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + + # + # Send request for create security rule for server control port + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_REQUEST_BODY="{\"security_group_rule\":{\"description\":\"k2hdkc/chmpx ${_DBAAS_OP_SECGRP_TYPE} node control port\",\"protocol\":\"tcp\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"port_range_max\":${K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT},\"port_range_min\":${K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT},\"remote_group_id\":null,\"security_group_id\":\"${_DBAAS_OP_SECGRP_ID}\"}}" + _DBAAS_OP_URL_PATH="/v2.0/security-group-rules" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(create_op_security_group) Failed to create security rule for server control port." 
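# [NOTE]
# Each security-group-rule request in this function follows the same shape;
# as a stand-alone sketch for one ingress rule (placeholders OS_TOKEN /
# NEUTRON_URI / SECGRP_ID / PORT, description text arbitrary; a 201 status
# is expected on success):
#
#   curl -s -X POST -H "Content-Type: application/json" \
#        -H "X-Auth-Token: ${OS_TOKEN}" \
#        -d "{\"security_group_rule\":{\"description\":\"k2hdkc/chmpx port\",\"protocol\":\"tcp\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"port_range_max\":${PORT},\"port_range_min\":${PORT},\"remote_group_id\":null,\"security_group_id\":\"${SECGRP_ID}\"}}" \
#        "${NEUTRON_URI}/v2.0/security-group-rules"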
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + + else + # + # Send request for create security rule for slave control port + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_REQUEST_BODY="{\"security_group_rule\":{\"description\":\"k2hdkc/chmpx ${_DBAAS_OP_SECGRP_TYPE} node control port\",\"protocol\":\"tcp\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"port_range_max\":${K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT},\"port_range_min\":${K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT},\"remote_group_id\":null,\"security_group_id\":\"${_DBAAS_OP_SECGRP_ID}\"}}" + _DBAAS_OP_URL_PATH="/v2.0/security-group-rules" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "201" 1 + if [ $? -ne 0 ]; then + prn_dbg "(create_op_security_group) Failed to create security rule for slave control port." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + fi + + # + # Set security group name + # + if [ "X$2" = "X1" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_SLAVE_SECGRP=${_DBAAS_OP_SECGRP_NAME} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_SERVER_SECGRP=${_DBAAS_OP_SECGRP_NAME} + fi + + return 0 +} + +# +# Delete security group +# +# $1 : cluster name +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NEUTRON_URI : neutron uri +# +delete_op_security_groups() +{ + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" = "X" ]; then + prn_err "OpenStack(Neutron) URI is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + + # + # Check server security group exists + # + check_op_security_group "$1" 0 + if [ $? -eq 0 ]; then + #------------------------------------------------------ + # Send request for delete security group + #------------------------------------------------------ + # [MEMO] + # http:///v2.0/security-groups/ + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/v2.0/security-groups/${K2HR3CLI_OPENSTACK_FIND_SECGRP_ID}" + + delete_request "${_DBAAS_OP_URL_PATH}" 1 "${_DBAAS_OP_AUTH_HEADER}" + + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "204" 1 + if [ $? -ne 0 ]; then + prn_dbg "(delete_op_security_group) Failed to delete server security group." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + fi + + # + # Check slave security group exists + # + check_op_security_group "$1" 1 + if [ $? 
-eq 0 ]; then + #------------------------------------------------------ + # Send request for delete security group + #------------------------------------------------------ + # [MEMO] + # http:///v2.0/security-groups/ + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NEUTRON_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/v2.0/security-groups/${K2HR3CLI_OPENSTACK_FIND_SECGRP_ID}" + + delete_request "${_DBAAS_OP_URL_PATH}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "204" 1 + if [ $? -ne 0 ]; then + prn_dbg "(delete_op_security_group) Failed to delete slave security group." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + fi + + return 0 +} + +#-------------------------------------------------------------- +# Functions for OpenStack Keypair +#-------------------------------------------------------------- +# +# Check Keypair +# +# $1 : keypair name +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : nova uri(ex. http://xxx.xxx.xxx/compute/v2.1) +# +check_op_keypair() +{ + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + prn_err "OpenStack(Nova) URI is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + _DBAAS_OP_KEYPAIR_NAME="$1" + + #------------------------------------------------------ + # Send request for get keypair list + #------------------------------------------------------ + # [MEMO] + # http:///os-keypairs + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NOVA_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/os-keypairs" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_dbg "(check_op_keypair) Could not get keypair list." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search keypair + #------------------------------------------------------ + # [MEMO] + # { + # "keypairs": [ + # { + # "keypair": { + # "fingerprint": "xx:xx:xx:xx:xx....", + # "name": "", + # "public_key": "ssh-rsa ..........." + # } + # } + # ] + # } + # + jsonparser_get_key_value '%"keypairs"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"keypairs\" key in response body." 
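# [NOTE]
# The keypair check performed by this function only verifies that a keypair
# of the given name exists; it is equivalent to this sketch (placeholders
# OS_TOKEN / NOVA_URI / KEYPAIR_NAME, jq for illustration only):
#
#   curl -s -H "X-Auth-Token: ${OS_TOKEN}" "${NOVA_URI}/os-keypairs" \
#     | jq -e --arg name "${KEYPAIR_NAME}" \
#          '.keypairs[] | select(.keypair.name == $name)' >/dev/null \
#     && echo "keypair exists"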
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_KEYPAIR_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DBAAS_OP_KEYPAIR_POS in ${_DBAAS_OP_KEYPAIR_LIST}; do + # + # Check keypair name + # + _DBAAS_OP_KEYPAIR_POS_RAW=$(pecho -n "${_DBAAS_OP_KEYPAIR_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"keypairs\"%${_DBAAS_OP_KEYPAIR_POS_RAW}%\"keypair\"%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${_DBAAS_OP_KEYPAIR_NAME}" ]; then + # + # Found same name + # + prn_dbg "(check_op_keypair) Found keypair." + rm -f "${JP_PAERSED_FILE}" + return 0 + fi + fi + done + + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +#-------------------------------------------------------------- +# Functions for OpenStack Flavor +#-------------------------------------------------------------- +# +# Check flavor +# +# $1 : flavor name +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : nova uri +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +# Set Variables +# K2HR3CLI_OPENSTACK_FLAVOR_ID : flavor id +# +check_op_flavor() +{ + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + prn_err "OpenStack(Nova) URI is not specified." + return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" = "X" ]; then + prn_err "OpenStack Project(tenant) id is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + _DBAAS_OP_FLAVOR_NAME="$1" + + #------------------------------------------------------ + # Send request for get flavor list + #------------------------------------------------------ + # [MEMO] + # http:///flavors/detail + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NOVA_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/flavors/detail" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_dbg "(check_op_flavor) Could not get flavor list." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search flavor + #------------------------------------------------------ + # [MEMO] + # { + # "flavors": [ + # { + # "OS-FLV-DISABLED:disabled": false, + # "OS-FLV-EXT-DATA:ephemeral": 0, + # "disk": 10, + # *** "id": "", + # "links": [ + # { + # "href": "http:////flavors/", + # "rel": "self" + # }, + # { + # "href": "http:////flavors/", + # "rel": "bookmark" + # } + # ], + # *** "name": "", + # "os-flavor-access:is_public": true, + # "ram": 2048, + # "rxtx_factor": 1.0, + # "swap": "", + # "vcpus": 2 + # }, + # {....} + # ] + # } + # + jsonparser_get_key_value '%"flavors"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"flavors\" key in response body." 
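# [NOTE]
# The flavor lookup performed by this function resolves a flavor name to its
# id; the same resolution as a stand-alone sketch (placeholders OS_TOKEN /
# NOVA_URI, "ds2G" being just an example flavor name, jq for illustration
# only):
#
#   curl -s -H "X-Auth-Token: ${OS_TOKEN}" "${NOVA_URI}/flavors/detail" \
#     | jq -r '.flavors[] | select(.name == "ds2G") | .id'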
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_FLAVOR_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DBAAS_OP_FLAVOR_POS in ${_DBAAS_OP_FLAVOR_LIST}; do + # + # Check flavor object name + # + _DBAAS_OP_FLAVOR_POS_RAW=$(pecho -n "${_DBAAS_OP_FLAVOR_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"flavors\"%${_DBAAS_OP_FLAVOR_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${_DBAAS_OP_FLAVOR_NAME}" ]; then + # + # Found same name -> get flavor id + # + jsonparser_get_key_value "%\"flavors\"%${_DBAAS_OP_FLAVOR_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_FLAVOR_ID=${JSONPARSER_FIND_STR_VAL} + prn_dbg "(check_op_flavor) Found flavor." + rm -f "${JP_PAERSED_FILE}" + return 0 + else + prn_warn "Found ${_DBAAS_OP_FLAVOR_NAME} flavor, but its id is not existed." + fi + fi + fi + done + + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +# +# List flavor +# +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : nova uri +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +display_op_flavor_list() +{ + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + prn_err "OpenStack(Nova) URI is not specified." + return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" = "X" ]; then + prn_err "OpenStack Project(tenant) id is not specified." + return 1 + fi + + #------------------------------------------------------ + # Send request for get flavor list + #------------------------------------------------------ + # [MEMO] + # http:///flavors/detail + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NOVA_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/flavors/detail" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 + if [ $? -ne 0 ]; then + prn_dbg "(check_op_flavor) Could not get flavor list." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Display flavors + #------------------------------------------------------ + # [MEMO] + # { + # "flavors": [ + # { + # "OS-FLV-DISABLED:disabled": false, + # "OS-FLV-EXT-DATA:ephemeral": 0, + # "disk": 10, + # *** "id": "", + # "links": [ + # { + # "href": "http:////flavors/", + # "rel": "self" + # }, + # { + # "href": "http:////flavors/", + # "rel": "bookmark" + # } + # ], + # *** "name": "", + # "os-flavor-access:is_public": true, + # "ram": 2048, + # "rxtx_factor": 1.0, + # "swap": "", + # "vcpus": 2 + # }, + # {....} + # ] + # } + # + jsonparser_get_key_value '%"flavors"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"flavors\" key in response body." 
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_FLAVOR_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Display Start + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + pecho -n "[" + else + _DBAAS_OP_DISPLAY_JSON="[" + fi + + _DBAAS_OP_DISPLAY_LINE=0 + for _DBAAS_OP_FLAVOR_POS in ${_DBAAS_OP_FLAVOR_LIST}; do + # + # Check flavor name + # + _DBAAS_OP_FLAVOR_POS_RAW=$(pecho -n "${_DBAAS_OP_FLAVOR_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"flavors\"%${_DBAAS_OP_FLAVOR_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(display_op_flavor_list) flavors[${_DBAAS_OP_FLAVOR_POS_RAW}] does not have name element, skip it" + continue + fi + _DBAAS_OP_FLAVOR_NAME=${JSONPARSER_FIND_STR_VAL} + + # + # Check flavor id + # + jsonparser_get_key_value "%\"flavors\"%${_DBAAS_OP_FLAVOR_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(display_op_flavor_list) flavors[${_DBAAS_OP_FLAVOR_POS_RAW}] flavor=${_DBAAS_OP_FLAVOR_NAME} does not have id element, skip it" + continue + fi + _DBAAS_OP_FLAVOR_ID=${JSONPARSER_FIND_STR_VAL} + + # + # Display + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + pecho "" + _DBAAS_OP_DISPLAY_LINE=1 + else + pecho "," + fi + pecho " {" + pecho " \"name\": \"${_DBAAS_OP_FLAVOR_NAME}\"," + pecho " \"id\": \"${_DBAAS_OP_FLAVOR_ID}\"" + pecho -n " }" + else + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON}{\"name\":\"${_DBAAS_OP_FLAVOR_NAME}\",\"id\":\"${_DBAAS_OP_FLAVOR_ID}\"}" + _DBAAS_OP_DISPLAY_LINE=1 + else + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON},{\"name\":\"${_DBAAS_OP_FLAVOR_NAME}\",\"id\":\"${_DBAAS_OP_FLAVOR_ID}\"}" + fi + fi + done + + # + # Display End + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + pecho "]" + else + pecho "" + pecho "]" + fi + else + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON}]" + pecho "${_DBAAS_OP_DISPLAY_JSON}" + fi + + rm -f "${JP_PAERSED_FILE}" + + return 0 +} + +#-------------------------------------------------------------- +# Functions for OpenStack Image +#-------------------------------------------------------------- +# +# Check image +# +# $1 : image name +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_GLANCE_URI : glance uri +# +# Set Variables +# K2HR3CLI_OPENSTACK_IMAGE_ID : image id +# +check_op_image() +{ + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" = "X" ]; then + prn_err "OpenStack(Glance) URI is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + return 1 + fi + _DBAAS_OP_IMAGE_NAME="$1" + + #------------------------------------------------------ + # Send request for get image list + #------------------------------------------------------ + # [MEMO] + # http:///v2/images?name= + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_GLANCE_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + + if pecho -n "${_DBAAS_OP_IMAGE_NAME}" | grep -q ":"; then + # [NOTE] + # If the image name contains a colon(:), glance will fail detection. 
+ # In this case, get all the images.(Performance is the worst) + # + _DBAAS_OP_URL_PATH="/v2/images" + else + _DBAAS_OP_ESCAPED_IMAGE_NAME=$(k2hr3cli_urlencode "${_DBAAS_OP_IMAGE_NAME}") + _DBAAS_OP_URL_PATH="/v2/images?name=${_DBAAS_OP_ESCAPED_IMAGE_NAME}" + fi + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 2>/dev/null + if [ $? -ne 0 ]; then + prn_dbg "(check_op_image) Could not get flavor list." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search image + #------------------------------------------------------ + # [MEMO] + # { + # "first": "/v2/images", + # "images": [ + # { + # "checksum": "....", + # "container_format": "bare", + # "created_at": "2021-01-01T00:00:00Z", + # "disk_format": "qcow2", + # "file": "/v2/images/.../file", + # *** "id": "", + # "min_disk": 0, + # "min_ram": 0, + # *** "name": "", + # "os_hash_algo": "sha512", + # "os_hash_value": "...", + # "os_hidden": false, + # "owner": "...", + # "owner_specified.openstack.md5": "", + # "owner_specified.openstack.object": "...", + # "owner_specified.openstack.sha256": "", + # "protected": false, + # "schema": "/v2/schemas/image", + # "self": "/v2/images/...", + # "size": ..., + # "status": "active", + # "tags": [], + # "updated_at": "2021-01-01T00:00:00Z", + # "virtual_size": null, + # "visibility": "public" + # }, + # {...} + # ], + # "schema": "/v2/schemas/images" + # } + # + jsonparser_get_key_value '%"images"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"images\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_IMAGE_LIST=${JSONPARSER_FIND_KEY_VAL} + + for _DBAAS_OP_IMAGE_POS in ${_DBAAS_OP_IMAGE_LIST}; do + # + # Check image name + # + _DBAAS_OP_IMAGE_POS_RAW=$(pecho -n "${_DBAAS_OP_IMAGE_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"images\"%${_DBAAS_OP_IMAGE_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + if [ "X${JSONPARSER_FIND_STR_VAL}" = "X${_DBAAS_OP_IMAGE_NAME}" ]; then + # + # Found same name -> get image id + # + jsonparser_get_key_value "%\"images\"%${_DBAAS_OP_IMAGE_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? -eq 0 ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_IMAGE_ID=${JSONPARSER_FIND_STR_VAL} + prn_dbg "(check_op_image) Found image id." + rm -f "${JP_PAERSED_FILE}" + return 0 + else + prn_warn "Found ${_DBAAS_OP_IMAGE_NAME} image name, but its id is not existed." + fi + fi + fi + done + rm -f "${JP_PAERSED_FILE}" + + return 1 +} + +# +# List image +# +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_GLANCE_URI : glance uri +# +display_op_image_list() +{ + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" = "X" ]; then + prn_err "OpenStack(Glance) URI is not specified." 
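# [NOTE]
# Image name resolution follows the same pattern as the flavor lookup; as a
# sketch (placeholders OS_TOKEN / GLANCE_URI / IMAGE_NAME, jq for
# illustration only). Keep the colon caveat from check_op_image() above in
# mind: a name containing ':' cannot be passed through the "?name=" filter,
# so such names have to be matched client-side against the unfiltered
# /v2/images listing:
#
#   curl -s -H "X-Auth-Token: ${OS_TOKEN}" "${GLANCE_URI}/v2/images" \
#     | jq -r --arg name "${IMAGE_NAME}" \
#          '.images[] | select(.name == $name) | .id'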
+ return 1 + fi + + #------------------------------------------------------ + # Send request for get image list + #------------------------------------------------------ + # [MEMO] + # http:///v2/images + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_GLANCE_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/v2/images" + + get_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "200" 1 2>/dev/null + if [ $? -ne 0 ]; then + prn_dbg "(check_op_image) Could not get flavor list." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Display images + #------------------------------------------------------ + # [MEMO] + # { + # "first": "/v2/images", + # "images": [ + # { + # "checksum": "....", + # "container_format": "bare", + # "created_at": "2021-01-01T00:00:00Z", + # "disk_format": "qcow2", + # "file": "/v2/images/.../file", + # *** "id": "", + # "min_disk": 0, + # "min_ram": 0, + # *** "name": "", + # "os_hash_algo": "sha512", + # "os_hash_value": "...", + # "os_hidden": false, + # "owner": "...", + # "owner_specified.openstack.md5": "", + # "owner_specified.openstack.object": "...", + # "owner_specified.openstack.sha256": "", + # "protected": false, + # "schema": "/v2/schemas/image", + # "self": "/v2/images/...", + # "size": ..., + # "status": "active", + # "tags": [], + # "updated_at": "2021-01-01T00:00:00Z", + # "virtual_size": null, + # "visibility": "public" + # }, + # {...} + # ], + # "schema": "/v2/schemas/images" + # } + # + jsonparser_get_key_value '%"images"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"images\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + _DBAAS_OP_IMAGE_LIST=${JSONPARSER_FIND_KEY_VAL} + + # + # Display Start + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + pecho -n "[" + else + _DBAAS_OP_DISPLAY_JSON="[" + fi + + _DBAAS_OP_DISPLAY_LINE=0 + for _DBAAS_OP_IMAGE_POS in ${_DBAAS_OP_IMAGE_LIST}; do + # + # Check image name + # + _DBAAS_OP_IMAGE_POS_RAW=$(pecho -n "${_DBAAS_OP_IMAGE_POS}" | sed -e 's/\([^\\]\)\\s/\1 /g' -e 's/\\\\/\\/g') + jsonparser_get_key_value "%\"images\"%${_DBAAS_OP_IMAGE_POS_RAW}%\"name\"%" "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_dbg "(display_op_image_list) images[${_DBAAS_OP_IMAGE_POS_RAW}] does not have name element, skip it" + continue + fi + _DBAAS_OP_IMAGE_NAME=${JSONPARSER_FIND_STR_VAL} + + # + # Check image id + # + jsonparser_get_key_value "%\"images\"%${_DBAAS_OP_IMAGE_POS_RAW}%\"id\"%" "${JP_PAERSED_FILE}" + if [ $? 
-ne 0 ]; then + prn_dbg "(display_op_image_list) images[${_DBAAS_OP_IMAGE_POS_RAW}] name=${_DBAAS_OP_IMAGE_NAME} does not have id element, skip it" + fi + _DBAAS_OP_IMAGE_ID=${JSONPARSER_FIND_STR_VAL} + + # + # Display + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + pecho "" + _DBAAS_OP_DISPLAY_LINE=1 + else + pecho "," + fi + pecho " {" + pecho " \"name\": \"${_DBAAS_OP_IMAGE_NAME}\"," + pecho " \"id\": \"${_DBAAS_OP_IMAGE_ID}\"" + pecho -n " }" + else + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON}{\"name\":\"${_DBAAS_OP_IMAGE_NAME}\",\"id\":\"${_DBAAS_OP_IMAGE_ID}\"}" + _DBAAS_OP_DISPLAY_LINE=1 + else + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON},{\"name\":\"${_DBAAS_OP_IMAGE_NAME}\",\"id\":\"${_DBAAS_OP_IMAGE_ID}\"}" + fi + fi + done + + # + # Display End + # + if [ "X${K2HR3CLI_OPT_JSON}" = "X1" ]; then + if [ "${_DBAAS_OP_DISPLAY_LINE}" -eq 0 ]; then + pecho "]" + else + pecho "" + pecho "]" + fi + else + _DBAAS_OP_DISPLAY_JSON="${_DBAAS_OP_DISPLAY_JSON}]" + pecho "${_DBAAS_OP_DISPLAY_JSON}" + fi + + rm -f "${JP_PAERSED_FILE}" + + return 0 +} + +#-------------------------------------------------------------- +# Functions for Launch host +#-------------------------------------------------------------- +# +# Create host +# +# $1 : post data +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : nova uri +# K2HR3CLI_OPENSTACK_TENANT_ID : tenant id +# +# Set Variables +# K2HR3CLI_OPENSTACK_CREATED_SERVER_ID : server id +# +create_op_host() +{ + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_CREATED_SERVER_ID= + + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + prn_err "OpenStack(Neutron) URI is not specified." + return 1 + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" = "X" ]; then + prn_err "OpenStack Project(tenant) id is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + prn_err "OpenStack post data for launching is empty." + return 1 + fi + + #------------------------------------------------------ + # Send request for create host + #------------------------------------------------------ + # [MEMO] + # http:///servers + # + # { + # "server":{ + # "imageRef":"...", + # "flavorRef":"...", + # "name":"...", + # "user_data":"......", + # "security_groups": [ + # { + # "name": "default" + # }, + # { + # "name": "..." + # } + # ], + # "key_name":"..." + # } + # } + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NOVA_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_REQUEST_BODY="$1" + _DBAAS_OP_URL_PATH="/servers" + + post_string_request "${_DBAAS_OP_URL_PATH}" "${_DBAAS_OP_REQUEST_BODY}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "202" 1 + if [ $? -ne 0 ]; then + prn_dbg "(create_op_host) Failed to create host." 
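+		#
+		# [NOTE]
+		# Nova answers "202 Accepted" when the build request has been queued,
+		# so any other status code falls into this branch. For hand debugging,
+		# a roughly equivalent request can be sent with the openstack client
+		# (all values below are placeholders):
+		#   openstack server create --image <image> --flavor <flavor> \
+		#     --key-name <keypair> --security-group default \
+		#     --user-data <file> <host name>
+		#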
+ rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + #------------------------------------------------------ + # Search segurity group id + #------------------------------------------------------ + # [MEMO] + # { + # "server": { + # *** "id": "...", + # "links": [ + # { + # "rel": "self", + # "href": "http:////servers/" + # }, + # { + # "rel": "bookmark", + # "href": "http:////servers/" + # } + # ], + # "OS-DCF:diskConfig": "MANUAL", + # "security_groups": [ + # {...} + # ], + # "adminPass": "..." + # } + # } + # + jsonparser_get_key_value '%"server"%"id"%' "${JP_PAERSED_FILE}" + if [ $? -ne 0 ]; then + prn_warn "Not found \"server\"->\"id\" key in response body." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_CREATED_SERVER_ID=${JSONPARSER_FIND_STR_VAL} + rm -f "${JP_PAERSED_FILE}" + + return 0 +} + +#-------------------------------------------------------------- +# Functions for Delete host +#-------------------------------------------------------------- +# +# Delete host +# +# $1 : host id +# $? : result +# +# Use Variables +# K2HR3CLI_OPENSTACK_NOVA_URI : nova uri +# +delete_op_host() +{ + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" = "X" ]; then + prn_err "OpenStack(Nova) URI is not specified." + return 1 + fi + if [ "X$1" = "X" ]; then + prn_dbg "(delete_op_host) Parameter is wrong." + return 1 + fi + + #------------------------------------------------------ + # Send request for delete host + #------------------------------------------------------ + # [MEMO] + # http:///servers/ + # + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI=${K2HR3CLI_OPENSTACK_NOVA_URI} + _DBAAS_OP_AUTH_HEADER="X-Auth-Token:${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" + _DBAAS_OP_URL_PATH="/servers/$1" + + delete_request "${_DBAAS_OP_URL_PATH}" 1 "${_DBAAS_OP_AUTH_HEADER}" + _DBAAS_REQUEST_RESULT=$? + # shellcheck disable=SC2034 + K2HR3CLI_OVERRIDE_URI="" + + # + # Parse response body + # + jsonparser_parse_json_file "${K2HR3CLI_REQUEST_RESULT_FILE}" + if [ $? -ne 0 ]; then + prn_err "Failed to parse result." + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + return 1 + fi + rm -f "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # + # Check result + # + requtil_check_result "${_DBAAS_REQUEST_RESULT}" "${K2HR3CLI_REQUEST_EXIT_CODE}" "${JP_PAERSED_FILE}" "204" 1 + if [ $? -ne 0 ]; then + prn_dbg "(delete_op_host) Failed to delete host." + rm -f "${JP_PAERSED_FILE}" + return 1 + fi + rm -f "${JP_PAERSED_FILE}" + + return 0 +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/options.sh b/src/libexec/database/options.sh new file mode 100644 index 0000000..9120cfe --- /dev/null +++ b/src/libexec/database/options.sh @@ -0,0 +1,654 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +# +# Multiple read prevention +# +if [ "X${K2HR3CLI_DBAAS_OPTION_FILE_LOADED}" = "X1" ]; then + return 0 +fi +K2HR3CLI_DBAAS_OPTION_FILE_LOADED=1 + +#-------------------------------------------------------------- +# DBaaS Options +#-------------------------------------------------------------- +K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG="--dbaas_config" +K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG="--chmpx_server_port" +K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG="--chmpx_server_ctlport" +K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG="--chmpx_slave_ctlport" +K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG="--dbaas_user" +K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_USER_LONG="--dbaas_create_user" +K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_ROLETOKEN_LONG="--create_roletoken" +K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG="--openstack_identity_uri" +K2HR3CLI_COMMAND_OPT_OPENSTACK_NOVA_URI_LONG="--openstack_nova_uri" +K2HR3CLI_COMMAND_OPT_OPENSTACK_GLANCE_URI_LONG="--openstack_glance_uri" +K2HR3CLI_COMMAND_OPT_OPENSTACK_NEUTRON_URI_LONG="--openstack_neutron_uri" +K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG="--op_user" +K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG="--op_passphrase" +K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG="--op_tenant" +K2HR3CLI_COMMAND_OPT_OPENSTACK_NO_SECGRP_LONG="--op_no_secgrp" +K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG="--op_keypair" +K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG="--op_flavor" +K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG="--op_flavor_id" +K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG="--op_image" +K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG="--op_image_id" +K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_SHORT="-y" +K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_LONG="--yes" + +# +# Default value +# +K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_PORT=8020 +K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_CTLPORT=8021 +K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SLAVE_CTLPORT=8031 + +# +# Parse common option +# +# $@ option strings +# +# $? 
returns 1 for fatal errors +# Set global values +# K2HR3CLI_OPTION_PARSER_REST : the remaining option string with the help option cut off(for new $@) +# K2HR3CLI_DBAAS_CONFIG : --dbaas_config +# K2HR3CLI_OPT_DBAAS_SERVER_PORT : --chmpx_server_port +# K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT : --chmpx_server_ctlport +# K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT : --chmpx_slave_ctlport +# K2HR3CLI_OPT_DBAAS_RUN_USER : --dbaas_user +# K2HR3CLI_OPT_DBAAS_CREATE_USER : --dbaas_create_user +# K2HR3CLI_OPT_DBAAS_CREATE_ROLETOKEN : --create_roletoken +# K2HR3CLI_OPENSTACK_IDENTITY_URI : --openstack_identity_uri +# K2HR3CLI_OPENSTACK_NOVA_URI : --openstack_nova_uri +# K2HR3CLI_OPENSTACK_GLANCE_URI : --openstack_glance_uri +# K2HR3CLI_OPENSTACK_NEUTRON_URI : --openstack_neutron_uri +# K2HR3CLI_OPENSTACK_USER : --op_user +# K2HR3CLI_OPENSTACK_PASS : --op_passphrase +# K2HR3CLI_OPENSTACK_TENANT : --op_tenant +# K2HR3CLI_OPENSTACK_NO_SECGRP : --op_no_secgrp +# K2HR3CLI_OPENSTACK_KEYPAIR : --op_keypair +# K2HR3CLI_OPENSTACK_FLAVOR : --op_flavor +# K2HR3CLI_OPENSTACK_FLAVOR_ID : --op_flavor_id +# K2HR3CLI_OPENSTACK_IMAGE : --op_image +# K2HR3CLI_OPENSTACK_IMAGE_ID : --op_image_id +# K2HR3CLI_OPENSTACK_CONFIRM_YES : --yes(-y) +# +parse_dbaas_option() +{ + # + # Temporary values + # + _OPT_TMP_DBAAS_CONFIG= + _OPT_TMP_DBAAS_SERVER_PORT= + _OPT_TMP_DBAAS_SERVER_CTLPORT= + _OPT_TMP_DBAAS_SLAVE_CTLPORT= + _OPT_TMP_DBAAS_RUN_USER= + _OPT_TMP_DBAAS_CREATE_USER= + _OPT_TMP_DBAAS_CREATE_ROLETOKEN= + _OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI= + _OPT_TMP_DBAAS_OPENSTACK_NOVA_URI= + _OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI= + _OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI= + _OPT_TMP_DBAAS_OPENSTACK_USER= + _OPT_TMP_DBAAS_OPENSTACK_PASS= + _OPT_TMP_DBAAS_OPENSTACK_TENANT= + _OPT_TMP_DBAAS_OPENSTACK_NO_SECGRP= + _OPT_TMP_DBAAS_OPENSTACK_KEYPAIR= + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR= + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID= + _OPT_TMP_DBAAS_OPENSTACK_IMAGE= + _OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID= + _OPT_TMP_DBAAS_OPENSTACK_CONFIRM_YES= + + K2HR3CLI_OPTION_PARSER_REST="" + while [ $# -gt 0 ]; do + _OPTION_TMP=$(to_lower "$1") + + if [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_CONFIG}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG} option needs parameter." + return 1 + fi + if [ ! -d "$1" ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_CONFIG_LONG} option parameter($1) directory does not exist." + return 1 + fi + _OPT_TMP_DBAAS_CONFIG=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_SERVER_PORT}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG} option needs parameter." + return 1 + fi + if ! is_positive_number "$1"; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_PORT_LONG} option parameter must be 0 or positive number." 
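+				#
+				# [NOTE]
+				# A valid value is 0 or a positive number, for example:
+				#   --chmpx_server_port 8020
+				# Specifying 0 falls back to the default port(8020), which is
+				# handled just below.
+				#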
+ return 1 + fi + if [ "$1" -eq 0 ]; then + _OPT_TMP_DBAAS_SERVER_PORT="${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_PORT}" + else + _OPT_TMP_DBAAS_SERVER_PORT="$1" + fi + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_SERVER_CTLPORT}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG} option needs parameter." + return 1 + fi + if ! is_positive_number "$1"; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SERVER_CTLPORT_LONG} option parameter must be 0 or positive number." + return 1 + fi + if [ "$1" -eq 0 ]; then + _OPT_TMP_DBAAS_SERVER_CTLPORT="${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_CTLPORT}" + else + _OPT_TMP_DBAAS_SERVER_CTLPORT="$1" + fi + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_SLAVE_CTLPORT}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG} option needs parameter." + return 1 + fi + if ! is_positive_number "$1"; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_SLAVE_CTLPORT_LONG} option parameter must be 0 or positive number." + return 1 + fi + if [ "$1" -eq 0 ]; then + _OPT_TMP_DBAAS_SLAVE_CTLPORT="${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SLAVE_CTLPORT}" + else + _OPT_TMP_DBAAS_SLAVE_CTLPORT="$1" + fi + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_RUN_USER}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG} option needs parameter." + return 1 + fi + _OPTION_TMP_VAL=$(echo "$1" | grep -v "[^a-zA-Z0-9_]") + if [ "X${_OPTION_TMP_VAL}" = "X" ]; then + prn_err "Invalid username specified with ${K2HR3CLI_COMMAND_OPT_DBAAS_RUN_USER_LONG} option." + return 1 + fi + _OPT_TMP_DBAAS_RUN_USER="$1" + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_USER_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_CREATE_USER}" ]; then + prn_err "already specified K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_USER_LONG option." + return 1 + fi + _OPT_TMP_DBAAS_CREATE_USER=1 + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_IDENTITY_URI_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_NOVA_URI_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NOVA_URI}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NOVA_URI_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_NOVA_URI_LONG} option needs parameter." 
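+				#
+				# [NOTE]
+				# For example (the endpoint value below is illustrative only):
+				#   --openstack_nova_uri http://192.168.0.1:8774/v2.1
+				# A trailing '/' is trimmed at the end of this function.
+				#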
+ return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_NOVA_URI=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_NOVA_URI=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_NOVA_URI}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_GLANCE_URI_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_GLANCE_URI_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_GLANCE_URI_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_NEUTRON_URI_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NEUTRON_URI_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_NEUTRON_URI_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_ROLETOKEN_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_CREATE_ROLETOKEN}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_DBAAS_CREATE_ROLETOKEN_LONG} option." + return 1 + fi + _OPT_TMP_DBAAS_CREATE_ROLETOKEN=1 + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_USER}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_USER_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_USER=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_USER=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_USER}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_PASS}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_PASS_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_PASS=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_PASS=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_PASS}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_TENANT}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_TENANT_LONG} option needs parameter." 
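+				#
+				# [NOTE]
+				# For example (the tenant name below is illustrative only):
+				#   --op_tenant demo
+				# The OpenStack Scoped Token is issued against this tenant
+				# (see variables.sh).
+				#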
+ return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_TENANT=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_TENANT=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_TENANT}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_NO_SECGRP_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NO_SECGRP}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_NO_SECGRP_LONG} option." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_NO_SECGRP=1 + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_KEYPAIR}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_KEYPAIR_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_KEYPAIR=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_KEYPAIR=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_KEYPAIR}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_FLAVOR_ID_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_LONG} option needs parameter." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_IMAGE=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_IMAGE=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG} option." + return 1 + fi + shift + if [ $# -le 0 ]; then + prn_err "${K2HR3CLI_COMMAND_OPT_OPENSTACK_IMAGE_ID_LONG} option needs parameter." 
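+				#
+				# [NOTE]
+				# An image can be given by name or by id, for example
+				# (values below are illustrative only):
+				#   --op_image TEST_IMAGE
+				#   --op_image_id <image uuid>
+				#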
+ return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID=$(cut_special_words "$1" | sed -e 's/%20/ /g' -e 's/%25/%/g') + _OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID=$(filter_null_string "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID}") + + elif [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_LONG}" ] || [ "X${_OPTION_TMP}" = "X${K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_SHORT}" ]; then + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_CONFIRM_YES}" ]; then + prn_err "already specified ${K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_LONG}(${K2HR3CLI_COMMAND_OPT_OPENSTACK_CONFIRM_YES_SHORT}) option." + return 1 + fi + _OPT_TMP_DBAAS_OPENSTACK_CONFIRM_YES=1 + + else + if [ "X${K2HR3CLI_OPTION_PARSER_REST}" = "X" ]; then + K2HR3CLI_OPTION_PARSER_REST="$1" + else + K2HR3CLI_OPTION_PARSER_REST="${K2HR3CLI_OPTION_PARSER_REST} $1" + fi + fi + shift + done + + # + # Set override default and global value + # + if [ -n "${_OPT_TMP_DBAAS_CONFIG}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_DBAAS_CONFIG=${_OPT_TMP_DBAAS_CONFIG} + fi + if [ -n "${_OPT_TMP_DBAAS_SERVER_PORT}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SERVER_PORT=${_OPT_TMP_DBAAS_SERVER_PORT} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SERVER_PORT=${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_PORT} + fi + if [ -n "${_OPT_TMP_DBAAS_SERVER_CTLPORT}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT=${_OPT_TMP_DBAAS_SERVER_CTLPORT} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SERVER_CTLPORT=${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SERVER_CTLPORT} + fi + if [ -n "${_OPT_TMP_DBAAS_SLAVE_CTLPORT}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT=${_OPT_TMP_DBAAS_SLAVE_CTLPORT} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_SLAVE_CTLPORT=${K2HR3CLI_COMMAND_OPT_DBAAS_DEFALT_SLAVE_CTLPORT} + fi + if [ -n "${_OPT_TMP_DBAAS_RUN_USER}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_RUN_USER=${_OPT_TMP_DBAAS_RUN_USER} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_RUN_USER="" + fi + if [ -n "${_OPT_TMP_DBAAS_CREATE_USER}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_CREATE_USER=${_OPT_TMP_DBAAS_CREATE_USER} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_CREATE_USER=0 + fi + if [ -n "${_OPT_TMP_DBAAS_CREATE_ROLETOKEN}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_CREATE_ROLETOKEN=${_OPT_TMP_DBAAS_CREATE_ROLETOKEN} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPT_DBAAS_CREATE_ROLETOKEN=0 + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI}" ]; then + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" != "X${_OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI}" ]; then + add_config_update_var "K2HR3CLI_OPENSTACK_IDENTITY_URI" + fi + K2HR3CLI_OPENSTACK_IDENTITY_URI=${_OPT_TMP_DBAAS_OPENSTACK_IDENTITY_URI} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NOVA_URI}" ]; then + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" != "X${_OPT_TMP_DBAAS_OPENSTACK_NOVA_URI}" ]; then + add_config_update_var "K2HR3CLI_OPENSTACK_NOVA_URI" + fi + K2HR3CLI_OPENSTACK_NOVA_URI=${_OPT_TMP_DBAAS_OPENSTACK_NOVA_URI} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI}" ]; then + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" != "X${_OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI}" ]; then + add_config_update_var "K2HR3CLI_OPENSTACK_GLANCE_URI" + fi + K2HR3CLI_OPENSTACK_GLANCE_URI=${_OPT_TMP_DBAAS_OPENSTACK_GLANCE_URI} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI}" ]; then + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" != "X${_OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI}" ]; then + 
add_config_update_var "K2HR3CLI_OPENSTACK_NEUTRON_URI" + fi + K2HR3CLI_OPENSTACK_NEUTRON_URI=${_OPT_TMP_DBAAS_OPENSTACK_NEUTRON_URI} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_USER}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_USER=${_OPT_TMP_DBAAS_OPENSTACK_USER} + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_USER_ID="" + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_USER=${K2HR3CLI_USER} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_PASS}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_PASS=${_OPT_TMP_DBAAS_OPENSTACK_PASS} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_PASS=${K2HR3CLI_PASS} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_TENANT}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_TENANT=${_OPT_TMP_DBAAS_OPENSTACK_TENANT} + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_TENANT_ID="" + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_TENANT=${K2HR3CLI_TENANT} + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_NO_SECGRP}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_NO_SECGRP=${_OPT_TMP_DBAAS_OPENSTACK_NO_SECGRP} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_NO_SECGRP=0 + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_KEYPAIR}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_KEYPAIR=${_OPT_TMP_DBAAS_OPENSTACK_KEYPAIR} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_KEYPAIR="" + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_FLAVOR=${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_FLAVOR="" + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_FLAVOR_ID=${_OPT_TMP_DBAAS_OPENSTACK_FLAVOR_ID} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_FLAVOR_ID="" + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_IMAGE=${_OPT_TMP_DBAAS_OPENSTACK_IMAGE} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_IMAGE="" + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_IMAGE_ID=${_OPT_TMP_DBAAS_OPENSTACK_IMAGE_ID} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_IMAGE_ID="" + fi + if [ -n "${_OPT_TMP_DBAAS_OPENSTACK_CONFIRM_YES}" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_CONFIRM_YES=${_OPT_TMP_DBAAS_OPENSTACK_CONFIRM_YES} + else + # shellcheck disable=SC2034 + K2HR3CLI_OPENSTACK_CONFIRM_YES=0 + fi + + # + # Check special variable(K2HR3CLI_OPENSTACK_{IDENTITY,NOVA,GLANCE,NEUTRON}_URI) + # + # Cut last word if it is '/' and space + # + if [ -n "${K2HR3CLI_OPENSTACK_IDENTITY_URI}" ]; then + for _OPT_TMP_URI_POS in $(seq 0 ${#K2HR3CLI_OPENSTACK_IDENTITY_URI}); do + _OPT_TMP_URI_LAST_POS=$((${#K2HR3CLI_OPENSTACK_IDENTITY_URI} - _OPT_TMP_URI_POS)) + if [ "${_OPT_TMP_URI_LAST_POS}" -le 0 ]; then + break + fi + _OPT_TMP_URI_LAST_CH=$(pecho -n "${K2HR3CLI_OPENSTACK_IDENTITY_URI}" | cut -b "${_OPT_TMP_URI_LAST_POS}") + if [ "X${_OPT_TMP_URI_LAST_CH}" = "X/" ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X " ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X${K2HR3CLI_TAB_WORD}" ]; then + if [ "${_OPT_TMP_URI_LAST_POS}" -gt 1 ]; then + _OPT_TMP_URI_LAST_POS=$((_OPT_TMP_URI_LAST_POS - 1)) + K2HR3CLI_OPENSTACK_IDENTITY_URI=$(pecho -n "${K2HR3CLI_OPENSTACK_IDENTITY_URI}" | cut -c 1-"${_OPT_TMP_URI_LAST_POS}") + else + K2HR3CLI_OPENSTACK_IDENTITY_URI="" + break; + fi + else + break + fi + done + fi + if [ -n 
"${K2HR3CLI_OPENSTACK_NOVA_URI}" ]; then + for _OPT_TMP_URI_POS in $(seq 0 ${#K2HR3CLI_OPENSTACK_NOVA_URI}); do + _OPT_TMP_URI_LAST_POS=$((${#K2HR3CLI_OPENSTACK_NOVA_URI} - _OPT_TMP_URI_POS)) + if [ "${_OPT_TMP_URI_LAST_POS}" -le 0 ]; then + break + fi + _OPT_TMP_URI_LAST_CH=$(pecho -n "${K2HR3CLI_OPENSTACK_NOVA_URI}" | cut -b "${_OPT_TMP_URI_LAST_POS}") + if [ "X${_OPT_TMP_URI_LAST_CH}" = "X/" ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X " ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X${K2HR3CLI_TAB_WORD}" ]; then + if [ "${_OPT_TMP_URI_LAST_POS}" -gt 1 ]; then + _OPT_TMP_URI_LAST_POS=$((_OPT_TMP_URI_LAST_POS - 1)) + K2HR3CLI_OPENSTACK_NOVA_URI=$(pecho -n "${K2HR3CLI_OPENSTACK_NOVA_URI}" | cut -c 1-"${_OPT_TMP_URI_LAST_POS}") + else + K2HR3CLI_OPENSTACK_NOVA_URI="" + break; + fi + else + break + fi + done + fi + if [ -n "${K2HR3CLI_OPENSTACK_GLANCE_URI}" ]; then + for _OPT_TMP_URI_POS in $(seq 0 ${#K2HR3CLI_OPENSTACK_GLANCE_URI}); do + _OPT_TMP_URI_LAST_POS=$((${#K2HR3CLI_OPENSTACK_GLANCE_URI} - _OPT_TMP_URI_POS)) + if [ "${_OPT_TMP_URI_LAST_POS}" -le 0 ]; then + break + fi + _OPT_TMP_URI_LAST_CH=$(pecho -n "${K2HR3CLI_OPENSTACK_GLANCE_URI}" | cut -b "${_OPT_TMP_URI_LAST_POS}") + if [ "X${_OPT_TMP_URI_LAST_CH}" = "X/" ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X " ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X${K2HR3CLI_TAB_WORD}" ]; then + if [ "${_OPT_TMP_URI_LAST_POS}" -gt 1 ]; then + _OPT_TMP_URI_LAST_POS=$((_OPT_TMP_URI_LAST_POS - 1)) + K2HR3CLI_OPENSTACK_GLANCE_URI=$(pecho -n "${K2HR3CLI_OPENSTACK_GLANCE_URI}" | cut -c 1-"${_OPT_TMP_URI_LAST_POS}") + else + K2HR3CLI_OPENSTACK_GLANCE_URI="" + break; + fi + else + break + fi + done + fi + if [ -n "${K2HR3CLI_OPENSTACK_NEUTRON_URI}" ]; then + for _OPT_TMP_URI_POS in $(seq 0 ${#K2HR3CLI_OPENSTACK_NEUTRON_URI}); do + _OPT_TMP_URI_LAST_POS=$((${#K2HR3CLI_OPENSTACK_NEUTRON_URI} - _OPT_TMP_URI_POS)) + if [ "${_OPT_TMP_URI_LAST_POS}" -le 0 ]; then + break + fi + _OPT_TMP_URI_LAST_CH=$(pecho -n "${K2HR3CLI_OPENSTACK_NEUTRON_URI}" | cut -b "${_OPT_TMP_URI_LAST_POS}") + if [ "X${_OPT_TMP_URI_LAST_CH}" = "X/" ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X " ] || [ "X${_OPT_TMP_URI_LAST_CH}" = "X${K2HR3CLI_TAB_WORD}" ]; then + if [ "${_OPT_TMP_URI_LAST_POS}" -gt 1 ]; then + _OPT_TMP_URI_LAST_POS=$((_OPT_TMP_URI_LAST_POS - 1)) + K2HR3CLI_OPENSTACK_NEUTRON_URI=$(pecho -n "${K2HR3CLI_OPENSTACK_NEUTRON_URI}" | cut -c 1-"${_OPT_TMP_URI_LAST_POS}") + else + K2HR3CLI_OPENSTACK_NEUTRON_URI="" + break; + fi + else + break + fi + done + fi + + return 0 +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/summary.sh b/src/libexec/database/summary.sh new file mode 100644 index 0000000..a8b90ed --- /dev/null +++ b/src/libexec/database/summary.sh @@ -0,0 +1,46 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#--------------------------------------------------------------------- +# Put Summary for command +#--------------------------------------------------------------------- +# [NOTE] +# Adjust the start and end positions of the characters according to the +# scale below, and arrange the lines. +# +# +-- start position(ex. title) +# | +-- indent for description +# | | +# v v +# +---+----+----+----+----+----+----+----+----+----+----+----+----| +# +echo " ${K2HR3CLI_MODE}" +echo " CLI command for operating DBaaS(K2HDKC Cluster)." +echo " See https://dbaas.k2hdkc.antpick.ax/ about K2HDKC DBaaS." +echo "" + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/src/libexec/database/variables.sh b/src/libexec/database/variables.sh new file mode 100644 index 0000000..d05f4ac --- /dev/null +++ b/src/libexec/database/variables.sh @@ -0,0 +1,323 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# DBaaS Valiables +#-------------------------------------------------------------- +# The following values are used in the K2HDKC DBAAS CLI. +# +# K2HR3CLI_DBAAS_CONFIG +# K2HR3CLI_OPENSTACK_USER +# K2HR3CLI_OPENSTACK_USER_ID +# K2HR3CLI_OPENSTACK_PASS +# K2HR3CLI_OPENSTACK_TENANT +# K2HR3CLI_OPENSTACK_TENANT_ID +# K2HR3CLI_OPENSTACK_SCOPED_TOKEN +# K2HR3CLI_OPENSTACK_IDENTITY_URI +# K2HR3CLI_OPENSTACK_NOVA_URI +# K2HR3CLI_OPENSTACK_GLANCE_URI +# K2HR3CLI_OPENSTACK_NEUTRON_URI +# + +#-------------------------------------------------------------- +# DBaaS Variables for Configration +#-------------------------------------------------------------- +# +# Description +# +if [ "X${K2HR3CLI_PLUGIN_CONFIG_VAR_DESC}" != "X" ]; then + K2HR3CLI_PLUGIN_CONFIG_VAR_DESC="${K2HR3CLI_PLUGIN_CONFIG_VAR_DESC} config_var_desciption_dbaas" +else + K2HR3CLI_PLUGIN_CONFIG_VAR_DESC="config_var_desciption_dbaas" +fi + +# +# Names +# +if [ "X${K2HR3CLI_PLUGIN_CONFIG_VAR_NAME}" != "X" ]; then + K2HR3CLI_PLUGIN_CONFIG_VAR_NAME="${K2HR3CLI_PLUGIN_CONFIG_VAR_NAME} config_var_name_dbaas" +else + K2HR3CLI_PLUGIN_CONFIG_VAR_NAME="config_var_name_dbaas" +fi + +# +# Check DBaaS Variables +# +if [ "X${K2HR3CLI_PLUGIN_CONFIG_CHECK_VAR}" != "X" ]; then + K2HR3CLI_PLUGIN_CONFIG_CHECK_VAR="${K2HR3CLI_PLUGIN_CONFIG_CHECK_VAR} config_check_var_name_dbaas" +else + K2HR3CLI_PLUGIN_CONFIG_CHECK_VAR="config_check_var_name_dbaas" +fi + +#-------------------------------------------------------------- +# Functions +#-------------------------------------------------------------- +# +# Return variable description for this Example Plugin +# +# $? 
: result +# +# [NOTE] +# +---+----+----+----+----+----+----+----+----+----+----+----+----| +# ^ ^ +# | +--- Start for Description +# +------- Start for Variables Title +# +config_var_desciption_dbaas() +{ + prn_msg "K2HR3CLI_DBAAS_CONFIG" + prn_msg " Specifies the DBaaS configuration directory path." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_USER" + prn_msg " Set the user name of OpenStack." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_USER_ID" + prn_msg " Set the user id of OpenStack." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_PASS" + prn_msg " Set the passphrase for the OpenStack user." + prn_msg " RECOMMEND THAT THIS VALUE IS NOT SET TO ADDRESS SECURITY" + prn_msg " VULNERABILITIES." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_TENANT" + prn_msg " Specify the available tenant for OpenStack. A Scoped Token" + prn_msg " will be issued to this tenant." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_TENANT_ID" + prn_msg " Specify the available tenant id for OpenStack." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_SCOPED_TOKEN" + prn_msg " Set the Scoped Token of OpenStack." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_IDENTITY_URI" + prn_msg " Specifies the OpenStack Identity URI." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_NOVA_URI" + prn_msg " Specifies the OpenStack Nova(Compute) URI." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_GLANCE_URI" + prn_msg " Specifies the OpenStack Glance(Images) URI." + prn_msg "" + prn_msg "K2HR3CLI_OPENSTACK_NEUTRON_URI" + prn_msg " Specifies the OpenStack Neutron(Network) URI." + prn_msg "" +} + +# +# Return variable name +# +# $1 : variable name(if empty, it means all) +# $? : result +# Output : variable names(with separator is space) +# +config_var_name_dbaas() +{ + if [ "X$1" = "X" ]; then + if [ "X${K2HR3CLI_DBAAS_CONFIG}" != "X" ]; then + prn_msg "K2HR3CLI_DBAAS_CONFIG: \"${K2HR3CLI_DBAAS_CONFIG}\"" + else + prn_msg "K2HR3CLI_DBAAS_CONFIG: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_USER}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_USER: \"${K2HR3CLI_OPENSTACK_USER}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_USER: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_USER_ID}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_USER_ID: \"${K2HR3CLI_OPENSTACK_USER_ID}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_USER_ID: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_PASS}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_PASS: \"********(${#K2HR3CLI_OPENSTACK_PASS})\"" + else + prn_msg "K2HR3CLI_OPENSTACK_PASS: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_TENANT: \"${K2HR3CLI_OPENSTACK_TENANT}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_TENANT: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_TENANT_ID: \"${K2HR3CLI_OPENSTACK_TENANT_ID}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_TENANT_ID: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_SCOPED_TOKEN: \"${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_SCOPED_TOKEN: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_IDENTITY_URI: \"${K2HR3CLI_OPENSTACK_IDENTITY_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_IDENTITY_URI: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_NOVA_URI: \"${K2HR3CLI_OPENSTACK_NOVA_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_NOVA_URI: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" != "X" ]; then + prn_msg 
"K2HR3CLI_OPENSTACK_GLANCE_URI: \"${K2HR3CLI_OPENSTACK_GLANCE_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_GLANCE_URI: (empty)" + fi + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_NEUTRON_URI: \"${K2HR3CLI_OPENSTACK_NEUTRON_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_NEUTRON_URI: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_DBAAS_CONFIG" ]; then + if [ "X${K2HR3CLI_DBAAS_CONFIG}" != "X" ]; then + prn_msg "K2HR3CLI_DBAAS_CONFIG: \"${K2HR3CLI_DBAAS_CONFIG}\"" + else + prn_msg "K2HR3CLI_DBAAS_CONFIG: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_USER" ]; then + if [ "X${K2HR3CLI_OPENSTACK_USER}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_USER: \"${K2HR3CLI_OPENSTACK_USER}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_USER: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_USER_ID" ]; then + if [ "X${K2HR3CLI_OPENSTACK_USER_ID}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_USER_ID: \"${K2HR3CLI_OPENSTACK_USER_ID}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_USER_ID: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_PASS" ]; then + if [ "X${K2HR3CLI_OPENSTACK_PASS}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_PASS: \"${K2HR3CLI_OPENSTACK_PASS}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_PASS: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_TENANT" ]; then + if [ "X${K2HR3CLI_OPENSTACK_TENANT}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_TENANT: \"${K2HR3CLI_OPENSTACK_TENANT}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_TENANT: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_TENANT_ID" ]; then + if [ "X${K2HR3CLI_OPENSTACK_TENANT_ID}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_TENANT_ID: \"${K2HR3CLI_OPENSTACK_TENANT_ID}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_TENANT_ID: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_SCOPED_TOKEN" ]; then + if [ "X${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_SCOPED_TOKEN: \"${K2HR3CLI_OPENSTACK_SCOPED_TOKEN}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_SCOPED_TOKEN: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_IDENTITY_URI" ]; then + if [ "X${K2HR3CLI_OPENSTACK_IDENTITY_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_IDENTITY_URI: \"${K2HR3CLI_OPENSTACK_IDENTITY_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_IDENTITY_URI: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_NOVA_URI" ]; then + if [ "X${K2HR3CLI_OPENSTACK_NOVA_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_NOVA_URI: \"${K2HR3CLI_OPENSTACK_NOVA_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_NOVA_URI: (empty)" + fi + return 0 + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_GLANCE_URI" ]; then + if [ "X${K2HR3CLI_OPENSTACK_GLANCE_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_GLANCE_URI: \"${K2HR3CLI_OPENSTACK_GLANCE_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_GLANCE_URI: (empty)" + fi + return 0 + + + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_NEUTRON_URI" ]; then + if [ "X${K2HR3CLI_OPENSTACK_NEUTRON_URI}" != "X" ]; then + prn_msg "K2HR3CLI_OPENSTACK_NEUTRON_URI: \"${K2HR3CLI_OPENSTACK_NEUTRON_URI}\"" + else + prn_msg "K2HR3CLI_OPENSTACK_NEUTRON_URI: (empty)" + fi + return 0 + fi + return 1 +} + +# +# Check variable name +# +# $1 : variable name +# $? 
: result +# +config_check_var_name_dbaas() +{ + if [ "X$1" = "XK2HR3CLI_DBAAS_CONFIG" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_USER" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_USER_ID" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_PASS" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_TENANT" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_TENANT_ID" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_SCOPED_TOKEN" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_IDENTITY_URI" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_NOVA_URI" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_GLANCE_URI" ]; then + return 0 + elif [ "X$1" = "XK2HR3CLI_OPENSTACK_NEUTRON_URI" ]; then + return 0 + fi + return 1 +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/test/Makefile.am b/test/Makefile.am new file mode 100644 index 0000000..7eaf63b --- /dev/null +++ b/test/Makefile.am @@ -0,0 +1,38 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +SUBDIRS = snapshots + +TESTS = test.sh + +EXTRA_DIST = test.sh \ + util_dbaas_request.sh \ + test_database.sh + +CLEANFILES = *.log + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/test/snapshots/Makefile.am b/test/snapshots/Makefile.am new file mode 100644 index 0000000..f5cf559 --- /dev/null +++ b/test/snapshots/Makefile.am @@ -0,0 +1,29 @@ +# +# K2HR3 Utilities - Command Line Interface +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HR3 is K2hdkc based Resource and Roles and policy Rules, gathers +# common management information for the cloud. +# K2HR3 can dynamically manage information as "who", "what", "operate". +# These are stored as roles, resources, policies in K2hdkc, and the +# client system can dynamically read and modify these information. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. 
+# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Feb 15 2021 +# REVISION: +# + +EXTRA_DIST = test_database.snapshot + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/test/snapshots/test_database.snapshot b/test/snapshots/test_database.snapshot new file mode 100644 index 0000000..7bb31fc --- /dev/null +++ b/test/snapshots/test_database.snapshot @@ -0,0 +1,117 @@ +TEST CASE : (1) Normal : Create OpenStack Unscoped Token +TEST_USER_OPENSTACK_UNSCOPED_TOKEN + +TEST CASE : (2) Normal : Create OpenStack Scoped Token +TEST_USER_OPENSTACK_SCOPED_TOKEN + +TEST CASE : (3) Normal : Create Cluster +Succeed : Phase : Create "testcluster" Resource +Succeed : Phase : Create "testcluster/server" Resource +Succeed : Phase : Create "testcluster/slave" Resource +Succeed : Phase : Create "testcluster" Policy +Succeed : Phase : Create "testcluster" Role +Succeed : Phase : Create "testcluster/server" Role +Succeed : Phase : Create "testcluster/slave" Role +Succeed : Registration of cluster "testcluster" with K2HR3 is complete + +TEST CASE : (4) Normal : Add server host to cluster +Succeed : Add server host(TESTSERVER - "TESTSERVER_ID") for testcluster cluster. + +TEST CASE : (5) Normal : Add slave host to cluster +Succeed : Add slave host(TESTSLAVE - "TESTSERVER_ID") for testcluster cluster. + +TEST CASE : (6) Normal : Delete host to cluster +Succeed : Delete host TESTSERVER from testcluster cluster(OpenStack and K2HR3). + +TEST CASE : (7) Normal : Show server host list +[ + { + "name": "TESTSERVER", + "id": "TESTSERVER_ID", + "ip": "127.0.0.1" + } +] + +TEST CASE : (8) Normal : Show slave host list +[ + { + "name": "TESTSLAVE", + "id": "TESTSLAVE_ID", + "ip": "127.0.0.1" + } +] + +TEST CASE : (9) Normal : Show server configuration +{ + "string": "TEST_DUMMY_RESOURCE_FOR_SERVER", + "object": null, + "keys": { + "cluster-name": "testcluster", + "chmpx-server-port": 98020, + "chmpx-server-ctlport": 98021, + "chmpx-slave-ctlport": 98031, + "k2hdkc-dbaas-add-user": 1, + "k2hdkc-dbaas-proc-user": "testrunner", + "chmpx-mode": "SERVER", + "k2hr3-init-packages": "", + "k2hr3-init-packagecloud-packages": "k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc", + "k2hr3-init-systemd-packages": "chmpx.service,k2hdkc.service,k2hr3-get-resource.timer", + "host_key": "127.0.0.1,0,TESTSERVER_ID", + "one_host": { + "host": "127.0.0.1", + "port": 0, + "extra": "openstack-auto-v1", + "tag": "TESTSERVER", + "cuk": "TESTSERVER_ID" + } + }, + "expire": null +} + +TEST CASE : (10) Normal : Show slave configuration +{ + "string": "TEST_DUMMY_RESOURCE_FOR_SLAVE", + "object": null, + "keys": { + "cluster-name": "testcluster", + "chmpx-server-port": 98020, + "chmpx-server-ctlport": 98021, + "chmpx-slave-ctlport": 98031, + "k2hdkc-dbaas-add-user": 1, + "k2hdkc-dbaas-proc-user": "testrunner", + "chmpx-mode": "SLAVE", + "k2hr3-init-packages": "", + "k2hr3-init-packagecloud-packages": "k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc", + "k2hr3-init-systemd-packages": "chmpx.service,k2hdkc.service,k2hr3-get-resource.timer", + "host_key": "127.0.0.1,0,TESTSLAVE_ID", + "one_host": { + "host": "127.0.0.1", + "port": 0, + "extra": "openstack-auto-v1", + "tag": "TESTSLAVE", + "cuk": "TESTSLAVE_ID" + } + }, + "expire": null +} + +TEST CASE : (11) Normal : Delete Cluster +[NOTICE] Delete all of the cluster configuration, data, cluster hosts, and so on. +Succeed : Delete all testcluster cluster(OpenStack and K2HR3). 
+ +TEST CASE : (12) Normal : List Images +[ + { + "name": "TEST_IMAGE", + "id": "TEST_IMAGE_ID" + } +] + +TEST CASE : (13) Normal : List Flavors +[ + { + "name": "TEST_FLAVOR", + "id": "TEST_FLAVOR_ID" + } +] + diff --git a/test/test.sh b/test/test.sh new file mode 100755 index 0000000..241e067 --- /dev/null +++ b/test/test.sh @@ -0,0 +1,212 @@ +#!/bin/sh +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#--------------------------------------------------------------------- +# Variables +#--------------------------------------------------------------------- +TESTMAINBIN=$(basename "$0") +TESTMAINBASENAME=$(echo "${TESTMAINBIN}" | sed 's/[.]sh$//') + +TESTDIR=$(dirname "$0") +TESTDIR=$(cd "${TESTDIR}" || exit 1; pwd) +SRCDIR=$(cd "${TESTDIR}"/../src || exit 1; pwd) +# shellcheck disable=SC2034 +LIBEXECDIR=$(cd "${SRCDIR}"/libexec || exit 1; pwd) + +TEST_ALL_LOGFILE="${TESTDIR}/${TESTMAINBASENAME}.log" +TEST_EXTCODE_FILE="/tmp/.${TESTMAINBASENAME}.exitcode" +TEST_SUMMARY_FILE="${TESTDIR}/${TESTMAINBASENAME}.summary.log" + +# +# Special Environment +# +# [NOTE] +# The TEST_CREATE_DUMMY_FUNCTION environment variable modifies +# the behavior of the xxx_request() function in util_request.sh +# (k2hr3_cli). +# This environment variable is set to the create_dummy_response() +# function by default when util_request.sh(k2hr3_cli) is loaded. +# After loading this util_request.sh(k2hr3_cli), override the +# TEST_CREATE_DUMMY_RESPONSE_FUNC environment variable and replace +# it with the create_dummy_dbaas_response() function in this file. +# The create_dummy_dbaas_response() function handles the database +# command only. +# Otherwise, call the original create_dummy_response() function +# and let it do the work. +# This allows for dedicated testing of plugins. +# +export TEST_CREATE_DUMMY_RESPONSE_FUNC="create_dummy_dbaas_response" + +# +# Load DBaaS dummy request file +# +export K2HR3CLI_REQUEST_FILE="${TESTDIR}/util_dbaas_request.sh" +if [ -f "${K2HR3CLI_REQUEST_FILE}" ]; then + . "${K2HR3CLI_REQUEST_FILE}" +fi + +# +# Sub Test files +# +# The test file is a file with the "test_" prefix and the ".sh" suffix. +# +TEST_FILES="" +for _TEST_FILE_TMP in "${TESTDIR}"/*; do + _TEST_FILE_TMP=$(pecho -n "${_TEST_FILE_TMP}" | sed "s#^${TESTDIR}/##g") + case ${_TEST_FILE_TMP} in + ${TESTMAINBIN}) + ;; + test_*.sh) + if [ "X${TEST_FILES}" = "X" ]; then + TEST_FILES=${_TEST_FILE_TMP} + else + TEST_FILES="${TEST_FILES} ${_TEST_FILE_TMP}" + fi + ;; + *) + ;; + esac +done + +# +# Additional options for test +# +# [NOTE] +# Overwrite special options to common options(set to $@) in util_test.sh +# A value of --config specifies a file that does not exist. +# It's a warning, but be sure not to set it, as the test will fail if +# the real file affects the variable. 
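+# For reference, after the "set --" line below, "$@" expands to roughly:
+#   --config <TESTDIR>/k2hr3.config --apiuri http://localhost
+#   --openstack_identity_uri http://localhost:8080
+#   --dbaas_config <TESTDIR>/../src/libexec/database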
+# +TEST_IDENTITY_URI="http://localhost:8080" +set -- "--config" "${TESTDIR}/k2hr3.config" "--apiuri" "http://localhost" "--openstack_identity_uri" "${TEST_IDENTITY_URI}" "--dbaas_config" "${TESTDIR}/../src/libexec/database" + +#--------------------------------------------------------------------- +# Functions +#--------------------------------------------------------------------- +func_usage() +{ + echo "" + echo "Usage: ${TESTMAINBIN} [option...]" + echo " --update(-u) update the test result comparison file with the current test result." + echo " --help(-h) print help." + echo "" +} + +#--------------------------------------------------------------------- +# Test all +#--------------------------------------------------------------------- +# +# Header +# +echo "" +echo "K2HR3 DBAAS CLI TEST ($(date -R))" | tee "${TEST_ALL_LOGFILE}" +echo "" | tee -a "${TEST_ALL_LOGFILE}" + +# +# Summary file +# +echo "${CREV}[Summary]${CDEF} K2HR3 DBAAS CLI TEST" > "${TEST_SUMMARY_FILE}" +echo "" >> "${TEST_SUMMARY_FILE}" + +# +# Test all +# +ALL_TEST_RESULT=0 + +for SUBTESTBIN in ${TEST_FILES}; do + # + # Title + # + SUBTEST_TITLE=$(pecho -n "${SUBTESTBIN}" | sed -e 's/^test_//g' -e 's/[.]sh$//g' | tr '[:lower:]' '[:upper:]') + + # + # Clear exit code file + # + rm -f "${TEST_EXTCODE_FILE}" + + # + # Run test + # + echo "${CREV}[${SUBTEST_TITLE}]${CDEF}:" | tee -a "${TEST_ALL_LOGFILE}" + ("${TESTDIR}/${SUBTESTBIN}" "${SUB_TEST_UPDATE_OPT}"; echo $? > "${TEST_EXTCODE_FILE}") | stdbuf -oL -eL sed -e 's/^/ /' | tee -a "${TEST_ALL_LOGFILE}" + + # + # Result + # + if [ -f "${TEST_EXTCODE_FILE}" ]; then + SUBTEST_RESULT=$(cat "${TEST_EXTCODE_FILE}") + if ! compare_part_string "${SUBTEST_RESULT}" >/dev/null 2>&1; then + echo " ${CYEL}(error) ${TESTMAINBIN} : result code for ${SUBTEST_TITLE} is wrong(${SUBTEST_RESULT}).${CDEF}" | tee -a "${TEST_ALL_LOGFILE}" + SUBTEST_RESULT=1 + fi + rm -f "${TEST_EXTCODE_FILE}" + else + echo " ${CYEL}(error) ${TESTMAINBIN} : result code file for ${SUBTEST_TITLE} is not existed.${CDEF}" | tee -a "${TEST_ALL_LOGFILE}" + SUBTEST_RESULT=1 + fi + + if [ ${SUBTEST_RESULT} -eq 0 ]; then + echo " => ${CGRN}Succeed${CDEF}" | tee -a "${TEST_ALL_LOGFILE}" + else + ALL_TEST_RESULT=1 + echo " => ${CRED}Failure${CDEF}" | tee -a "${TEST_ALL_LOGFILE}" + fi + echo "" | tee -a "${TEST_ALL_LOGFILE}" + + # + # Add Summary + # + if [ ${SUBTEST_RESULT} -eq 0 ]; then + echo " ${CGRN}PASS${CDEF} : ${SUBTEST_TITLE}" >> "${TEST_SUMMARY_FILE}" + else + echo " ${CRED}FAIL${CDEF} : ${SUBTEST_TITLE}" >> "${TEST_SUMMARY_FILE}" + fi +done + +# +# Print Summary +# +if [ -f "${TEST_SUMMARY_FILE}" ]; then + tee -a "${TEST_ALL_LOGFILE}" < "${TEST_SUMMARY_FILE}" + rm -f "${TEST_SUMMARY_FILE}" +fi + +# +# Result(Footer) +# +echo "" | tee -a "${TEST_ALL_LOGFILE}" +if [ ${ALL_TEST_RESULT} -eq 0 ]; then + echo "All Test ${CGRN}PASSED${CDEF} ($(date -R))" | tee -a "${TEST_ALL_LOGFILE}" +else + echo "All Test ${CRED}FAILED${CDEF} ($(date -R))" | tee -a "${TEST_ALL_LOGFILE}" +fi +echo "" + +exit ${ALL_TEST_RESULT} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/test/test_database.sh b/test/test_database.sh new file mode 100755 index 0000000..42e3578 --- /dev/null +++ b/test/test_database.sh @@ -0,0 +1,386 @@ +#!/bin/sh +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! 
JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#--------------------------------------------------------------------- +# Variables +#--------------------------------------------------------------------- +TESTNAME=$(basename "$0") +# shellcheck disable=SC2034 +TESTBASENAME=$(echo "${TESTNAME}" | sed 's/[.]sh$//') +TESTDIR=$(dirname "$0") +TESTDIR=$(cd "${TESTDIR}" || exit 1; pwd) + +# +# Special Environment +# +# [NOTE] +# The TEST_CREATE_DUMMY_FUNCTION environment variable modifies +# the behavior of the xxx_request() function in util_request.sh +# (k2hr3_cli). +# This environment variable is set to the create_dummy_response() +# function by default when util_request.sh(k2hr3_cli) is loaded. +# After loading this util_request.sh(k2hr3_cli), override the +# TEST_CREATE_DUMMY_RESPONSE_FUNC environment variable and replace +# it with the create_dummy_dbaas_response() function in this file. +# The create_dummy_dbaas_response() function handles the database +# command only. +# Otherwise, call the original create_dummy_response() function +# and let it do the work. +# This allows for dedicated testing of plugins. +# +export TEST_CREATE_DUMMY_RESPONSE_FUNC="create_dummy_dbaas_response" + +# +# Load DBaaS dummy request file +# +export K2HR3CLI_REQUEST_FILE="${TESTDIR}/util_dbaas_request.sh" +if [ -f "${K2HR3CLI_REQUEST_FILE}" ]; then + . "${K2HR3CLI_REQUEST_FILE}" +fi + +# +# Additional options for test +# +# [NOTE] +# Overwrite special options to common options(set to $@) in util_test.sh +# A value of --config specifies a file that does not exist. +# It's a warning, but be sure not to set it, as the test will fail if +# the real file affects the variable. +# +TEST_IDENTITY_URI="http://localhost:8080" +set -- "--config" "${TESTDIR}/k2hr3.config" "--apiuri" "http://localhost" "--openstack_identity_uri" "${TEST_IDENTITY_URI}" "--dbaas_config" "${TESTDIR}/../src/libexec/database" + +#===================================================================== +# Test for Database +#===================================================================== +TEST_EXIT_CODE=0 + +#--------------------------------------------------------------------- +# (1) Normal : Create OpenStack Unscoped Token +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(1) Normal : Create OpenStack Unscoped Token" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database openstack utoken --op_user "${_TEST_K2HR3_USER}" --op_passphrase "${_TEST_K2HR3_PASS}" "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? 
-ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (2) Normal : Create OpenStack Scoped Token +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(2) Normal : Create OpenStack Scoped Token" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database openstack token --openstacktoken TEST_USER_OPENSTACK_UNSCOPED_TOKEN --op_tenant "${_TEST_K2HR3_TENANT}" "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (3) Normal : Create Cluster +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(3) Normal : Create Cluster" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database create "${_TEST_K2HDKC_CLUSTER_NAME}" --chmpx_server_port 98020 --chmpx_server_ctlport 98021 --chmpx_slave_ctlport 98031 --dbaas_user testrunner --dbaas_create_user --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + + +#--------------------------------------------------------------------- +# (4) Normal : Add server host to cluster +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(4) Normal : Add server host to cluster" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +# [NOTE] +# In this test, we have given --create_roletoken. +# +"${K2HR3CLIBIN}" database add host server "${_TEST_K2HDKC_CLUSTER_NAME}" TESTSERVER --op_keypair TEST_KEYPAIR --op_flavor TEST_FLAVOR --op_image TEST_IMAGE --create_roletoken --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (5) Normal : Add slave host to cluster +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(5) Normal : Add slave host to cluster" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +# [NOTE] +# This test does not grant --create_roletoken. +# +"${K2HR3CLIBIN}" database add host slave "${_TEST_K2HDKC_CLUSTER_NAME}" TESTSLAVE --op_keypair TEST_KEYPAIR --op_flavor TEST_FLAVOR --op_image TEST_IMAGE --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? 
-ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (6) Normal : Delete host to cluster +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(6) Normal : Delete host to cluster" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database delete host "${_TEST_K2HDKC_CLUSTER_NAME}" TESTSERVER --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (7) Normal : Show server host list +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(7) Normal : Show server host list" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database show host server "${_TEST_K2HDKC_CLUSTER_NAME}" --json --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (8) Normal : Show slave host list +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(8) Normal : Show slave host list" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database show host slave "${_TEST_K2HDKC_CLUSTER_NAME}" --json --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (9) Normal : Show server configuration +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(9) Normal : Show server configuration" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database show configuration server "${_TEST_K2HDKC_CLUSTER_NAME}" --json --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (10) Normal : Show slave configuration +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(10) Normal : Show slave configuration" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database show configuration slave "${_TEST_K2HDKC_CLUSTER_NAME}" --json --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? 
-ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (11) Normal : Delete Cluster +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(11) Normal : Delete Cluster" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database delete cluster "${_TEST_K2HDKC_CLUSTER_NAME}" -y --scopedtoken "TEST_TOKEN_SCOPED_FOR_TENANT_${_TEST_K2HR3_TENANT}_USER_${_TEST_K2HR3_USER}" --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (12) Normal : List Images +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(12) Normal : List Images" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database list images --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN --json "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# (13) Normal : List Flavors +#--------------------------------------------------------------------- +# +# Title +# +TEST_TITLE="(13) Normal : List Flavors" +test_prn_title "${TEST_TITLE}" + +# +# Run +# +"${K2HR3CLIBIN}" database list flavors --openstacktoken TEST_USER_OPENSTACK_SCOPED_TOKEN --op_tenant "${_TEST_K2HR3_TENANT}" --json "$@" > "${SUB_TEST_PART_FILE}" + +# +# Check result +# +test_processing_result "$?" "${SUB_TEST_PART_FILE}" "${TEST_TITLE}" +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +#--------------------------------------------------------------------- +# Check update log +#--------------------------------------------------------------------- +test_update_snapshot +if [ $? -ne 0 ]; then + TEST_EXIT_CODE=1 +fi + +exit ${TEST_EXIT_CODE} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +# diff --git a/test/util_dbaas_request.sh b/test/util_dbaas_request.sh new file mode 100644 index 0000000..c3f1c7d --- /dev/null +++ b/test/util_dbaas_request.sh @@ -0,0 +1,628 @@ +# +# K2HDKC DBaaS Command Line Interface - K2HR3 CLI Plugin +# +# Copyright 2021 Yahoo! Japan Corporation. +# +# K2HDKC DBaaS is a DataBase as a Service provided by Yahoo! JAPAN +# which is built K2HR3 as a backend and provides services in +# cooperation with OpenStack. +# The Override configuration for K2HDKC DBaaS serves to connect the +# components that make up the K2HDKC DBaaS. K2HDKC, K2HR3, CHMPX, +# and K2HASH are components provided as AntPickax. +# +# For the full copyright and license information, please view +# the license file that was distributed with this source code. +# +# AUTHOR: Takeshi Nakatani +# CREATE: Mon Mar 1 2021 +# REVISION: +# + +#-------------------------------------------------------------- +# Variables +#-------------------------------------------------------------- +# [NOTE] +# This file is loaded from the test script or from the k2hr3 process. +# So create the exact path to the test directory here. 
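+# For example (paths here are only illustrative): when test_database.sh
+# under <top>/test sources this file, $0 points into <top>/test and
+# "${_INIT_TESTDIR}/../test" resolves back to <top>/test, independently
+# of the caller's current working directory.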
+#
+_INIT_TESTDIR=$(dirname "$0")
+TESTDIR=$(cd "${_INIT_TESTDIR}/../test" || exit 1; pwd)
+
+#
+# Set own file path to K2HR3CLI_REQUEST_FILE if it is empty
+#
+if [ "X${K2HR3CLI_REQUEST_FILE}" = "X" ]; then
+	export K2HR3CLI_REQUEST_FILE="${TESTDIR}/util_dbaas_request.sh"
+fi
+
+#
+# Load K2HR3 Test dummy response file
+#
+UTIL_REQUESTFILE="util_request.sh"
+if [ -f "${TESTDIR}/${UTIL_REQUESTFILE}" ]; then
+	. "${TESTDIR}/${UTIL_REQUESTFILE}"
+fi
+
+#
+# Load utility file for test
+#
+UTIL_TESTFILE="util_test.sh"
+if [ -f "${TESTDIR}/${UTIL_TESTFILE}" ]; then
+	. "${TESTDIR}/${UTIL_TESTFILE}"
+fi
+
+#
+# Response Header File
+#
+K2HR3CLI_REQUEST_RESHEADER_FILE="/tmp/.${BINNAME}_$$_curl.header"
+
+#
+# Test for common values
+#
+# shellcheck disable=SC2034,SC2037
+_TEST_K2HR3_USER="test"
+# shellcheck disable=SC2034
+_TEST_K2HR3_PASS="password"
+# shellcheck disable=SC2034
+_TEST_K2HR3_TENANT="test1"
+_TEST_K2HDKC_CLUSTER_NAME="testcluster"
+
+#--------------------------------------------------------------
+# DBaaS Response for All test
+#--------------------------------------------------------------
+#
+# Create Dummy DBaaS Response(proxying)
+#
+create_dummy_dbaas_response()
+{
+	#
+	# Call own test response function
+	#
+	create_dummy_dbaas_response_sub "$@"
+	if [ $? -eq 3 ]; then
+		#
+		# For requests not handled here, call the k2hr3_cli test response function.
+		#
+		prn_dbg "(create_dummy_dbaas_response) Delegate requests that are not handled by DBaaS to create_dummy_response."
+		create_dummy_response "$@"
+	fi
+	return $?
+}
+
+#
+# Create Dummy DBaaS Response Sub
+#
+# $1 : Method(GET/PUT/POST/HEAD/DELETE)
+# $2 : URL path and parameters in request
+# $3 : body data(string) for post
+# $4 : body data(file path) for post
+# $5 : need content type header (* this value is not used)
+# $6... : other headers (do not include spaces in each header)
+#
+# $? : result
+# 0 success(request completed successfully, need to check K2HR3CLI_REQUEST_EXIT_CODE for the processing result)
+# 1 failure(if the curl request fails)
+# 2 fatal error
+# 3 not handling
+# Set global values
+# K2HR3CLI_REQUEST_EXIT_CODE : http response code
+# K2HR3CLI_REQUEST_RESULT_FILE : request result content file
+#
+create_dummy_dbaas_response_sub()
+{
+	if [ $# -lt 2 ]; then
+		prn_err "Missing options for calling request."
+		return 2
+	fi
+
+	#
+	# Check Parameters
+	#
+	_DUMMY_METHOD="$1"
+	if [ "X${_DUMMY_METHOD}" != "XGET" ] && [ "X${_DUMMY_METHOD}" != "XHEAD" ] && [ "X${_DUMMY_METHOD}" != "XPUT" ] && [ "X${_DUMMY_METHOD}" != "XPOST" ] && [ "X${_DUMMY_METHOD}" != "XDELETE" ]; then
+		prn_err "Unknown Method($1) options for calling request."
+		return 2
+	fi
+
+	_DUMMY_URL_FULL="$2"
+	_DUMMY_URL_PATH=$(echo "${_DUMMY_URL_FULL}" | sed -e 's/?.*$//g' -e 's/&.*$//g')
+
+	pecho -n "${_DUMMY_URL_FULL}" | grep -q '[?|&]'
+	if [ $?
-eq 0 ]; then + _DUMMY_URL_ARGS=$(pecho -n "${_DUMMY_URL_FULL}" | sed -e 's/^.*?//g') + else + _DUMMY_URL_ARGS="" + fi + prn_dbg "(create_dummy_dbaas_response_sub) all url(${_DUMMY_METHOD}: ${_DUMMY_URL_FULL}) => url(${_DUMMY_METHOD}: ${_DUMMY_URL_PATH}) + args(${_DUMMY_URL_ARGS})" + + # shellcheck disable=SC2034 + _DUMMY_BODY_STRING="$3" + # shellcheck disable=SC2034 + _DUMMY_BODY_FILE="$4" + # shellcheck disable=SC2034 + _DUMMY_CONTENT_TYPE="$5" + if [ $# -le 5 ]; then + shift $# + else + shift 5 + fi + + # + # Common values + # + _UTIL_DBAAS_RESPONSE_DATE=$(date -R) + _UTIL_DBAAS_ISSUED_AT_DATE=$(date '+%Y-%m-%dT%H:%M:%S.000000Z') + + # + # Parse request + # + if [ "X${K2HR3CLI_OVERRIDE_URI}" != "X" ]; then + #------------------------------------------------------ + # Request for OpenStack API + #------------------------------------------------------ + if [ "X${_DUMMY_URL_PATH}" = "X/v3/auth/tokens" ]; then + #------------------------------------------------------ + # OpenStack Token + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XPOST" ]; then + if pecho -n "${_DUMMY_BODY_STRING}" | grep -q 'scope'; then + # + # Create OpenStack Scoped Token + # + pecho -n "${_DUMMY_BODY_STRING}" | grep 'scope' | grep 'project' | grep -q 'TEST_TENANT_ID' + if [ $? -ne 0 ]; then + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Create OpenStack Scoped Token, Tenant id is not found." + return 2 + fi + + util_search_urlarg "nocatalog" "${_DUMMY_URL_ARGS}" + if [ $? -eq 0 ]; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"audit_ids\":[\"TEST_TOKEN_AUDIT_ID\"],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"is_domain\":false,\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"methods\":[\"token\",\"password\"],\"project\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_PROJECT_ID\",\"name\":\"demo\"},\"roles\":[{\"id\":\"TEST_MEMBER_ROLE_ID\",\"name\":\"member\"},{\"id\":\"TEST_READER_ROLE_ID\",\"name\":\"reader\"},{\"id\":\"TEST_OTHER_ROLE_ID\",\"name\":\"anotherrole\"}],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_USER_ID\",\"name\":\"test\",\"password_expires_at\":null}}}" + else + 
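+						#
+						# [NOTE]
+						# "nocatalog" was not found in the URL arguments, so this
+						# dummy scoped token response also carries the OpenStack
+						# service catalog (nova/keystone/neutron/glance endpoints).
+						#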
_UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"audit_ids\":[\"TEST_TOKEN_AUDIT_ID\"],\"catalog\":[{\"endpoints\":[{\"id\":\"TEST_OP_NOVA_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/compute/v2.1\"}],\"id\":\"TEST_OP_NOVA_MAIN_ID\",\"name\":\"nova\",\"type\":\"compute\"},{\"endpoints\":[{\"id\":\"TEST_OP_PUB_KEYSTONE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"},{\"id\":\"TEST_OP_ADMIN_KEYSTONE_ID\",\"interface\":\"admin\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"}],\"id\":\"TEST_OP_MAIN_KEYSTONE_ID\",\"name\":\"keystone\",\"type\":\"identity\"},{\"endpoints\":[{\"id\":\"TEST_OP_NEUTRON_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost:9696/\"}],\"id\":\"TEST_OP_MAIN_NEUTRON_ID\",\"name\":\"neutron\",\"type\":\"network\"},{\"endpoints\":[{\"id\":\"TEST_OP_GLANCE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/image\"}],\"id\":\"TEST_OP_MAIN_GLANCE_ID\",\"name\":\"glance\",\"type\":\"image\"}],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"is_domain\":false,\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"methods\":[\"token\",\"password\"],\"project\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_PROJECT_ID\",\"name\":\"demo\"},\"roles\":[{\"id\":\"TEST_MEMBER_ROLE_ID\",\"name\":\"member\"},{\"id\":\"TEST_READER_ROLE_ID\",\"name\":\"reader\"},{\"id\":\"TEST_OTHER_ROLE_ID\",\"name\":\"anotherrole\"}],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_USER_ID\",\"name\":\"test\",\"password_expires_at\":null}}}" + fi + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + { + pecho "Date: ${_UTIL_DBAAS_RESPONSE_DATE}"; + pecho "Content-Type: application/json"; + pecho "Content-Length: ${#_UTIL_DBAAS_RESPONSE_CONTENT}"; + pecho "X-Subject-Token: TEST_USER_OPENSTACK_SCOPED_TOKEN"; + pecho "Vary: X-Auth-Token"; + pecho "x-openstack-request-id: REQ-POST-USER_SCOPED_TOKEN"; + pecho "Connection: close"; + } > "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=201 + + else + # + # Create OpenStack Unscoped Token + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"methods\":[\"password\"],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"OP_TEST_USER_ID\",\"name\":\"demo\",\"password_expires_at\":null},\"audit_ids\":[\"OP_TEST_USER_AUDIT_ID\"],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + { + pecho "Date: ${_UTIL_DBAAS_RESPONSE_DATE}"; + pecho "Content-Type: application/json"; + pecho "Content-Length: ${#_UTIL_DBAAS_RESPONSE_CONTENT}"; + pecho "X-Subject-Token: TEST_USER_OPENSTACK_UNSCOPED_TOKEN"; + pecho "Vary: X-Auth-Token"; + pecho "x-openstack-request-id: REQ-POST-USER_UNSCOPED_TOKEN"; + pecho "Connection: close"; + } > "${K2HR3CLI_REQUEST_RESHEADER_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=201 + fi + + elif [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Get OpenStack Token Info + # + if [ $# -lt 1 ]; then + prn_err "\"X-Auth-Token\" header is not specified." 
+ return 2 + fi + # + # Search X-Auth-Token header + # + for _TEST_ONE_TOKEN_HEADER_POS in $(seq 1 $#); do + # shellcheck disable=SC1083,SC2039 + _TEST_ONE_TOKEN_HEADER=$(eval echo '$'{"${_TEST_ONE_TOKEN_HEADER_POS}"}) + _TEST_OPENSTACK_TOKEN=$(pecho -n "${_TEST_ONE_TOKEN_HEADER}" | grep '^X-Auth-Token:' | sed -e 's/X-Auth-Token:[[:space:]]*\(.*\)[[:space:]]*$/\1/g') + if [ "X${_TEST_OPENSTACK_TOKEN}" = "XTEST_USER_OPENSTACK_UNSCOPED_TOKEN" ]; then + # + # Unscoped Token + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"audit_ids\":[\"TEST_TOKEN_AUDIT_ID\"],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"methods\":[\"password\"],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_USER_ID\",\"name\":\"test\",\"password_expires_at\":null}}}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + + elif [ "X${_TEST_OPENSTACK_TOKEN}" = "XTEST_USER_OPENSTACK_SCOPED_TOKEN" ]; then + # + # Scoped Token + # + util_search_urlarg "nocatalog" "${_DUMMY_URL_ARGS}" + if [ $? -eq 0 ]; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"audit_ids\":[\"TEST_TOKEN_AUDIT_ID\"],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"is_domain\":false,\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"methods\":[\"token\",\"password\"],\"project\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_PROJECT_ID\",\"name\":\"demo\"},\"roles\":[{\"id\":\"TEST_MEMBER_ROLE_ID\",\"name\":\"member\"},{\"id\":\"TEST_READER_ROLE_ID\",\"name\":\"reader\"},{\"id\":\"TEST_OTHER_ROLE_ID\",\"name\":\"anotherrole\"}],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_USER_ID\",\"name\":\"test\",\"password_expires_at\":null}}}" + else + 
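+						#
+						# [NOTE]
+						# "nocatalog" is not specified, so the scoped token
+						# information is returned together with the service
+						# catalog, mirroring the POST response above.
+						#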
_UTIL_DBAAS_RESPONSE_CONTENT="{\"token\":{\"audit_ids\":[\"TEST_TOKEN_AUDIT_ID\"],\"catalog\":[{\"endpoints\":[{\"id\":\"TEST_OP_NOVA_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/compute/v2.1\"}],\"id\":\"TEST_OP_NOVA_MAIN_ID\",\"name\":\"nova\",\"type\":\"compute\"},{\"endpoints\":[{\"id\":\"TEST_OP_PUB_KEYSTONE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"},{\"id\":\"TEST_OP_ADMIN_KEYSTONE_ID\",\"interface\":\"admin\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"}],\"id\":\"TEST_OP_MAIN_KEYSTONE_ID\",\"name\":\"keystone\",\"type\":\"identity\"},{\"endpoints\":[{\"id\":\"TEST_OP_NEUTRON_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost:9696/\"}],\"id\":\"TEST_OP_MAIN_NEUTRON_ID\",\"name\":\"neutron\",\"type\":\"network\"},{\"endpoints\":[{\"id\":\"TEST_OP_GLANCE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/image\"}],\"id\":\"TEST_OP_MAIN_GLANCE_ID\",\"name\":\"glance\",\"type\":\"image\"}],\"expires_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"is_domain\":false,\"issued_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"methods\":[\"token\",\"password\"],\"project\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_PROJECT_ID\",\"name\":\"demo\"},\"roles\":[{\"id\":\"TEST_MEMBER_ROLE_ID\",\"name\":\"member\"},{\"id\":\"TEST_READER_ROLE_ID\",\"name\":\"reader\"},{\"id\":\"TEST_OTHER_ROLE_ID\",\"name\":\"anotherrole\"}],\"user\":{\"domain\":{\"id\":\"default\",\"name\":\"Default\"},\"id\":\"TEST_USER_ID\",\"name\":\"test\",\"password_expires_at\":null}}}" + fi + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + elif [ "X${_TEST_OPENSTACK_TOKEN}" != "X" ]; then + # + # Unknown token string -> so it returns expired + # + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Get OpenStack Token information, token is unknown(${_TEST_OPENSTACK_TOKEN})." + return 2 + fi + done + prn_err "\"X-Auth-Token\" header is not specified." + return 2 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif [ "X${_DUMMY_URL_PATH}" = "X/v3/auth/catalog" ]; then + #------------------------------------------------------ + # OpenStack Endpoint Catalog + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Get OpenStack Token Info + # + if [ $# -lt 1 ]; then + prn_err "\"X-Auth-Token\" header is not specified." 
+ return 2 + fi + + _UTIL_DBAAS_RESPONSE_CONTENT="{\"catalog\":[{\"endpoints\":[{\"id\":\"TEST_OP_NOVA_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/compute/v2.1\"}],\"id\":\"TEST_OP_NOVA_MAIN_ID\",\"name\":\"nova\",\"type\":\"compute\"},{\"endpoints\":[{\"id\":\"TEST_OP_PUB_KEYSTONE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"},{\"id\":\"TEST_OP_ADMIN_KEYSTONE_ID\",\"interface\":\"admin\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/identity\"}],\"id\":\"TEST_OP_MAIN_KEYSTONE_ID\",\"name\":\"keystone\",\"type\":\"identity\"},{\"endpoints\":[{\"id\":\"TEST_OP_NEUTRON_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost:9696/\"}],\"id\":\"TEST_OP_MAIN_NEUTRON_ID\",\"name\":\"neutron\",\"type\":\"network\"},{\"endpoints\":[{\"id\":\"TEST_OP_GLANCE_ID\",\"interface\":\"public\",\"region\":\"RegionOne\",\"region_id\":\"RegionOne\",\"url\":\"http://localhost/image\"}],\"id\":\"TEST_OP_MAIN_GLANCE_ID\",\"name\":\"glance\",\"type\":\"image\"}]}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif pecho -n "${_DUMMY_URL_PATH}" | grep -q '^/v3/users/[^/]*/projects'; then + #------------------------------------------------------ + # OpenStack Project information + #------------------------------------------------------ + _UTIL_DBAAS_USER_ID=$(pecho -n "${_DUMMY_URL_PATH}" | grep '^/v3/users/[^/]*/projects' | sed 's#^/v3/users/\([^/]*\)/projects$#\1#g') + if [ "X${_UTIL_DBAAS_USER_ID}" != "XTEST_USER_ID" ]; then + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Get User Project Information : Unknown user id(${_UTIL_DBAAS_USER_ID})." + return 2 + fi + + _UTIL_DBAAS_RESPONSE_CONTENT="{\"links\":{\"next\":null,\"previous\":null,\"self\":\"http://localhost/identity/v3/users/${_UTIL_DBAAS_USER_ID}/projects\"},\"projects\":[{\"description\":\"\",\"domain_id\":\"default\",\"enabled\":true,\"id\":\"TEST_TENANT_ID\",\"is_domain\":false,\"links\":{\"self\":\"http://localhost/identity/v3/projects/TEST_TENANT_ID\"},\"name\":\"test1\",\"options\":{},\"parent_id\":\"default\",\"tags\":[]}]}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + elif [ "X${_DUMMY_URL_PATH}" = "X/v2.0/security-groups" ]; then + #------------------------------------------------------ + # OpenStack Security Group + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Security Group Information + # + if [ "X${K2HR3CLI_SUBCOMMAND}" = "X${_DATABASE_COMMAND_SUB_CREATE}" ]; then + # + # In the case of Create command, a non-existent response is returned. 
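+					# That is, only the "default" security group is included in
+					# the list, so the create processing treats the cluster's own
+					# server/slave security groups as not existing yet.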
+ # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"security_groups\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"Default security group\",\"id\":\"TEST_SECGROUP_DEFAULT_ID\",\"name\":\"default\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":1,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":null,\"direction\":\"egress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_IPV4_ID\",\"port_range_max\":null,\"port_range_min\":null,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":null,\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_DEFAULT_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}]}" + else + # + # If it is other than Create, it returns an existing response. + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"security_groups\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"Default security group\",\"id\":\"TEST_SECGROUP_DEFAULT_ID\",\"name\":\"default\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":1,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":null,\"direction\":\"egress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_IPV4_ID\",\"port_range_max\":null,\"port_range_min\":null,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":null,\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_DEFAULT_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"},{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"security group for k2hdkc testcluster server node\",\"id\":\"TEST_SECGROUP_SERVER_ID\",\"name\":\"${_TEST_K2HDKC_CLUSTER_NAME}-k2hdkc-server-sec\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":4,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx server node control port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SERVER_IPV4_2_ID\",\"port_range_max\":98021,\"port_range_min\":98021,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SERVER_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"},{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx server node port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SERVER_IPV4_1_ID\",\"port_range_max\":98020,\"port_range_min\":98020,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SERVER_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"},{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"security group for k2hdkc testcluster slave 
node\",\"id\":\"TEST_SECGROUP_SLAVE_ID\",\"name\":\"${_TEST_K2HDKC_CLUSTER_NAME}-k2hdkc-slave-sec\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":3,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx slave node control port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SLAVE_IPV4_1_ID\",\"port_range_max\":98031,\"port_range_min\":98031,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SLAVE_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}]}" + fi + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + elif [ "X${_DUMMY_METHOD}" = "XPOST" ]; then + # + # Create Security Group + # + # [NOTE] + # The caller only checks the ID and does not return exact data. + # + _UTIL_DBAAS_SECGRP_TYPE=$(pecho -n "${_DUMMY_BODY_STRING}" | grep '"name":[[:space:]]*".*",.*$' | sed -e 's/^.*"name":[[:space:]]*"\([^\"]*\)",.*$/\1/g' -e 's/^[^-]*-k2hdkc-\([^-]*\)-.*$/\1/g') + + if [ "X${_UTIL_DBAAS_SECGRP_TYPE}" = "Xserver" ]; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"security_groups\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"security group for k2hdkc testcluster server node\",\"id\":\"TEST_SECGROUP_SERVER_ID\",\"name\":\"${_TEST_K2HDKC_CLUSTER_NAME}-k2hdkc-server-sec\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":4,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx server node control port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SERVER_IPV4_2_ID\",\"port_range_max\":98021,\"port_range_min\":98021,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SERVER_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"},{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx server node port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SERVER_IPV4_1_ID\",\"port_range_max\":98020,\"port_range_min\":98020,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SERVER_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}]}" + else + _UTIL_DBAAS_RESPONSE_CONTENT="{\"security_groups\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"security group for k2hdkc testcluster slave node\",\"id\":\"TEST_SECGROUP_SLAVE_ID\",\"name\":\"${_TEST_K2HDKC_CLUSTER_NAME}-k2hdkc-slave-sec\",\"project_id\":\"TEST_TENANT_ID\",\"revision_number\":3,\"security_group_rules\":[{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"k2hdkc/chmpx slave node control 
port\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_SECRULE_SLAVE_IPV4_1_ID\",\"port_range_max\":98031,\"port_range_min\":98031,\"project_id\":\"TEST_TENANT_ID\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"TEST_SECGROUP_SLAVE_ID\",\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}],\"stateful\":true,\"tags\":[],\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}]}" + fi + + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=201 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif pecho -n "${_DUMMY_URL_PATH}" | grep -q "^/v2.0/security-groups/"; then + #------------------------------------------------------ + # OpenStack Security Group + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XDELETE" ]; then + # + # Delete Security Group + # + _UTIL_DBAAS_RESPONSE_CONTENT="" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=204 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif [ "X${_DUMMY_URL_PATH}" = "X/v2.0/security-group-rules" ]; then + #------------------------------------------------------ + # OpenStack Security Group Rule + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XPOST" ]; then + # + # Create Security Group Rule + # + # [NOTE] + # The caller only checks the ID and does not return exact data. + # + _UTIL_DBAAS_SECGRP_DESC=$(pecho -n "${_DUMMY_BODY_STRING}" | sed 's/^.*"description":[[:space:]]*"\([^"]*\)".*$/\1/g') + _UTIL_DBAAS_SECGRP_MAX=$(pecho -n "${_DUMMY_BODY_STRING}" | sed 's/^.*"port_range_max":[[:space:]]*\([^,]*\),.*$/\1/g') + _UTIL_DBAAS_SECGRP_MIN=$(pecho -n "${_DUMMY_BODY_STRING}" | sed 's/^.*"port_range_min":[[:space:]]*\([^,]*\),.*$/\1/g') + _UTIL_DBAAS_SECGRP_ID=$(pecho -n "${_DUMMY_BODY_STRING}" | sed 's/^.*"security_group_id":[[:space:]]*"\([^"]*\)".*$/\1/g') + + _UTIL_DBAAS_RESPONSE_CONTENT="{\"security_group_rule\":{\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"description\":\"${_UTIL_DBAAS_SECGRP_DESC}\",\"direction\":\"ingress\",\"ethertype\":\"IPv4\",\"id\":\"TEST_TENANT_ID\",\"port_range_max\":${_UTIL_DBAAS_SECGRP_MAX},\"port_range_min\":${_UTIL_DBAAS_SECGRP_MIN},\"project_id\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"protocol\":\"tcp\",\"remote_group_id\":null,\"remote_ip_prefix\":null,\"revision_number\":0,\"security_group_id\":\"${_UTIL_DBAAS_SECGRP_ID}\",\"tenant_id\":\"TEST_TENANT_ID\",\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\"}}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=201 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." 
+ return 2 + fi + + elif [ "X${_DUMMY_URL_PATH}" = "X/os-keypairs" ]; then + #------------------------------------------------------ + # OpenStack Keypair + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Get Keypair list + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"keypairs\":[{\"keypair\":{\"fingerprint\":\"00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00\",\"name\":\"TEST_KEYPAIR\",\"public_key\":\"ssh-rsa test_keypair_public_key_contents testuser@localhost\"}}]}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif [ "X${_DUMMY_URL_PATH}" = "X/v2/images" ]; then + #------------------------------------------------------ + # OpenStack Glance + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Get Image list + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"first\":\"/v2/images\",\"images\":[{\"checksum\":\"TEST_IMAGE_CHECKSUM\",\"container_format\":\"bare\",\"created_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"disk_format\":\"qcow2\",\"file\":\"/v2/images/TEST_IMAGE_ID/file\",\"id\":\"TEST_IMAGE_ID\",\"min_disk\":0,\"min_ram\":0,\"name\":\"TEST_IMAGE\",\"os_hash_algo\":\"sha512\",\"os_hash_value\":\"TEST_OS_HASH_VALUE\",\"os_hidden\":false,\"owner\":\"TEST_USER_ID\",\"owner_specified.openstack.md5\":\"\",\"owner_specified.openstack.object\":\"images/TEST_IMAGE\",\"owner_specified.openstack.sha256\":\"\",\"protected\":false,\"schema\":\"/v2/schemas/image\",\"self\":\"/v2/images/TEST_IMAGE_ID\",\"size\":327680000,\"status\":\"active\",\"tags\":[],\"updated_at\":\"${_UTIL_DBAAS_ISSUED_AT_DATE}\",\"virtual_size\":null,\"visibility\":\"public\"}],\"schema\":\"/v2/schemas/images\"}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif [ "X${_DUMMY_URL_PATH}" = "X/flavors/detail" ]; then + #------------------------------------------------------ + # OpenStack Flavor + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Get Flavor list + # + _UTIL_DBAAS_RESPONSE_CONTENT="{\"flavors\":[{\"OS-FLV-DISABLED:disabled\":false,\"OS-FLV-EXT-DATA:ephemeral\":0,\"disk\":10,\"id\":\"TEST_FLAVOR_ID\",\"links\":[{\"href\":\"http://localhost/compute/v2.1/TEST_TENANT_ID/flavors/TEST_FLAVOR_ID\",\"rel\":\"self\"},{\"href\":\"http://localhost/compute/TEST_TENANT_ID/flavors/TEST_FLAVOR_ID\",\"rel\":\"bookmark\"}],\"name\":\"TEST_FLAVOR\",\"os-flavor-access:is_public\":true,\"ram\":2048,\"rxtx_factor\":1.0,\"swap\":\"\",\"vcpus\":2}]}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." 
+ return 2 + fi + + elif pecho -n "${_DUMMY_URL_PATH}" | grep -q "^/servers$"; then + #------------------------------------------------------ + # OpenStack Create Servers (/servers) + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XPOST" ]; then + # + # Create Servers + # + _UTIL_DBAAS_SERVER_TENANT_ID=$(pecho -n "${_DUMMY_URL_PATH}" | sed 's#^/\([^/]*\)/servers$#\1#g') + + _UTIL_DBAAS_RESPONSE_CONTENT="{\"server\":{\"id\":\"TESTSERVER_ID\",\"links\":[{\"rel\":\"self\",\"href\":\"http://localhost/compute/v2.1/${_UTIL_DBAAS_SERVER_TENANT_ID}/servers/TESTSERVER_ID\"},{\"rel\":\"bookmark\",\"href\":\"http://localhost/compute/${_UTIL_DBAAS_SERVER_TENANT_ID}/servers/TESTSERVER_ID\"}],\"OS-DCF:diskConfig\":\"MANUAL\",\"security_groups\":[{\"name\":\"default\"}],\"adminPass\":\"TEST_ADMIN_PASS\"}}" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=202 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + elif pecho -n "${_DUMMY_URL_PATH}" | grep -q "^/servers/.*$"; then + #------------------------------------------------------ + # OpenStack Delete Server (/servers/) + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XDELETE" ]; then + # + # Delete Server + # + _UTIL_DBAAS_RESPONSE_CONTENT="" + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=204 + return 0 + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_METHOD}: ${_DUMMY_URL_PATH})." + return 2 + fi + + else + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=400 + prn_err "Unknown URL(OpenStack URI: ${_DUMMY_URL_PATH})." 
+ return 2 + fi + + else + #------------------------------------------------------ + # Request for K2HR3 API (Override k2hr3 test response) + #------------------------------------------------------ + if pecho -n "${_DUMMY_URL_PATH}" | grep -v "^/v1/role/token/" | grep -v "/v1/user/tokens" | grep -q "^/v1/role/"; then + #------------------------------------------------------ + # K2HR3 Role API + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Set role information + # + if pecho -n "${_DUMMY_URL_ARGS}" | grep -q "expand=true"; then + _UTIL_DBAAS_EXPAND=1 + else + _UTIL_DBAAS_EXPAND=0 + fi + if pecho -n "${_DUMMY_URL_PATH}" | grep -q "server"; then + if [ "${_UTIL_DBAAS_EXPAND}" -eq 0 ]; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"role\":{\"policies\":[],\"aliases\":[],\"hosts\":{\"hostnames\":[],\"ips\":[\"127.0.0.1 * TESTSERVER_ID openstack-auto-v1 TESTSERVER\"]}}}" + else + _UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"role\":{\"policies\":[\"yrn:yahoo:::demo:policy:${_TEST_K2HDKC_CLUSTER_NAME}\"]}}" + fi + else + if [ "${_UTIL_DBAAS_EXPAND}" -eq 0 ]; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"role\":{\"policies\":[],\"aliases\":[],\"hosts\":{\"hostnames\":[],\"ips\":[\"127.0.0.1 * TESTSLAVE_ID openstack-auto-v1 TESTSLAVE\"]}}}" + else + _UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"role\":{\"policies\":[\"yrn:yahoo:::demo:policy:${_TEST_K2HDKC_CLUSTER_NAME}\"]}}" + fi + fi + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + fi + + elif pecho -n "${_DUMMY_URL_PATH}" | grep -q "^/v1/resource/"; then + #------------------------------------------------------ + # K2HR3 Resource API + #------------------------------------------------------ + if [ "X${_DUMMY_METHOD}" = "XGET" ]; then + # + # Set role information + # + if pecho -n "${_DUMMY_URL_ARGS}" | grep -q "expand=true"; then + if pecho -n "${_DUMMY_URL_PATH}" | grep -q "server"; then + _UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"resource\":{\"string\":\"TEST_DUMMY_RESOURCE_FOR_SERVER\",\"object\":null,\"keys\":{\"cluster-name\":\"${_TEST_K2HDKC_CLUSTER_NAME}\",\"chmpx-server-port\":98020,\"chmpx-server-ctlport\":98021,\"chmpx-slave-ctlport\":98031,\"k2hdkc-dbaas-add-user\":1,\"k2hdkc-dbaas-proc-user\":\"testrunner\",\"chmpx-mode\":\"SERVER\",\"k2hr3-init-packages\":\"\",\"k2hr3-init-packagecloud-packages\":\"k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc\",\"k2hr3-init-systemd-packages\":\"chmpx.service,k2hdkc.service,k2hr3-get-resource.timer\",\"host_key\":\"127.0.0.1,0,TESTSERVER_ID\",\"one_host\":{\"host\":\"127.0.0.1\",\"port\":0,\"extra\":\"openstack-auto-v1\",\"tag\":\"TESTSERVER\",\"cuk\":\"TESTSERVER_ID\"}},\"expire\":null}}" + else + 
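+					#
+					# [NOTE]
+					# Resource for the slave role: chmpx-mode is SLAVE and the
+					# dummy host entry points at TESTSLAVE.
+					#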
_UTIL_DBAAS_RESPONSE_CONTENT="{\"result\":true,\"message\":null,\"resource\":{\"string\":\"TEST_DUMMY_RESOURCE_FOR_SLAVE\",\"object\":null,\"keys\":{\"cluster-name\":\"${_TEST_K2HDKC_CLUSTER_NAME}\",\"chmpx-server-port\":98020,\"chmpx-server-ctlport\":98021,\"chmpx-slave-ctlport\":98031,\"k2hdkc-dbaas-add-user\":1,\"k2hdkc-dbaas-proc-user\":\"testrunner\",\"chmpx-mode\":\"SLAVE\",\"k2hr3-init-packages\":\"\",\"k2hr3-init-packagecloud-packages\":\"k2hdkc-dbaas-override-conf,k2hr3-get-resource,chmpx,k2hdkc\",\"k2hr3-init-systemd-packages\":\"chmpx.service,k2hdkc.service,k2hr3-get-resource.timer\",\"host_key\":\"127.0.0.1,0,TESTSLAVE_ID\",\"one_host\":{\"host\":\"127.0.0.1\",\"port\":0,\"extra\":\"openstack-auto-v1\",\"tag\":\"TESTSLAVE\",\"cuk\":\"TESTSLAVE_ID\"}},\"expire\":null}}" + fi + pecho "${_UTIL_DBAAS_RESPONSE_CONTENT}" > "${K2HR3CLI_REQUEST_RESULT_FILE}" + + # shellcheck disable=SC2034 + K2HR3CLI_REQUEST_EXIT_CODE=200 + return 0 + fi + fi + fi + return 3 + fi + + return 0 +} + +# +# Local variables: +# tab-width: 4 +# c-basic-offset: 4 +# End: +# vim600: noexpandtab sw=4 ts=4 fdm=marker +# vim<600: noexpandtab sw=4 ts=4 +#