
[DEV] cortx-monitor 3-Node VM Provisioning (Automated)


3 Node VM Provisioning

Component: SSPL

Solution: LR2

Cortx version: 2

SSPL version: 2.0.0

Steps to deploy SSPL on VM

  1. Prepare template_values.3-node.txt

    Refer to the sample file below with example values; a quick validation sketch follows the notes.

[root@ssc-vm-2518 ~]# cat template_values.3-node.txt
# 3-node config
TMPL_CLUSTER_ID=C01
TMPL_NODE_NAME_1=srvnode-1
TMPL_NODE_NAME_2=srvnode-2
TMPL_NODE_NAME_3=srvnode-3
TMPL_ENCLOSURE_NAME_1=enclosure-1
TMPL_ENCLOSURE_NAME_2=enclosure-2
TMPL_ENCLOSURE_NAME_3=enclosure-3

TMPL_MACHINE_ID_1=30512e5ae6df9f1ea02327bab45e499d
TMPL_BMC_IP_1=
TMPL_BMC_SECRET_1=
TMPL_BMC_USER_1=
TMPL_HOSTNAME_1=ssc-vm-2217.colo.seagate.com
TMPL_DATA_PRIVATE_FQDN_1=localhost
TMPL_DATA_PRIVATE_INTERFACE_1=
TMPL_DATA_PUBLIC_FQDN_1=localhost
TMPL_DATA_PUBLIC_INTERFACE_1=
TMPL_MGMT_INTERFACE_1=eth0
TMPL_MGMT_PUBLIC_FQDN_1=localhost
TMPL_NODE_ID_1=N01
TMPL_RACK_ID_1=R01
TMPL_SITE_ID_1=S01
TMPL_ENCLOSURE_ID_1=enc_30512e5ae6df9f1ea02327bab45e499d
TMPL_SERVER_NODE_TYPE_1=VM

TMPL_MACHINE_ID_2=40512e5ae6df9f1ea02327bab45e499e
TMPL_BMC_IP_2=
TMPL_BMC_SECRET_2=
TMPL_BMC_USER_2=
TMPL_HOSTNAME_2=ssc-vm-2518.colo.seagate.com
TMPL_DATA_PRIVATE_FQDN_2=localhost
TMPL_DATA_PRIVATE_INTERFACE_2=
TMPL_DATA_PUBLIC_FQDN_2=localhost
TMPL_DATA_PUBLIC_INTERFACE_2=
TMPL_MGMT_INTERFACE_2=eth0
TMPL_MGMT_PUBLIC_FQDN_2=localhost
TMPL_NODE_ID_2=N02
TMPL_RACK_ID_2=R02
TMPL_SITE_ID_2=S02
TMPL_ENCLOSURE_ID_2=enc_40512e5ae6df9f1ea02327bab45e499e
TMPL_SERVER_NODE_TYPE_2=VM

TMPL_MACHINE_ID_3=50512e5ae6df9f1ea02327bab45e499f
TMPL_BMC_IP_3=
TMPL_BMC_SECRET_3=
TMPL_BMC_USER_3=
TMPL_HOSTNAME_3=ssc-vm-2524.colo.seagate.com
TMPL_DATA_PRIVATE_FQDN_3=localhost
TMPL_DATA_PRIVATE_INTERFACE_3=
TMPL_DATA_PUBLIC_FQDN_3=localhost
TMPL_DATA_PUBLIC_INTERFACE_3=
TMPL_MGMT_INTERFACE_3=
TMPL_MGMT_PUBLIC_FQDN_3=localhost
TMPL_NODE_ID_3=N03
TMPL_RACK_ID_3=R03
TMPL_SITE_ID_3=S03
TMPL_ENCLOSURE_ID_3=enc_50512e5ae6df9f1ea02327bab45e499f
TMPL_SERVER_NODE_TYPE_3=VM

TMPL_CONTROLLER_SECRET_1="gAAAAABgbcFLyZlF2EkDTTgIqFwd-KNSX_MWOJSdPI4xTIDdUPu11PtMbJpfzYKunjMTHmEsmHGzTTIK5CXkiY1H5cJCZZTCLQ=="
TMPL_PRIMARY_CONTROLLER_IP_1=10.0.0.2
TMPL_PRIMARY_CONTROLLER_PORT_1=80
TMPL_SECONDARY_CONTROLLER_IP_1=10.0.0.3
TMPL_SECONDARY_CONTROLLER_PORT_1=80
TMPL_CONTROLLER_TYPE_1=Gallium
TMPL_CONTROLLER_USER_1=manage
TMPL_ENCLOSURE_TYPE_1=virtual

TMPL_CONTROLLER_SECRET_2="gAAAAABgbcFLyZlF2EkDTTgIqFwd-KNSX_MWOJSdPI4xTIDdUPu11PtMbJpfzYKunjMTHmEsmHGzTTIK5CXkiY1H5cJCZZTCLQ=="
TMPL_PRIMARY_CONTROLLER_IP_2=10.0.0.2
TMPL_PRIMARY_CONTROLLER_PORT_2=80
TMPL_SECONDARY_CONTROLLER_IP_2=10.0.0.3
TMPL_SECONDARY_CONTROLLER_PORT_2=80
TMPL_CONTROLLER_TYPE_2=Gallium
TMPL_CONTROLLER_USER_2=manage
TMPL_ENCLOSURE_TYPE_2=virtual

TMPL_CONTROLLER_SECRET_3="gAAAAABgbcFLyZlF2EkDTTgIqFwd-KNSX_MWOJSdPI4xTIDdUPu11PtMbJpfzYKunjMTHmEsmHGzTTIK5CXkiY1H5cJCZZTCLQ=="
TMPL_PRIMARY_CONTROLLER_IP_3=10.0.0.2
TMPL_PRIMARY_CONTROLLER_PORT_3=80
TMPL_SECONDARY_CONTROLLER_IP_3=10.0.0.3
TMPL_SECONDARY_CONTROLLER_PORT_3=80
TMPL_CONTROLLER_TYPE_3=Gallium
TMPL_CONTROLLER_USER_3=manage
TMPL_ENCLOSURE_TYPE_3=virtual

Note:

  • Use localhost if the management/data private and public FQDNs are not available and you wish to skip network validation on them.
  • BMC information such as IP, USER, and SECRET can be left blank for VMs.
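
Before running the deployment, a quick way to catch empty required values is to source the template and echo the fields that matter. This is a minimal sketch; which fields are mandatory depends on your setup, so treat the variable list here as illustrative:

source /root/template_values.3-node.txt

# Bash indirect expansion (${!v}) prints each variable's value; empty output flags a missing value
for v in TMPL_CLUSTER_ID TMPL_MACHINE_ID_1 TMPL_HOSTNAME_1 TMPL_MACHINE_ID_2 TMPL_HOSTNAME_2 TMPL_MACHINE_ID_3 TMPL_HOSTNAME_3; do
    echo "$v=${!v}"
done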

  2. Execute the following commands on all 3 nodes

    -N takes a comma-separated list of node FQDNs to form a cluster, e.g. -N <node-1_fqdn>,<node-2_fqdn>,<node-3_fqdn>


# Download the deployment helper script from the cortx-monitor repository
CORTX_MONITOR_BASE_URL="https://raw.githubusercontent.com/Seagate/cortx-monitor/main"

curl $CORTX_MONITOR_BASE_URL/low-level/files/opt/seagate/sspl/setup/sspl_dev_deploy -o sspl_dev_deploy

chmod a+x sspl_dev_deploy

# Remove artifacts left over from any previous deployment
./sspl_dev_deploy --cleanup

# Point at the target build; replace <build_number> with the desired build
BUILD_URL="http://cortx-storage.colo.seagate.com/releases/cortx/github/main/centos-7.8.2003/<build_number>/prod/"

# Install prerequisites, then deploy SSPL across the 3 nodes
./sspl_dev_deploy --prereq  -T $BUILD_URL  -N <node-1_fqdn>,<node-2_fqdn>,<node-3_fqdn>

./sspl_dev_deploy --deploy  -T $BUILD_URL  -N <node-1_fqdn>,<node-2_fqdn>,<node-3_fqdn> --variable_file /root/template_values.3-node.txt --storage_type RBOD --server_type HW

# Start the SSPL service
systemctl start sspl-ll
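
To confirm the service came up on every node, a minimal check such as the one below can be run from any one node. This sketch assumes passwordless SSH between the nodes and uses the same FQDN placeholders as above:

# Query the sspl-ll service state on each node over SSH
for node in <node-1_fqdn> <node-2_fqdn> <node-3_fqdn>; do
    echo "== $node =="
    ssh "$node" "systemctl is-active sspl-ll"
done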

If you are installing local RPMs instead of build-specific RPMs, use the -L option instead of -T.

Example:

./sspl_dev_deploy --prereq -L /root/MYRPMS -N <node-1_fqdn>,<node-2_fqdn>,<node-3_fqdn>

./sspl_dev_deploy --deploy -L /root/MYRPMS -N <node-1_fqdn>,<node-2_fqdn>,<node-3_fqdn> --variable_file /root/template_values.3-node.txt --storage_type RBOD --server_type HW
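
The directory passed to -L only needs to hold the SSPL RPMs to be installed; /root/MYRPMS is just an example path. A quick way to stage and confirm them, assuming the RPMs were built locally (the source path below is a placeholder):

mkdir -p /root/MYRPMS
cp /path/to/locally/built/*.rpm /root/MYRPMS/   # stage the RPMs
ls /root/MYRPMS/*.rpm                           # confirm they are in place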

Steps to run sanity test


# Read the global config URL out of the SSPL config
url=yaml:///etc/sspl.conf

global_config_url=$(conf $url get "SYSTEM_INFORMATION>global_config_copy_url")

# The value comes back wrapped in brackets and quotes; strip them
global_config_url=$(echo $global_config_url | tr -d '[]"')

# Run the alerts plan, then the dev sanity plan
/opt/seagate/cortx/sspl/bin/sspl_setup test --config $global_config_url --plan alerts

/opt/seagate/cortx/sspl/bin/sspl_setup test --config $global_config_url --plan dev_sanity
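
Test progress can also be followed in the SSPL log; the path below is the usual SSPL log location, so adjust it if your install differs:

tail -f /var/log/cortx/sspl/sspl.log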

Steps to verify alerts propagate through all servers in the cluster

On each node, execute the command below from the Kafka installation path, e.g. /opt/kafka/kafka_2.13-2.7.0/


./bin/kafka-console-consumer.sh --bootstrap-server <node-1_fqdn>:9092,<node-2_fqdn>:9092,<node-3_fqdn>:9092 --topic alerts
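
If no alerts appear, it helps to first confirm the alerts topic exists, and optionally to publish a test message from one node and watch it arrive in the consumers on the other nodes. A sketch, run from the same Kafka directory:

# List topics known to the cluster; "alerts" should be present
./bin/kafka-topics.sh --list --bootstrap-server <node-1_fqdn>:9092

# Publish a test message; every node's console consumer should print it
echo '{"test": "alert-propagation"}' | ./bin/kafka-console-producer.sh --bootstrap-server <node-1_fqdn>:9092 --topic alerts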