# passwd.ini — cluster deployment configuration: sudo/ssh credentials, host
# lists, and package (JDK/Scala/Hadoop/Spark/Cassandra/CouchDB) paths consumed
# by the deployment tasks. Comments use '#'; one "key = value" per line.
[default]
# Which section below is active (e.g. "server"); tasks read that section.
activeSession = server
# The sudo user; only used for sudo tasks.
root = ubuntu
# Password of the sudoer.
# WARNING: plaintext secret stored in config — restrict file permissions,
# or prefer pem_key below so the password can be left empty.
passwd = huangxd1989
# Pem file path; if it is not empty, passwd is ignored.
pem_key =

# Example host section; selected via activeSession in [default].
[server]
# User name; if the createUser task runs, this name becomes the new user.
newuser = hxd
# Password of newuser.
# WARNING: plaintext secret stored in config — restrict file permissions,
# or prefer pem_key below so the password can be left empty.
passwd = hxd__hxd__1989
# Pem file path; if it is not empty, passwd is ignored.
pem_key =
# Cluster public IPs, comma-separated.
hosts = 192.144.187.79,58.87.89.39,123.206.18.227
# Private IPs, comma-separated.
private_hosts = 10.8.4.53,10.8.4.68,10.8.4.106
# Hostnames of the cluster; the count must equal the count of hosts.
# Used by the changeHostname task.
hostnames = node1,node2,node3
# Only used by appendHosts, and by the ssh task, when adding new nodes
# to an existing and already-initialized cluster.
existed_hosts =
existed_private_hosts =
existed_hostnames =
# Local JDK archive; relative paths resolve against ./
jdk_source_file = /Users/hxd/Downloads/jdk-8u191-linux-x64.tar.gz
# Folder name found inside the JDK tar once it is unpacked.
jdk_folder = jdk1.8.0_191
# Local Scala archive; relative paths resolve against ./
scala_source_file = /Users/hxd/Downloads/scala-2.12.6.tgz
# Folder name found inside the Scala tar once it is unpacked.
scala_folder = scala-2.12.6
# The NTP server address you want to sync with.
ntp_server =
# The network allowed to sync with your NTP server.
ntp_net =
# The netmask of the network allowed to sync with your NTP server.
ntp_net_mask =
# Admin IP. Firstly, it must be one of hosts; secondly, it is granted
# password-free ssh to the other nodes (if ssh2_OneToAll is used instead of ssh2).
admin_ip = 192.144.187.79



## The sections below configure individual systems (some are not big data systems).
[couch]
# CouchDB deployment settings.
# NOTE(review): empty values appear intentional placeholders — confirm how the
# deploy tasks treat blanks (skip vs. fall back) before relying on them.
newuser =
passwd =
hosts=
hostnames=
jdk_source_file=
jdk_folder=
# IP address CouchDB binds to. If not 0.0.0.0, only one server installs successfully.
bind_address=


[collectd]
# Target time-series database endpoint for collectd metrics.
# NOTE(review): "inflexdb" is likely a misspelling of InfluxDB; the key names
# are kept exactly as the consuming script expects them.
inflexdb_ip=
inflexdb_port=

# NOTE: duplicated copies of the [couch]/[collectd]/[cassandra]/[hadoop]
# blocks were consolidated — duplicate sections and keys are handled
# inconsistently across INI parsers (merge vs. last-wins vs. error).
[cassandra]
# Cassandra archive, unpacked folder name, data directory, and one seed node IP.
cassandra_file=
cassandra_folder=
data_folder=
one_seed_ip=

[hadoop]
# Local Hadoop archive; relative paths resolve against ./
hadoop_file=/Users/hxd/Downloads/hadoop-2.8.5.tar.gz
# Folder name found inside the Hadoop tar once it is unpacked.
hadoop_folder=hadoop-2.8.5
# Hadoop data directory on the nodes.
data_folder=/home/hxd/hadoop_data
# Master node IP (matches the first entry of private_hosts in [server]).
master_ip=10.8.4.53
# Master node public IP (matches the first entry of hosts in [server]).
master_public_ip=192.144.187.79
# Worker node IPs, comma-separated.
slaves=10.8.4.53,10.8.4.68,10.8.4.106
# NOTE(review): presumably the cluster name used when formatting HDFS — confirm
# against the deploy script.
format_cluster_name=test


[spark]
# Local Spark archive; relative paths resolve against ./
spark_file=/Users/zhangzhengmei/Desktop/Downloads/spark-2.4.0-bin-hadoop2.7.tar
# Folder name found inside the Spark tar once it is unpacked.
spark_folder=spark-2.4.0-bin-hadoop2.7
# Spark work directory on the nodes.
spark_work=/home/hxd/spark_work
# Master node IP (matches the first entry of private_hosts in [server]).
master_ip=10.8.4.53
# Master node public IP (matches the first entry of hosts in [server]).
master_public_ip=192.144.187.79
# Worker node IPs, comma-separated.
slaves=10.8.4.53,10.8.4.68,10.8.4.106
# Hadoop installation directory on the nodes (Hadoop folder under the user home).
hadoop_dir=/home/hxd/hadoop-2.8.5