# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
#   The commands are executed on every node provided
#
# - target=local
#   The commands are executed from the node hosting phd. When not
#   using phd, they should be run from some other independent host
#   (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
#   The commands are executed on the Nth node provided.
#   For example, to run on only the first node, use target=$PHD_ENV_nodes1
#
# Tasks to be performed at this step include:
# - installing and configuring keystone on all nodes
# - creating the pacemaker resource that manages the keystone service
# - populating the service catalog (users, roles, services and endpoints)
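#
# As an illustration only (this example block is not part of the scenario),
# a command-block scoped to the second node would look like:
#
#   target=$PHD_ENV_nodes2
#   ....
#   echo "this command runs only on the second node"
#   ....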
##############################
# Scenario Variables Section #
##############################
= VARIABLES =
PHD_VAR_deployment
PHD_VAR_osp_configdir
PHD_VAR_secrets_keystone_secret
PHD_VAR_network_internal
PHD_VAR_network_hosts_rabbitmq
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
yum install -y openstack-keystone openstack-utils python-openstackclient
mkdir -p ${PHD_VAR_osp_configdir}
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token ${PHD_VAR_secrets_keystone_secret}
openstack-config --set /etc/keystone/keystone.conf oslo_messaging_rabbit rabbit_hosts ${PHD_VAR_network_hosts_rabbitmq}
openstack-config --set /etc/keystone/keystone.conf oslo_messaging_rabbit rabbit_ha_queues true
openstack-config --set /etc/keystone/keystone.conf oslo_messaging_rabbit heartbeat_timeout_threshold 60
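# rabbit_hosts takes a comma-separated list of host[:port] entries, for
# example "rabbit1:5672,rabbit2:5672,rabbit3:5672" (hostnames illustrative;
# the real list comes from PHD_VAR_network_hosts_rabbitmq).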
# Define the API endpoints. Be careful when replacing vip-keystone, and note
# that the %(admin_port)s / %(public_port)s placeholders are expanded by
# keystone itself, which is why they are single-quoted away from the shell.
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_endpoint 'http://vip-keystone:%(admin_port)s/'
openstack-config --set /etc/keystone/keystone.conf DEFAULT public_endpoint 'http://vip-keystone:%(public_port)s/'
# Configure access to galera. Note that several values here depend on what
# was configured earlier: the 'keystone' user, the 'keystonetest' password
# and the vip-db address.
openstack-config --set /etc/keystone/keystone.conf database connection mysql://keystone:keystonetest@vip-db/keystone
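# As an optional sanity check (assuming the mysql client is installed on
# this node), the credentials above can be verified by hand with:
#   mysql -h vip-db -u keystone -pkeystonetest -e 'SELECT 1;' keystone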
# Make sure the connection to the DB is retried if the DB is not available
# immediately at service startup.
openstack-config --set /etc/keystone/keystone.conf database max_retries -1
# Make sure the API service is listening on the internal IP addresses only.
# Once again, these shell expansions only work for this specific environment.
# phase3: these settings are obsoleted by mod_wsgi and apache
openstack-config --set /etc/keystone/keystone.conf eventlet_server public_bind_host $(ip addr show dev eth1 scope global | grep dynamic| sed -e 's#.*inet ##g' -e 's#/.*##g')
openstack-config --set /etc/keystone/keystone.conf eventlet_server admin_bind_host $(ip addr show dev eth1 scope global | grep dynamic| sed -e 's#.*inet ##g' -e 's#/.*##g')
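# For example, if 'ip addr show dev eth1 scope global' reports a line like
# "inet 192.168.16.101/24 brd 192.168.16.255 scope global dynamic eth1",
# the grep/sed pipeline above yields "192.168.16.101" (address illustrative).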
# workaround for buggy packaging (ayoung is informed)
openstack-config --set /etc/keystone/keystone.conf token driver keystone.token.persistence.backends.sql.Token
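# Share one set of PKI signing certificates across the cluster: the first
# node to find the archive missing generates the certificates and stores
# them in the shared config directory; every node then extracts the same
# archive, so tokens issued by any node validate on all the others.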
if [ ! -e ${PHD_VAR_osp_configdir}/keystone_ssl.tar ]; then
    keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
    cd /etc/keystone/ssl
    tar cvp -f ${PHD_VAR_osp_configdir}/keystone_ssl.tar *
fi
mkdir -p /etc/keystone/ssl
cd /etc/keystone/ssl
tar xvp -f ${PHD_VAR_osp_configdir}/keystone_ssl.tar
chown -R keystone:keystone /var/log/keystone /etc/keystone/ssl/
....
target=$PHD_ENV_nodes1
....
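# Populate the database schema once, from the first node only, running as
# the keystone user so file ownership stays correct.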
su keystone -s /bin/sh -c "keystone-manage -v -d db_sync"
pcs resource create openstack-keystone systemd:openstack-keystone --clone interleave=true
if [ "${PHD_VAR_deployment}" = collapsed ]; then
    # In a collapsed environment, we can instruct the cluster to start
    # things in a particular order. We do this with ordering constraints.
    pcs constraint order start haproxy-clone then openstack-keystone-clone
    pcs constraint order promote galera-master then openstack-keystone-clone
    pcs constraint order start rabbitmq-clone then openstack-keystone-clone
    pcs constraint order start memcached-clone then openstack-keystone-clone
fi
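# If the services also had to run on the same hosts, colocation constraints
# could be added too; an illustration only (not part of this scenario):
#   pcs constraint colocation add openstack-keystone-clone with haproxy-clone

# Bootstrap the client with the shared admin token against the v2.0 admin
# endpoint until real accounts exist.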
export OS_TOKEN=${PHD_VAR_secrets_keystone_secret}
export OS_URL="http://vip-keystone:35357/v2.0"
export OS_REGION_NAME=regionOne
while ! openstack service list; do
    echo "Waiting for keystone to be active"
    sleep 1
done
openstack service create \
--name=keystone \
--description="Keystone Identity Service" \
identity
openstack endpoint create \
--publicurl 'http://vip-keystone:5000/v2.0' \
--adminurl 'http://vip-keystone:35357/v2.0' \
--internalurl 'http://vip-keystone:5000/v2.0' \
--region regionOne \
keystone
openstack user create --password keystonetest admin
openstack role create admin
openstack project create admin
openstack role add --project admin --user admin admin
# Save the admin credentials in a file. They will be needed many times
# throughout this how-to!
cat > ${PHD_VAR_osp_configdir}/keystonerc_admin << EOF
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PROJECT_NAME=admin
export OS_REGION_NAME=regionOne
export OS_PASSWORD=keystonetest
export OS_AUTH_URL=http://vip-keystone:35357/v2.0/
export PS1='[\u@\h \W(keystone_admin)]\$ '
EOF
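# Usage example (illustrative): source the file, then run any client
# command with admin credentials, e.g.
#   . ${PHD_VAR_osp_configdir}/keystonerc_admin
#   openstack user list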
openstack user create --password redhat demo
openstack role create _member_
openstack project create demo
openstack role add --project demo --user demo _member_
# Save the demo user's credentials in a file for testing purposes.
cat > ${PHD_VAR_osp_configdir}/keystonerc_user << EOF
export OS_USERNAME=demo
export OS_TENANT_NAME=demo
export OS_PROJECT_NAME=demo
export OS_REGION_NAME=regionOne
export OS_PASSWORD=redhat
export OS_AUTH_URL=http://vip-keystone:5000/v2.0/
export PS1='[\u@\h \W(keystone_user)]\$ '
EOF
# create service tenant/project
openstack project create --description "Services Tenant" services
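# Each service below follows the same pattern: create a service user in the
# 'services' project, grant it the admin role there, register the service,
# and create its public/admin/internal endpoints behind the service's VIP.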
# glance
openstack user create --password glancetest glance
openstack role add --project services --user glance admin
openstack service create --name=glance --description="Glance Image Service" image
openstack endpoint create \
--publicurl "http://vip-glance:9292" \
--adminurl "http://vip-glance:9292" \
--internalurl "http://vip-glance:9292" \
--region regionOne \
glance
# cinder
openstack user create --password cindertest cinder
openstack role add --project services --user cinder admin
openstack service create --name=cinder --description="Cinder Volume Service" volume
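# The backslash keeps the shell from treating $(tenant_id)s as command
# substitution; keystone stores the literal placeholder and fills in the
# project id when the catalog is rendered.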
openstack endpoint create \
--publicurl "http://vip-cinder:8776/v1/\$(tenant_id)s" \
--adminurl "http://vip-cinder:8776/v1/\$(tenant_id)s" \
--internalurl "http://vip-cinder:8776/v1/\$(tenant_id)s" \
--region regionOne \
cinder
openstack service create --name=cinderv2 --description="OpenStack Block Storage" volumev2
openstack endpoint create \
--publicurl "http://vip-cinder:8776/v2/\$(tenant_id)s" \
--adminurl "http://vip-cinder:8776/v2/\$(tenant_id)s" \
--internalurl "http://vip-cinder:8776/v2/\$(tenant_id)s" \
--region regionOne \
cinderv2
# swift
openstack user create --password swifttest swift
openstack role add --project services --user swift admin
openstack service create --name=swift --description="Swift Storage Service" object-store
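# Note that, unlike the public and internal URLs below, swift's admin URL
# is not tenant-scoped, so it carries no AUTH_ prefix or tenant_id suffix.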
openstack endpoint create \
--publicurl "http://vip-swift:8080/v1/AUTH_\$(tenant_id)s" \
--adminurl "http://vip-swift:8080/v1" \
--internalurl "http://vip-swift:8080/v1/AUTH_\$(tenant_id)s" \
--region regionOne \
swift
# neutron
openstack user create --password neutrontest neutron
openstack role add --project services --user neutron admin
openstack service create --name=neutron --description="OpenStack Networking Service" network
openstack endpoint create \
--publicurl "http://vip-neutron:9696" \
--adminurl "http://vip-neutron:9696" \
--internalurl "http://vip-neutron:9696" \
--region regionOne \
neutron
# nova
openstack user create --password novatest compute
openstack role add --project services --user compute admin
openstack service create --name=compute --description="OpenStack Compute Service" compute
openstack endpoint create \
--publicurl "http://vip-nova:8774/v2/\$(tenant_id)s" \
--adminurl "http://vip-nova:8774/v2/\$(tenant_id)s" \
--internalurl "http://vip-nova:8774/v2/\$(tenant_id)s" \
--region regionOne \
compute
# heat
openstack user create --password heattest heat
openstack role add --project services --user heat admin
openstack service create --name=heat --description="Heat Orchestration Service" orchestration
openstack endpoint create \
--publicurl "http://vip-heat:8004/v1/%(tenant_id)s" \
--adminurl "http://vip-heat:8004/v1/%(tenant_id)s" \
--internalurl "http://vip-heat:8004/v1/%(tenant_id)s" \
--region regionOne \
heat
openstack service create --name=heat-cfn --description="Heat CloudFormation Service" cloudformation
openstack endpoint create \
--publicurl "http://vip-heat:8000/v1" \
--adminurl "http://vip-heat:8000/v1" \
--internalurl "http://vip-heat:8000/v1" \
--region regionOne \
heat-cfn
# ceilometer
openstack user create --password ceilometertest ceilometer
openstack role add --project services --user ceilometer admin
openstack role create ResellerAdmin
openstack role add --project services --user ceilometer ResellerAdmin
openstack service create --name=ceilometer --description="OpenStack Telemetry Service" metering
openstack endpoint create \
--publicurl "http://vip-ceilometer:8777" \
--adminurl "http://vip-ceilometer:8777" \
--internalurl "http://vip-ceilometer:8777" \
--region regionOne \
ceilometer
....