# dci-openshift-app-agent.yml
---
# Step 0a: set kubeconfig_path, OCP version vars and dci_topic; check hooks
- name: "Execute kubeconfig step"
hosts: jumphost
tags:
- kubeconfig
tasks:
- block:
- name: Display proxy settings
debug:
msg: "http_proxy={{ lookup('env','http_proxy') }} https_proxy={{ lookup('env','https_proxy') }} no_proxy={{ lookup('env','no_proxy') }}"
- name: Check kubeconfig
include_tasks: 'plays/check_kubeconfig.yml'
# Check prerequisite
- name: "Check prerequisite"
include_tasks: 'plays/check_prerequisite.yml'
rescue: &error_with_upload_logs
- name: "Run common logging process"
include_tasks: plays/common_logging.yml
- name: "Execute the teardown process"
include_tasks: "{{ dci_config_dir }}/hooks/teardown.yml"
when:
- dci_teardown_on_failure
- check_teardown.stat.exists
- name: "Run the teardown play"
include_tasks: plays/teardown.yml
when:
- dci_teardown_on_failure
- name: "Execute the error process"
include_tasks: plays/error.yml
      # The error may happen before the job_info variable is created, or in a
      # job where the dci tag is not enabled (so job_info is never set). In
      # that case a different error message is needed.
- name: Fail properly
fail:
msg: "Error: Something went wrong"
when: job_info is not defined
# Use dci tag to avoid problems with job_info.job.id, which would not be defined
# if dci tag is not enabled
- name: Fail properly
fail:
msg: "Error: Something went wrong, review the log at: https://www.distributed-ci.io/jobs/{{ job_info.job.id }}/jobStates"
tags: [dci]
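# Note on the rescue handlers: "&error_with_upload_logs" above is a plain
# YAML anchor, so later plays in this same document reuse the identical task
# list by writing "rescue: *error_with_upload_logs" instead of repeating it.
# A minimal sketch of the mechanism (task names are illustrative only):
#
#   tasks:
#     - block:
#         - name: "Some risky task"
#           command: /bin/false
#       rescue: &my_handler
#         - name: "Handle the failure"
#           debug:
#             msg: "recovered"
#   # ...and in a later play: rescue: *my_handler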
# Step 0b: initial step
- name: "Execute initial step"
hosts: jumphost
gather_facts: false
tags:
- job
- dci
tasks:
- name: Read credentials from env vars
set_fact:
        dci_client_id: "{{ lookup('env','DCI_CLIENT_ID') }}"
        dci_api_secret: "{{ lookup('env','DCI_API_SECRET') }}"
        dci_cs_url: "{{ lookup('env','DCI_CS_URL') }}"
        dci_ui_url: "{{ lookup('env','DCI_UI_URL') | default('https://www.distributed-ci.io', True) }}"
no_log: true
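    # The DCI_* variables above are typically exported from the remoteci
    # credentials file downloaded from the DCI web UI before the agent is
    # launched, e.g. (placeholder values, not real credentials):
    #
    #   export DCI_CLIENT_ID='remoteci/<remoteci-id>'
    #   export DCI_API_SECRET='<remoteci-api-secret>'
    #   export DCI_CS_URL='https://api.distributed-ci.io/'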
# Add the ocp component when called from dci-pipeline
- name: Attach ocp component to the job
dci_job_component:
job_id: "{{ job_info.job.id }}"
component_id: "{{ ocp_component_id }}"
when: job_info is defined
# Schedule a new job only if not passed via dci-pipeline
- name: "Schedule a new job"
dci_job:
        components: "{{ (dci_components | default([])) + [ocp_component_id] }}"
components_by_query: "{{ dci_components_by_query | default([]) }}"
topic: "{{ dci_topic }}"
comment: "{{ dci_comment | default('') }}"
url: "{{ dci_url | default('') }}"
name: "{{ dci_name | default('') }}"
configuration: "{{ dci_configuration | default('') }}"
previous_job_id: "{{ dci_previous_job_id | default('') }}"
register: job_info
when: job_info is not defined
- name: Set job id
set_fact:
job_id: "{{ job_info.job.id }}"
    - name: Copy the job_id to the JOB_ID_FILE if defined
copy:
content: "{{ job_id }}"
dest: "{{ JOB_ID_FILE }}"
when: JOB_ID_FILE is defined
- name: Set previous job id
set_fact:
dci_previous_job_id: "{{ job_info.job.previous_job_id }}"
when:
- 'dci_previous_job_id is not defined'
- 'job_info.job.previous_job_id != None'
- 'job_info.job.previous_job_id | length > 0'
- name: 'Set DCI tags for the current job'
dci_job:
id: '{{ job_id }}'
tags: '{{ dci_tags + dci_workarounds }}'
when: dci_tags[0] is defined
- name: Set pullsecret file for disconnected environments
include_tasks: 'plays/pullsecrets.yml'
when: dci_disconnected | default(false) | bool
# Keep in sync with test-runner parsing from d-o-a
- name: UI URL
debug:
msg: 'Follow the log at: {{ dci_ui_url }}/jobs/{{ job_id }}/jobStates' # noqa 204
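# Every play below is tagged, so a subset of the workflow can be replayed.
# For example, assuming the playbook is run directly with ansible-playbook
# rather than through a wrapper script:
#
#   ansible-playbook dci-openshift-app-agent.yml --tags job,dci,install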
# Step 1a: Red Hat "pre-run" step
- name: "Execute Red Hat pre-run step"
hosts: jumphost
tags:
- pre-run
- redhat-pre-run
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- block:
- name: "Setup job_logs directory"
include_tasks: plays/log-dir.yml
when: job_logs is undefined
- name: "Execute pre-run"
include_tasks: 'plays/pre-run.yml'
rescue: *error_with_upload_logs
# Step 1b: hook "pre-run" step
- name: "Execute hooks' pre-run step"
hosts: jumphost
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tags:
- pre-run
- partner-pre-run
tasks:
- block:
- name: "Setup job_logs directory"
include_tasks: plays/log-dir.yml
when: job_logs is undefined
- name: Run the pre-run hook
include_tasks: '{{ dci_config_dir }}/hooks/pre-run.yml'
when: check_pre_run.stat.exists
rescue: *error_with_upload_logs
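# Hooks are optional task files under "{{ dci_config_dir }}/hooks/": each one
# is included only when the matching check_*.stat.exists flag, presumably
# registered by plays/check_prerequisite.yml, reports the file as present. A
# minimal pre-run.yml hook could look like this (contents illustrative only):
#
#   ---
#   - name: "Partner pre-run: check cluster access"
#     command: oc get nodes
#     changed_when: false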
# Step 2: "running" step
- name: "Execute install step"
hosts: jumphost
tags:
- install
- running
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- block:
- name: Set job state - running
dci_job:
id: "{{ job_id }}"
status: "running"
tags: [dci]
- name: "Execute install play"
include_tasks: 'plays/install.yml'
- name: "Execute install hook"
include_tasks: "{{ dci_config_dir }}/hooks/install.yml"
rescue: &failure_with_upload_logs
- name: "Run common logging process"
include_tasks: plays/common_logging.yml
- name: "Execute the teardown process"
include_tasks: "{{ dci_config_dir }}/hooks/teardown.yml"
when:
- dci_teardown_on_failure
- check_teardown.stat.exists
- name: "Run the teardown play"
include_tasks: plays/teardown.yml
when:
- dci_teardown_on_failure
- name: "Execute the failure process"
include_tasks: plays/failure.yml
        # The error may happen before the job_info variable is created, or in a
        # job where the dci tag is not enabled (so job_info is never set). In
        # that case a different error message is needed.
- name: Fail properly
fail:
msg: "Failure: Something went wrong"
when: job_info is not defined
# Use dci tag to avoid problems with job_info.job.id, which would not be defined
# if dci tag is not enabled
- name: Fail properly
fail:
msg: "Failure: Something went wrong, review the log at: https://www.distributed-ci.io/jobs/{{ job_info.job.id }}/jobStates"
tags: [dci]
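# Note the two rescue anchors: plays that prepare the environment reuse
# *error_with_upload_logs, while the install and test plays reuse
# *failure_with_upload_logs; the included plays/error.yml and
# plays/failure.yml presumably set the final DCI job status accordingly
# ("error" pointing at the environment, "failure" at the workload under test).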
# Step 3a: Red Hat "testing" step
- name: "Execute Red Hat tests step"
hosts: jumphost
tags:
- running
- testing
- redhat-testing
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- block:
- name: "Setup job_logs directory"
include_tasks: plays/log-dir.yml
when: job_logs is undefined
- name: "Execute Red Hat tests"
include_tasks: plays/tests.yml
rescue: *failure_with_upload_logs
# Step 3b: partner "testing" step
- name: "Execute partner tests step"
hosts: jumphost
tags:
- running
- testing
- partner-testing
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- block:
- name: "Setup job_logs directory"
include_tasks: plays/log-dir.yml
when: job_logs is undefined
- name: "Execute tests hook"
include_tasks: "{{ dci_config_dir }}/hooks/tests.yml"
when: check_tests.stat.exists
rescue: *failure_with_upload_logs
# Step 4: "post-run" step
- name: "Execute post-run step"
hosts: jumphost
tags:
- post-run
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- block:
- name: Run the post-run hook
include_tasks: '{{ dci_config_dir }}/hooks/post-run.yml'
when: check_post_run.stat.exists
- name: "Run post-run"
include_tasks: plays/post-run.yml
rescue: *error_with_upload_logs
# Step 5: final step (success)
- name: "Success"
hosts: jumphost
tags:
- success
environment:
KUBECONFIG: "{{ kubeconfig_path }}"
tasks:
- name: "Run common logging process"
include_tasks: plays/common_logging.yml
- name: "Execute the teardown process"
include_tasks: "{{ dci_config_dir }}/hooks/teardown.yml"
when:
- dci_teardown_on_success
- check_teardown.stat.exists
- name: "Run the teardown play"
include_tasks: plays/teardown.yml
when:
- dci_teardown_on_success
    - name: Set job state - success
dci_job:
id: "{{ job_id }}"
status: "success"
tags: [dci]
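    # Together with the "running" state set at the start of the install play
    # and the error/failure handling included from the rescue blocks, this
    # task gives the job its final state on the DCI control server.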
- name: "Final step"
debug:
msg: "The job is now finished. Review the log at: https://www.distributed-ci.io/jobs/{{ job_info.job.id }}/jobStates"
tags: [dci]
...