---

# Format Ref: https://cirrus-ci.org/guide/writing-tasks/

# Main collection of env. vars to set for all tasks and scripts.
env:
    # Actual|intended branch for this run
    DEST_BRANCH: "main"
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Location where source repo. will be cloned
    CIRRUS_WORKING_DIR: "/var/tmp/netavark"
    # Rust package cache also lives here
    CARGO_HOME: "/var/cache/cargo"
    # Rust compiler output lives here (see Makefile)
    CARGO_TARGET_DIR: "$CIRRUS_WORKING_DIR/targets"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"
    IMAGE_SUFFIX: "c20240529t141726z-f40f39d13"
    FEDORA_NETAVARK_IMAGE: "fedora-netavark-${IMAGE_SUFFIX}"
    AARDVARK_DNS_BRANCH: "main"
    AARDVARK_DNS_URL: "https://api.cirrus-ci.com/v1/artifact/github/containers/aardvark-dns/success/binary.zip?branch=${AARDVARK_DNS_BRANCH}"
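    # Per the Cirrus "latest build artifacts" API, the URL above resolves to
    # the "binary" artifact of the most recent successful aardvark-dns build
    # on ${AARDVARK_DNS_BRANCH}.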
    FEDORA_NETAVARK_AARCH64_AMI: "fedora-netavark-aws-arm64-${IMAGE_SUFFIX}"
    EC2_INST_TYPE: "t4g.xlarge"


gcp_credentials: ENCRYPTED[d6efdb7d6d4c61e3831df2193ca6348bb02f26cd931695f69d41930b1965f7dab72a838ca0902f6ed8cde66c7deddae2]
aws_credentials: ENCRYPTED[36b3e82f72ec2c909235b69d88b835a09e230aa289e2925d949b0dc4c813c1b468655aabb05edf3f7dcfed430c320b87]
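
# Note on structure: this file leans on YAML anchors (&name), aliases (*name),
# and merge keys (<<:) so that instance and cache definitions written once in
# build_task can be reused, and selectively overridden, by every later task.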

build_task:
    alias: "build"
    # Compiling is very CPU intensive, make it chooch quicker for this task only
    gce_instance: &standard_build_gce_x86_64
        image_project: "libpod-218412"
        zone: "us-central1-c"
        disk: 200  # GB, do not set <200 per gcloud warning re: I/O performance
        cpu: 8
        memory: "8Gb"
        image_name: "${FEDORA_NETAVARK_IMAGE}"
    cargo_cache: &cargo_cache
        # Populating this cache depends on setup.sh running, and on runner.sh
        # performing all of the release, debug, and unit-test builds.
        folder: "$CARGO_HOME"
        # Cirrus-CI will automatically store separate caches for branches vs PRs.
        # We use the branch name here mainly to distinguish PR-level caches, in
        # order to properly support backport-PRs to release branches. Otherwise,
        # all PRs and branches would share caches with other PRs and branches
        # for a given $DEST_BRANCH and vX value. Adjust vX if the cache schema
        # changes.
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        # Required to be set explicitly since fingerprint_script is also set
        reupload_on_changes: true
    targets_cache: &targets_cache
        # Similar to cargo_cache, but holds the actual compiled dependent artifacts.
        # This should be scoped to a hash of the dependency-metadata lock file.
        # Cirrus-CI will automatically use separate caches for PRs and branches.
        folder: "$CARGO_TARGET_DIR"
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    bin_cache: &bin_cache
        # This simply prevents rebuilding bin/netavark for every subsequent task.
        folder: "$CIRRUS_WORKING_DIR/bin"
        # Avoid binary pollution by scoping this to only this specific build.
        # Adjust vX if the cache schema changes.
        fingerprint_key: "bin_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64"  # Cache only within same tag, branch, or PR (branch will be 'pull/#')
        reupload_on_changes: true
    setup_script: &setup "$SCRIPT_BASE/setup.sh"
    main_script: &main "$SCRIPT_BASE/runner.sh $CIRRUS_TASK_NAME"
    cache_grooming_script: &groom bash "$SCRIPT_BASE/cache_groom.sh"
    upload_caches: [ "cargo", "targets", "bin" ]

build_aarch64_task:
    alias: "build_aarch64"
    ec2_instance: &standard_build_ec2_aarch64
        image: "${FEDORA_NETAVARK_AARCH64_AMI}"
        type: $EC2_INST_TYPE
        region: us-east-1
        architecture: arm64  # CAUTION: This has to be "arm64", not "aarch64"
    cargo_cache: &cargo_cache_aarch64
        folder: "$CARGO_HOME"
        # N/B: Should exactly match (except for arch) the line from build_task (above).
        # (No, there isn't an easy way to avoid duplicating most of this :()
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    targets_cache: &targets_cache_aarch64
        folder: "$CARGO_TARGET_DIR"
        # N/B: Should exactly match (except for arch) the line from build_task (above).
        # (No, there isn't an easy way to avoid duplicating most of this :()
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    bin_cache: &bin_cache_aarch64
        # This simply prevents rebuilding bin/netavark for every subsequent task.
        folder: "$CIRRUS_WORKING_DIR/bin"
        fingerprint_key: "bin_v2_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64"  # Cache only within same tag, branch, or PR (branch will be 'pull/#')
        reupload_on_changes: true
    setup_script: *setup
    main_script: *main
    cache_grooming_script: *groom
    upload_caches: [ "cargo", "targets", "bin" ]
    # Downstream CI needs the aarch64 binaries from this CI system.
    # However, we don't want to confuse architectures.
    art_prep_script: |
        cd bin
        ls -la
        for filename in ./*; do
            mv "$filename" "${filename}.$(uname -m)-unknown-linux-gnu"
        done
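    # For illustration: on the aarch64 runner above, uname -m yields "aarch64",
    # so bin/netavark becomes bin/netavark.aarch64-unknown-linux-gnu, matching
    # the names expected by EXP_BINS in success_task below.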
    armbinary_artifacts:  # See success_task
        path: ./bin/netavark*

validate_task:
    alias: "validate"
    depends_on:
        - "build"
    gce_instance: &standard_gce_x86_64
        <<: *standard_build_gce_x86_64
        cpu: 8
        memory: "8Gb"
    # From this point forward, all caches become read-only, meaning any
    # changes made during a task are not re-uploaded to the cache.
    # This avoids some flapping between tasks, along with the upload time.
    cargo_cache: &ro_cargo_cache
        <<: *cargo_cache
        reupload_on_changes: false
    targets_cache: &ro_targets_cache
        <<: *targets_cache
        reupload_on_changes: false
    bin_cache: &ro_bin_cache
        <<: *bin_cache
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

validate_aarch64_task:
    alias: "validate_aarch64"
    depends_on:
        - "build_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: &ro_cargo_cache_aarch64
        <<: *cargo_cache_aarch64
        reupload_on_changes: false
    targets_cache: &ro_targets_cache_aarch64
        <<: *targets_cache_aarch64
        reupload_on_changes: false
    bin_cache: &ro_bin_cache_aarch64
        <<: *bin_cache_aarch64
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

unit_task:
    alias: "unit"
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main


unit_aarch64_task:
    alias: "unit_aarch64"
    depends_on:
        - "build_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main


integration_task:
    alias: "integration"
    depends_on:
        - "unit"
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main


integration_aarch64_task:
    alias: "integration_aarch64"
    depends_on:
        - "unit_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main

# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    alias: meta
    name: "VM img. keepalive"
    container:
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: "${FEDORA_NETAVARK_IMAGE}"
        EC2IMGNAMES: "${FEDORA_NETAVARK_AARCH64_AMI}"
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        AWSINI: ENCRYPTED[34663ab52255921d163085d348f5695b19ae49b8fec398282b9cd9aecadcb0144544e203ec553d38734f201b6885fd0a]
        GCPJSON: ENCRYPTED[e7e6e13b98eb34f480a12412a048e3fb78a02239c229659e136b7a27e2ab25a5bbb61ab6016e322cb6f777fa2c9f9520]
        GCPNAME: ENCRYPTED[f3fc6da8fe283ef506d7b18467a81153ea8e18b1d3cd76e79dcd6f566f20fdd3651522432d3d232f4d69eeb1502d1f6b]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR  # source not needed
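    # The &noop anchor above is reused by success_task below, which likewise
    # has no need of a source checkout.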
    script: /usr/local/bin/entrypoint.sh

ubuntu20_build_task:
    alias: ubuntu20_build
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    container:
        cpu: 2  # Do not increase; it will result in scheduling delays
        memory: "8Gb"
        image: quay.io/libpod/ubuntu20rust
    script:
        - cargo build


centos9_build_task:
    alias: centos9_build
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    container:
        cpu: 2  # Do not increase; it will result in scheduling delays
        memory: "8Gb"
        image: quay.io/libpod/centos9rust
    script:
        - cargo build

success_task:
    name: "Total success"
    alias: success
    depends_on:
        - "build"
        - "build_aarch64"
        - "validate"
        - "validate_aarch64"
        - "unit"
        - "unit_aarch64"
        - "integration"
        - "integration_aarch64"
        - "meta"
        - "ubuntu20_build"
        - "centos9_build"
    gce_instance: *standard_gce_x86_64
    env:
        API_URL_BASE: "https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}"
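        # For illustration only: with a hypothetical build ID of 123456, the
        # armbinary.zip fetched below resolves to
        # https://api.cirrus-ci.com/v1/artifact/build/123456/build_aarch64/armbinary.zip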
        EXP_BINS: >-
            netavark
            netavark.debug
            netavark.info
            netavark.aarch64-unknown-linux-gnu
            netavark.debug.aarch64-unknown-linux-gnu
            netavark.info.aarch64-unknown-linux-gnu
    clone_script: *noop
    bin_cache: *ro_bin_cache
    # The paths used for uploaded artifacts are relative, both here and in Cirrus.
    script:
        - set -x
        - curl --fail --location -O --url ${API_URL_BASE}/build_aarch64/armbinary.zip
        - unzip armbinary.zip
        - rm -f armbinary.zip
        - mv bin/* ./
        - rm -rf bin
    artifacts_test_script:  # Other CI systems depend on all of these files being present
        - ls -la
        # If a file is missing, show which one it was in the output
        - for fn in $EXP_BINS; do [[ -r "$(echo $fn|tee /dev/stderr)" ]] || exit 1; done
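        # The tee to /dev/stderr above prints each filename as it is tested, so
        # a missing binary is identifiable in the log just before the task fails.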
    # Upload the tested binaries for consumption downstream
    # https://cirrus-ci.org/guide/writing-tasks/#artifacts-instruction
    binary_artifacts:
        path: ./*netavark*