Commit

Merge pull request #32 from cncf/ora-arm-runners
adds 16cpu-64gb runner to new oci cluster
RobertKielty authored Jan 23, 2025
2 parents 21ee7c0 + 995eba8 commit d12e251
Showing 2 changed files with 255 additions and 0 deletions.
27 changes: 27 additions & 0 deletions ci/cluster/oci/runners/16cpu-64gb/argo.yaml
@@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: oci-16cpu-64gb
  namespace: argocd
spec:
  project: default
  sources:
    - chart: gha-runner-scale-set
      repoURL: ghcr.io/actions/actions-runner-controller-charts
      targetRevision: 0.10.1
      helm:
        releaseName: oci-16cpu-64gb
        valueFiles:
          - $values/ci/cluster/oci/runners/16cpu-64gb/values.yaml
    - repoURL: 'https://github.com/cncf/automation.git'
      targetRevision: main
      ref: values
  destination:
    server: "https://kubernetes.default.svc"
    namespace: arc-systems
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true
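
Note: this Application uses Argo CD's multiple-sources feature. The second source checks out https://github.com/cncf/automation.git at main and exposes it under the $values ref, which the Helm chart source uses to resolve its valueFiles path. As a sketch (assuming kubectl access to the argocd namespace), the Application could also be registered by hand:

    kubectl apply -n argocd -f ci/cluster/oci/runners/16cpu-64gb/argo.yaml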
228 changes: 228 additions & 0 deletions ci/cluster/oci/runners/16cpu-64gb/values.yaml
@@ -0,0 +1,228 @@
## githubConfigUrl is the GitHub URL where you want to configure runners
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
githubConfigUrl: https://github.com/enterprises/cncf

## githubConfigSecret is the k8s secret to use when authenticating with the GitHub API.
## You can choose to use a GitHub App or a PAT token.
githubConfigSecret: github-arc-secret
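## For reference only (the contents of github-arc-secret are not part of this
## commit), a PAT-based secret for this chart is typically shaped like:
#
# apiVersion: v1
# kind: Secret
# metadata:
#   name: github-arc-secret
#   namespace: arc-systems
# stringData:
#   github_token: "<PAT>"
#
## A GitHub App secret would instead carry github_app_id,
## github_app_installation_id, and github_app_private_key.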

controllerServiceAccount:
  namespace: arc-systems
  name: cncf-gha-controller-gha-rs-controller

## maxRunners is the maximum number of runners the autoscaling runner set will scale up to.
maxRunners: 100

## minRunners is the minimum number of idle runners. The target number of runners created is
## calculated as the sum of minRunners and the number of jobs assigned to the scale set.
minRunners: 1
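## For example, with minRunners: 1 and 5 jobs assigned to the scale set, the
## target is 1 + 5 = 6 runners, always capped at maxRunners (100 here).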

# runnerGroup: "default"

## Name of the runner scale set to create. Defaults to the Helm release name.
# runnerScaleSetName: ""

## A self-signed CA certificate for communication with the GitHub server can be
## provided using a config map key selector. If `runnerMountPath` is set, for
## each runner pod ARC will:
##   - create a `github-server-tls-cert` volume containing the certificate
##     specified in `certificateFrom`
##   - mount that volume at `runnerMountPath`/{certificate name}
##   - set the NODE_EXTRA_CA_CERTS environment variable to that same path
##   - set the RUNNER_UPDATE_CA_CERTS environment variable to "1" (as of version
##     2.303.0 this will instruct the runner to reload certificates on the host)
##
## If any of the above has already been set by the user in the runner pod
## template, ARC will observe those values and not overwrite them.
## Example configuration:
#
# githubServerTLS:
#   certificateFrom:
#     configMapKeyRef:
#       name: config-map-name
#       key: ca.crt
#   runnerMountPath: /usr/local/share/ca-certificates/
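## As a sketch (hypothetical names, matching the example above), such a config
## map could be created with:
##   kubectl create configmap config-map-name -n arc-systems --from-file=ca.crt=./ca.crt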

## containerMode is an object that provides out-of-the-box configuration
## for dind and kubernetes mode. The template will be modified as documented
## under the template object.
##
## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and the configuration should be applied to the template.
containerMode:
  type: "dind" ## type can be set to dind or kubernetes
  # ## the following is required when containerMode.type=kubernetes
  # kubernetesModeWorkVolumeClaim:
  #   accessModes: ["ReadWriteOnce"]
  #   # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamically provisioned volumes with storageClassName: openebs-hostpath
  #   storageClassName: "dynamic-blob-storage"
  #   resources:
  #     requests:
  #       storage: 1Gi
  # kubernetesModeServiceAccount:
  #   annotations:
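## Since type is "dind", the chart populates template.spec as documented under
## the template object below (see the commented dind pod spec there); anything
## set explicitly in template is applied on top of that generated spec.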

## listenerTemplate is the PodSpec for each listener Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
listenerTemplate:
  spec:
    tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
    # Node affinity is used to force the cluster-autoscaler to stick the
    # listener to the control-plane node. This allows the cluster to reliably
    # scale down to zero worker nodes when needed.
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: node-role.kubernetes.io/control-plane
                  operator: Exists
    containers:
      - name: listener
        securityContext:
          runAsUser: 1000
    # containers:
    #   # Use this section to append additional configuration to the listener container.
    #   # If you change the name of the container, the configuration will not be applied to the listener,
    #   # and it will be treated as a side-car container.
    #   - name: listener
    #     securityContext:
    #       runAsUser: 1000
    #   # Use this section to add the configuration of a side-car container.
    #   # Comment it out or remove it if you don't need it.
    #   # The spec for this container will be applied as-is, without any modifications.
    #   - name: side-car
    #     image: example-sidecar

## template is the PodSpec for each runner Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
template:
  ## template.spec will be modified if you change the container mode
  ## with containerMode.type=dind, we will populate the template.spec with the following pod spec
  ## template:
  ##   spec:
  ##     initContainers:
  ##       - name: init-dind-externals
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
  ##         volumeMounts:
  ##           - name: dind-externals
  ##             mountPath: /home/runner/tmpDir
  ##     containers:
  ##       - name: runner
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["/home/runner/run.sh"]
  ##         env:
  ##           - name: DOCKER_HOST
  ##             value: unix:///run/docker/docker.sock
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##           - name: dind-sock
  ##             mountPath: /run/docker
  ##             readOnly: true
  ##       - name: dind
  ##         image: docker:dind
  ##         args:
  ##           - dockerd
  ##           - --host=unix:///run/docker/docker.sock
  ##           - --group=$(DOCKER_GROUP_GID)
  ##         env:
  ##           - name: DOCKER_GROUP_GID
  ##             value: "123"
  ##         securityContext:
  ##           privileged: true
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##           - name: dind-sock
  ##             mountPath: /run/docker
  ##           - name: dind-externals
  ##             mountPath: /home/runner/externals
  ##     volumes:
  ##       - name: work
  ##         emptyDir: {}
  ##       - name: dind-sock
  ##         emptyDir: {}
  ##       - name: dind-externals
  ##         emptyDir: {}
  ######################################################################################################
  ## with containerMode.type=kubernetes, we will populate the template.spec with the following pod spec
  ## template:
  ##   spec:
  ##     containers:
  ##       - name: runner
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["/home/runner/run.sh"]
  ##         env:
  ##           - name: ACTIONS_RUNNER_CONTAINER_HOOKS
  ##             value: /home/runner/k8s/index.js
  ##           - name: ACTIONS_RUNNER_POD_NAME
  ##             valueFrom:
  ##               fieldRef:
  ##                 fieldPath: metadata.name
  ##           - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
  ##             value: "true"
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##     volumes:
  ##       - name: work
  ##         ephemeral:
  ##           volumeClaimTemplate:
  ##             spec:
  ##               accessModes: [ "ReadWriteOnce" ]
  ##               storageClassName: "local-path"
  ##               resources:
  ##                 requests:
  ##                   storage: 1Gi
  spec:
    containers:
      - name: runner
        image: ghcr.io/cncf/external-gha-runner:main
        imagePullPolicy: Always
        command: ["/home/runner/run.sh"]
        resources:
          requests:
            memory: 56Gi
            cpu: 16
          limits:
            memory: 60Gi
            cpu: 20
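        # Sizing note (an inference, not stated in the original change): on the
        # 16-vCPU / 64 GB shape this scale set is named for, requesting
        # 56Gi / 16 CPUs leaves headroom for the dind side-car and node
        # daemons, while the higher limits let the runner burst when the node
        # has spare capacity.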
      - name: dind
        image: docker:dind
        args:
          - dockerd
          - --host=unix:///run/docker/docker.sock
          - --group=$(DOCKER_GROUP_GID)
          - --mtu=1400
          - --default-network-opt=bridge=com.docker.network.driver.mtu=1400
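        # MTU note (an inference, not stated in the original change): pinning
        # dockerd and its default bridge network to MTU 1400 keeps packets
        # within a cloud overlay MTU smaller than Docker's 1500-byte default,
        # avoiding the silently dropped large frames that make pulls and builds
        # hang inside job containers.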
        env:
          - name: DOCKER_GROUP_GID
            value: "123"
        securityContext:
          privileged: true
        volumeMounts:
          - name: work
            mountPath: /home/runner/_work
          - name: dind-sock
            mountPath: /run/docker
          - name: dind-externals
            mountPath: /home/runner/externals
    volumes:
      - name: work
        emptyDir: {}
    # We need to assume the dind socket volumes are being provided elsewhere,
    # because the Helm + Argo CD combination is busted: the previous values
    # won't work properly.

## Optional controller service account that needs the required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The Helm chart will try to find the controller deployment and its service account at installation time.
## In case the Helm chart can't find the right service account, you can explicitly pass in the following values
## to help it finish the RoleBinding with the right service account.
## Note: if your controller is installed to watch only a single namespace, you have to pass these values explicitly.
# controllerServiceAccount:
#   namespace: arc-system
#   name: test-arc-gha-runner-scale-set-controller
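
Once synced, workflows can target these runners through the scale set name, which defaults to the Helm release name (oci-16cpu-64gb here, since runnerScaleSetName is not set). A minimal sketch of a job using them:

    jobs:
      build:
        runs-on: oci-16cpu-64gb
        steps:
          - uses: actions/checkout@v4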
