-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsubmit.yml.erb
More file actions
171 lines (156 loc) · 5.24 KB
/
submit.yml.erb
File metadata and controls
171 lines (156 loc) · 5.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
<%
# ERB preamble: collect the numeric GIDs the job's container should run with
# as supplemental groups. `account`, `additional_projects`, `num_cores`, etc.
# are injected by Open OnDemand from the app's form submission.
user = OodSupport::User.new
# NOTE(review): weka_nfs_server appears unused in this template — confirm
# against sibling files before removing. Resolv.getaddress raises
# Resolv::ResolvError if "weka-nfs" does not resolve, aborting rendering.
weka_nfs_server = Resolv.getaddress("weka-nfs")

# GID of the selected account's group, or nil when the account is blank or
# the group is unknown. Etc.getgrnam RAISES ArgumentError for a missing
# group (it never returns nil), so it must be rescued — same pattern as the
# per-project loop below.
nesi_gid = nil
if account.present?
  begin
    nesi_gid = Etc.getgrnam(account).gid
  rescue ArgumentError
    Rails.logger.warn "Group not found for account: #{account}"
  end
end

# All groups the user belongs to, primary group moved to the front —
# mirrors the approach used in form.yml so the two files stay in sync.
all_user_groups = user.groups.sort_by(&:id).tap { |groups|
  groups.unshift(groups.delete(OodSupport::Process.group))
}.map(&:name)

# GIDs for every project group the user belongs to, skipping the primary
# account's GID (already captured in nesi_gid above).
additional_gids = []
all_user_groups.each do |project|
  begin
    project_group = Etc.getgrnam(project)
    additional_gids << project_group.gid if project_group.gid != nesi_gid
  rescue ArgumentError
    # Group entry missing from NSS — skip it but leave a trace in the logs.
    Rails.logger.warn "Group not found for project: #{project}"
  end
end

# Final list consumed by the container spec in the template body below.
all_supplemental_groups = [nesi_gid].compact + additional_gids
%>
---
<% if cluster == "my-k8s-cluster" -%>
# Kubernetes branch: submit via the OOD k8s adapter as a VNC batch-connect app.
batch_connect:
  template: "vnc"
script:
  accounting_id: "<%= account %>"
  # Form supplies hours; the adapter expects seconds.
  wall_time: "<%= bc_num_hours.to_i * 3600 %>"
  native:
    container:
      # Suffix the pod name with the account when one was selected.
      name: "<%= account.present? ? "vmd-#{account}" : 'vmd' %>"
      image: "ghcr.io/nesi/nesi-ondemand-vnc:v0.9.0"
      command: ["/bin/bash","-l","<%= staged_root %>/custom_job_script.sh"]
      restart_policy: 'OnFailure'
<% if all_supplemental_groups.any? %>
      # GIDs computed in the preamble; grants project filesystem access.
      supplemental_groups:
<% all_supplemental_groups.each do |gid| %>
        - <%= gid %>
<% end %>
<% end %>
      env:
        TZ: "Pacific/Auckland"
        # Lmod configuration so modules resolve inside the container.
        LMOD_CONFIG_DIR: "/opt/nesi/etc/lmod"
        MODULEPATH_ROOT: "/opt/nesi/lmod"
        MODULEPATH: "/opt/nesi/lmod/generic"
        BASH_ENV: "/usr/share/lmod/lmod/init/bash"
        WEBSOCKIFY_CMD: "/usr/local/bin/websockify"
        SESSION_DIR: "<%= staged_root %>"
        PRIMARY_PROJECT: "<%= account %>"
        OMP_NUM_THREADS: "<%= num_cores %>"
<% if additional_projects.present? %>
        # Colon-separated list of extra project names chosen on the form.
        ADDITIONAL_PROJECTS: "<%= Array(additional_projects).join(':') %>"
<% end %>
      # VNC port exposed by the container.
      port: "5901"
      cpu: "<%= num_cores %>"
      memory: "<%= num_mem %>Gi"
    mounts:
      # Home directories, both canonical and legacy fileset paths.
      - type: host
        name: home
        path: /home
        destination_path: /home
      - type: host
        name: wlghome
        path: /home
        destination_path: /scale_wlg_persistent/filesets/home
      # Project and nobackup filesystems.
      - type: host
        name: project
        path: /nesi/project
        destination_path: /nesi/project
      - type: host
        name: wlgproject
        path: /nesi/project
        destination_path: /scale_wlg_persistent/filesets/project
      - type: host
        name: nobackup
        path: /nesi/nobackup
        destination_path: /nesi/nobackup
      # Shared software stack (Lmod modules etc.).
      - type: host
        name: software
        path: /opt/nesi
        destination_path: /opt/nesi
      # SSSD socket + nsswitch so user/group lookups work in the container.
      - type: host
        name: nss-socket
        host_type: Socket
        path: /var/lib/sss/pipes/nss
        destination_path: /var/lib/sss/pipes/nss
      - type: host
        name: nsswitch-conf
        host_type: File
        path: /etc/nsswitch.conf
        destination_path: /etc/nsswitch.conf
      # Required mounts for SLURM
      - type: host
        name: munge-socket
        host_type: Socket
        path: /var/run/munge/munge.socket.2
        destination_path: /var/run/munge/munge.socket.2
      - type: host
        name: slurm-conf
        path: /run/slurm/conf
        destination_path: /etc/slurm/
    configmap:
      files:
        # MATLAB logging config written into the pod's configmap.
        - filename: "logging.conf"
          data: |
            [*]
            log-level=debug
            logger-type=file
            log-dir=<%= staged_root %>/logs
          mount_path: '/etc/matlab'
        # Helper script: patch a key/value into the per-host k8s secret.
        - filename: 'k8_helper'
          data: |
            #!/usr/bin/env bash
            set -x
            KEY=$1
            VALUE=$(echo -n $2 | base64)
            CFG="$(hostname)-secret"
            kubectl get secret ${CFG} -o json | jq --arg key $KEY --arg value $VALUE '.data[$key] = $value' | kubectl apply -f -
          mount_path: '/opt/open_ondemand/helpers'
<%- else -%>
# Slurm branch: run the same VNC image via Apptainer on a compute node.
batch_connect:
  template: "vnc"
  websockify_cmd: '/usr/local/bin/websockify'
  header: |
    #!/bin/bash
    #. ~/.bashrc
  # Wrap the generated VNC script (%s) so it executes inside the container.
  script_wrapper: |
    cat << "CTRSCRIPT" > container.sh
    export PATH="$PATH:/opt/TurboVNC/bin"
    export LMOD_CONFIG_DIR=/opt/nesi/etc/lmod
    export MODULEPATH_ROOT=/opt/nesi/lmod
    export MODULEPATH=/opt/nesi/lmod/generic
    export BASH_ENV=/usr/share/lmod/lmod/init/bash
    %s
    CTRSCRIPT
    export APPTAINER_BIND="$HOME,/nesi,/opt/nesi,/etc/passwd,/etc/group,/var/lib/sss/pipes,/etc/nsswitch.conf,/var/run/munge/munge.socket.2,/var/run/slurm/conf,/var/spool/slurmd/conf-cache"
    apptainer exec --writable-tmpfs /opt/nesi/containers/nesi-ood-base/nesi-ondemand-vnc-0.8.0.aimg /bin/bash container.sh
script:
  accounting_id: "<%= account %>"
  wall_time: "<%= bc_num_hours.to_i * 3600 %>"
  # Passed straight through to sbatch as native arguments.
  native:
    - "--ntasks"
    - "1"
    - "--cpus-per-task"
    - "<%= num_cores %>"
    - "--mem"
    - "<%= num_mem %>G"
<%- end -%>