diff --git a/templates/slurm.conf.j2 b/templates/slurm.conf.j2
index ad84610..de1b752 100644
--- a/templates/slurm.conf.j2
+++ b/templates/slurm.conf.j2
@@ -153,8 +153,9 @@ Epilog=/etc/slurm/slurm.epilog.clean
 {% for part in openhpc_slurm_partitions %}
 {% set nodelist = [] %}
 {% for group in part.get('groups', [part]) %}
-
+
 {% set group_name = group.cluster_name|default(openhpc_cluster_name) ~ '_' ~ group.name %}
+#-----------------------------------------------------------
 # openhpc_slurm_partitions group: {{ group_name }}
 {% set inventory_group_hosts = groups.get(group_name, []) %}
 {% if inventory_group_hosts | length > 0 %}
@@ -164,8 +165,7 @@ Epilog=/etc/slurm/slurm.epilog.clean
 {% set ram_mb = (first_host_hv['ansible_memory_mb']['real']['total'] * (group.ram_multiplier | default(openhpc_ram_multiplier))) | int %}
 {% for hostlist in (inventory_group_hosts | hostlist_expression) %}
 {% set gres = ' Gres=%s' % (','.join(group.gres | map(attribute='conf') )) if 'gres' in group else '' %}
-
-NodeName={{ hostlist }} State=UNKNOWN RealMemory={{ group.get('ram_mb', ram_mb) }} Sockets={{first_host_hv['ansible_processor_count']}} CoresPerSocket={{ first_host_hv['ansible_processor_cores'] }} ThreadsPerCore={{ first_host_hv['ansible_processor_threads_per_core'] }}{{ gres }}
+NodeName={{ hostlist }} State=UNKNOWN RealMemory={{ group.get('ram_mb', ram_mb) }} Sockets={{first_host_hv['ansible_processor_count']}} CoresPerSocket={{ first_host_hv['ansible_processor_cores'] }} ThreadsPerCore={{ first_host_hv['ansible_processor_threads_per_core'] }}{{ gres }} {{ group.node_params | default({}) | dict2parameters }}
 {% set _ = nodelist.append(hostlist) %}
 {% endfor %}{# nodes #}
 {% endif %}{# inventory_group_hosts #}
@@ -180,6 +180,8 @@ NodeName={{ hostlist }} State=UNKNOWN RealMemory={{ group.get('ram_mb', ram_mb)
 
 PartitionName={{part.name}} Default={{ part.get('default', 'YES') }} MaxTime={{ part.get('maxtime', openhpc_job_maxtime) }} State=UP Nodes={{ nodelist | join(',') }} {{ part.partition_params | default({}) | dict2parameters }}
 {% endfor %}{# partitions #}
+# /partitions -----------------------------------------------------------
+
 
 # Define a non-existent node, in no partition, so that slurmctld starts even with all partitions empty
 NodeName=nonesuch
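
The newly appended "{{ group.node_params | default({}) | dict2parameters }}" passes an optional per-group dict through the role's dict2parameters filter, the same way partition_params is already handled on the PartitionName line. The sketch below is only an assumption about that filter's behaviour (rendering a dict as space-separated Key=Value pairs, which is how the template uses it); the file name and implementation shown here are hypothetical, not taken from the role.

# filter_plugins/slurm_conf.py -- hypothetical sketch, not the role's actual code.
# Assumption: dict2parameters turns a dict into space-separated Key=Value pairs,
# matching its use for node_params and partition_params in the template above.

def dict2parameters(d):
    # e.g. {'Weight': 10, 'Features': 'gpu'} -> 'Weight=10 Features=gpu'
    return ' '.join('%s=%s' % (k, v) for k, v in d.items())

class FilterModule(object):
    # Standard Ansible filter plugin entry point exposing the filter to Jinja2.
    def filters(self):
        return {'dict2parameters': dict2parameters}

Under that assumption, a group defined with node_params: {Weight: 10} would render a NodeName line ending in "... Weight=10", while the default({}) case adds nothing.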