Note: Template files are sensitive to whitespace and tabs.
B.1 /etc/slurm/slurm.conf
#
# Example slurm.conf file. Please run configurator.html
# (in doc/html) to build a configuration file customized
# for your environment.
#
#
# slurm.conf file generated by configurator.html.
#
# See the slurm.conf man page for more information.
#
ClusterName=linux
ControlMachine=HPCHN
#ControlAddr=
#BackupController=
#BackupAddr=
#
SlurmUser=slurm
#SlurmdUser=root
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
StateSaveLocation=/tmp
SlurmdSpoolDir=/tmp/slurmd
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
ProctrackType=proctrack/pgid
#PluginDir=
#FirstJobId=
#MaxJobCount=
#PlugStackConfig=
#PropagatePrioProcess=
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
#Prolog=
#Epilog=
#SrunProlog=
#SrunEpilog=
#TaskProlog=
#TaskEpilog=
#TaskPlugin=
#TrackWCKey=no
#TreeWidth=50
#TmpFS=
#UsePAM=
#
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
#
# SCHEDULING
SchedulerType=sched/backfill
#SchedulerAuth=
#SchedulerPort=
#SchedulerRootFilter=
SelectType=select/linear
FastSchedule=1
#PriorityType=priority/multifactor
#PriorityDecayHalfLife=14-0
#PriorityUsageResetPeriod=14-0
#PriorityWeightFairshare=100000
#PriorityWeightAge=1000
#PriorityWeightPartition=10000
#PriorityWeightJobSize=1000
#PriorityMaxAge=1-0
#
# LOGGING
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
JobCompType=jobcomp/none
#JobCompLoc=
#
# ACCOUNTING
#JobAcctGatherType=jobacct_gather/linux
#JobAcctGatherFrequency=30
#
#AccountingStorageType=accounting_storage/slurmdbd
#AccountingStorageHost=
#AccountingStorageLoc=
#AccountingStoragePass=
#AccountingStorageUser=
#
# COMPUTE NODES
# OpenHPC default configuration
PropagateResourceLimitsExcept=MEMLOCK
SlurmdLogFile=/var/log/slurm.log
SlurmctldLogFile=/var/log/slurmctld.log
AccountingStorageType=accounting_storage/filetxt
Epilog=/etc/slurm/slurm.epilog.clean
NodeName=HPCHN,node[02-04] Sockets=2 CoresPerSocket=12 ThreadsPerCore=2 State=UNKNOWN
PartitionName=normal Nodes=HPCHN,node[02-04] Default=YES MaxTime=24:00:00 State=UP
ReturnToService=1
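After editing slurm.conf, a quick sanity check is to restart the daemons and confirm that all nodes register in the partition. The commands below are a minimal sketch, assuming the hostnames from the template above (HPCHN, node[02-04]) and systemd-managed services:
# Restart the Slurm daemons so the new configuration is read
systemctl restart slurmctld    # on the head node (HPCHN)
systemctl restart slurmd       # on each compute node
# Verify the running controller picked up the settings
scontrol show config | grep -i clustername
# Confirm the nodes registered in the "normal" partition
sinfo -p normal
# Bring a node left in the DOWN state back into service
scontrol update NodeName=node02 State=RESUME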
B.2 /etc/httpd/conf.d/warewulf-httpd.conf
<IfModule !perl_module>
LoadModule perl_module modules/mod_perl.so
</IfModule>
<IfModule !version_module>
LoadModule version_module modules/mod_version.so
</IfModule>
PerlSwitches -w
PerlSwitches -T
PerlSwitches -I/var/www/stage/cgi-bin
# This is disabled as RHEL6 mod_perl seems to be missing this support
#PerlPreConnectionHandler Apache2::Reload
Alias /WW/static /usr/share/warewulf/www
Alias /WW/vnfs_cache /var/tmp/warewulf_cache
ScriptAlias /WW/file /usr/libexec/warewulf/cgi-bin/file.pl
ScriptAlias /WW/script /usr/libexec/warewulf/cgi-bin/script.pl
ScriptAlias /WW/nodeconfig /usr/libexec/warewulf/cgi-bin/nodeconfig.pl
ScriptAlias /WW/vnfs /usr/libexec/warewulf/cgi-bin/vnfs.pl
<Directory /usr/libexec/warewulf/cgi-bin>
SetHandler perl-script
PerlResponseHandler ModPerl::Registry
PerlOptions +ParseHeaders
Options +ExecCGI
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
<Directory /usr/share/warewulf/www>
Options Indexes MultiViews
AllowOverride None
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
<Directory /var/tmp/warewulf_cache>
AllowOverride None
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
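This file is normally installed by the Warewulf provisioning packages; if it is edited by hand, the Apache syntax can be checked and the service restarted as sketched below (assumed systemd commands, not part of the template):
# Validate the Apache configuration before restarting
apachectl configtest
# Restart httpd so the Warewulf aliases and CGI handlers take effect
systemctl restart httpd
# Optionally confirm the static alias answers locally
curl -I http://localhost/WW/static/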
B.3 /etc/warewulf/provision.conf
# What is the default network device that the master will use to
# communicate with the nodes?
network device = enp5s0f1
# Which DHCP server implementation should be used?
dhcp server = isc
# What is the TFTP root directory that should be used to store the
# network boot images? By default Warewulf will try to find the
# proper directory. Just add this if it can't locate it.
#tftpdir = /var/lib/tftpboot
# Automatically generate and manage a dynamic_hosts virtual file
# object in the datastore? This is useful for provisioning this
# out to nodes so they always have a current /etc/hosts file.
generate dynamic_hosts = yes
# Should we manage and overwrite the local hosts file on this
# system? This will cause all node entries to be added
# automatically to /etc/hosts.
update hostfile = yes
# If no cluster/domain is set on a node, should we add 'localdomain'
# as the default domain?
use localdomain = yes
# The default kernel arguments to pass to the node's boot kernel
default kargs = "net.ifnames=0 biosdevname=0 quiet"
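Changes to provision.conf only take effect once the services that consume it are regenerated. One possible sequence, assuming the Warewulf 3 command-line tooling (wwsh), is:
# Regenerate the DHCP configuration from the Warewulf datastore
wwsh dhcp update
# Rebuild the PXE boot configuration under the TFTP root
wwsh pxe update
# Restart the services that serve the provisioning data
systemctl restart dhcpd
systemctl restart httpd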