Commit 05c81fc7 in Terraform Openstack (Bo-Chun Chen), authored Jun 26, 2019 by Ryan Jones

    updated files

Parent: 5c5745b2
Changes: 117
CRI_XCBC-uab-dev/roles/ohpc_config/templates/dhcpd.conf.j2
0 → 100755
#
# DHCP Server Configuration file.
# see /usr/share/doc/dhcp*/dhcpd.conf.example
# see dhcpd.conf(5) man page
#
{% for host in groups['headnode'] %}
server-identifier {{ hostvars[host]['inventory_hostname'] }};
{% endfor %}
subnet {{ private_network }} netmask {{ private_network_long_netmask }} {
    not authoritative;
    option subnet-mask {{ private_network_long_netmask }};
}
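
For reference, a sketch of what this template renders to, assuming a single headnode inventory entry named master.local and illustrative values of 10.1.1.0 / 255.255.255.0 for the two network variables (none of these values appear in this commit):

    server-identifier master.local;
    subnet 10.1.1.0 netmask 255.255.255.0 {
        not authoritative;
        option subnet-mask 255.255.255.0;
    }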
CRI_XCBC-uab-dev/roles/ohpc_firewall_and_services/tasks/main.yaml
0 → 100755
---
- name: start and enable munge
  service:
    name: munge
    state: started
    enabled: yes
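
Once this task has run, munge can be smoke-tested on the headnode with standard commands (not part of this commit):

    systemctl is-active munge
    munge -n | unmunge    # round-trip a credential through the local daemon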
CRI_XCBC-uab-dev/roles/ohpc_install/files/gres.conf
0 → 100755
# General Resource definitions for SLURM#
#########################################
CRI_XCBC-uab-dev/roles/ohpc_install/files/include-rhel-xcbc
0 → 100755
REPO_NAME="os-base"
YUM_CONF="/root/yum-ww.conf"
YUM_CMD="yum -c $CHROOTDIR/$YUM_CONF --tolerant --installroot $CHROOTDIR -y"

sanity_check() {
    if [ ! -x $WAREWULF_PREFIX/bin/cluster-env ]; then
        echo "warewulf-cluster package is recommended on nodes you are building VNFS images on."
        sleep 2
    else
        $WAREWULF_PREFIX/bin/cluster-env
    fi
    if ! rpm -q yum > /dev/null 2>&1; then
        echo "ERROR: Could not query RPM for YUM"
        return 1
    fi
    return 0
}

prechroot() {
    if [ -n "$OS_MIRROR" ]; then
        YUM_MIRROR="$OS_MIRROR"
    fi

    if [[ -z "$YUM_MIRROR" && -z "$INSTALL_ISO" ]]; then
        echo "ERROR: You must define the \$YUM_MIRROR variable in the template"
        cleanup
        exit 1
    fi

    VERSION=`rpm -qf /etc/redhat-release --qf '%{VERSION}\n'`

    mkdir -p $CHROOTDIR
    mkdir -p $CHROOTDIR/etc
    cp -rap /etc/yum.conf /etc/yum.repos.d $CHROOTDIR/etc
    sed -i -e "s/\$releasever/$VERSION/g" `find $CHROOTDIR/etc/yum* -type f`

    YUM_CONF_DIRNAME=`dirname $YUM_CONF`
    mkdir -m 0755 -p $CHROOTDIR/$YUM_CONF_DIRNAME

    > $CHROOTDIR/$YUM_CONF

    echo "[main]" >> $CHROOTDIR/$YUM_CONF
    echo 'cachedir=/var/cache/yum/$basearch/$releasever' >> $CHROOTDIR/$YUM_CONF
    echo "keepcache=0" >> $CHROOTDIR/$YUM_CONF
    echo "debuglevel=2" >> $CHROOTDIR/$YUM_CONF
    echo "logfile=/var/log/yum.log" >> $CHROOTDIR/$YUM_CONF
    echo "exactarch=1" >> $CHROOTDIR/$YUM_CONF
    echo "obsoletes=1" >> $CHROOTDIR/$YUM_CONF
    echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
    echo "plugins=1" >> $CHROOTDIR/$YUM_CONF
    echo "reposdir=0" >> $CHROOTDIR/$YUM_CONF
    echo "" >> $CHROOTDIR/$YUM_CONF

    if [ -z "$INSTALL_ISO" ]; then
        echo "[$REPO_NAME]" >> $CHROOTDIR/$YUM_CONF
        echo 'name=Linux $releasever - $basearch' >> $CHROOTDIR/$YUM_CONF
        echo "baseurl=$YUM_MIRROR" >> $CHROOTDIR/$YUM_CONF
        echo "enabled=1" >> $CHROOTDIR/$YUM_CONF
        echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
    else
        for i in `ls -d $MEDIA_MOUNTPATH.*`; do
            if [ -z "$INSTALLDIRS" ]; then
                if [ -d $i/repodata ]; then
                    # RHEL 6.x
                    INSTALLDIRS="file://$i"
                elif [ -d $i/Server/repodata ]; then
                    # RHEL 5.x
                    INSTALLDIRS="file://$i/Server"
                fi
            else
                INSTALLDIRS="$INSTALLDIRS,file://$i"
            fi
        done
        echo "[$REPO_NAME]" >> $CHROOTDIR/$YUM_CONF
        echo 'name=Linux $releasever - $basearch' >> $CHROOTDIR/$YUM_CONF
        echo "baseurl=$INSTALLDIRS" >> $CHROOTDIR/$YUM_CONF
        echo "enabled=1" >> $CHROOTDIR/$YUM_CONF
        echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
        YUM_MIRROR=$INSTALLDIRS
    fi

    # 03/13/15 karl.w.schulz@intel.com - honor proxy setting if configured on local host
    proxy_host=`grep "^proxy=" /etc/yum.conf`
    if [ $? -eq 0 ]; then
        echo $proxy_host >> $CHROOTDIR/$YUM_CONF
    fi
}

buildchroot() {
    # first install the base package list
    if [ -z "$PKGLIST" ]; then
        echo "ERROR: You must define the \$PKGLIST variable in the template!"
        cleanup
        exit 1
    fi

    $YUM_CMD install $PKGLIST
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to create chroot"
        return 1
    fi

    # if we have defined additional packages ...
    if [ ${#ADDITIONALPACKAGES[@]} -ne 0 ]; then
        for PACKAGEGROUP in "${ADDITIONALPACKAGES[@]}"; do
            $YUM_CMD install $PACKAGEGROUP
            if [ $? -ne 0 ]; then
                echo "ERROR: Failed to add packages from \$PACKAGEGROUP"
                return 1
            fi
        done
    fi

    return 0
}

postchroot() {
    touch $CHROOTDIR/fastboot
    if grep -q rename_device $CHROOTDIR/etc/sysconfig/network-scripts/network-functions; then
        echo "" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
        echo "# This is a kludge added by Warewulf so devices don't get renamed (broke things with IB)" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
        echo "rename_device() { return 0; }" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
    fi
    return 0
}

# vim:filetype=sh:syntax=sh:expandtab:ts=4:sw=4:
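
This file is not a standalone script: it is an include for Warewulf's wwmkchroot, which sources a template that defines CHROOTDIR, PKGLIST, and related variables and then calls sanity_check, prechroot, buildchroot, and postchroot in turn. A hypothetical invocation, assuming a template named rhel-xcbc that pulls in this include (the name is not shown in this commit):

    wwmkchroot rhel-xcbc /opt/ohpc/admin/images/rhel-xcbc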
CRI_XCBC-uab-dev/roles/ohpc_install/files/xcbc-example.cfg
0 → 100755
# To edit this file start with a cluster line for the new cluster
# Cluster - 'cluster_name':MaxNodesPerJob=50
# Followed by Accounts you want in this fashion (root is created by default)...
# Parent - 'root'
# Account - 'cs':MaxNodesPerJob=5:MaxJobs=4:MaxTRESMins=cpu=20:FairShare=399:MaxWallDuration=40:Description='Computer Science':Organization='LC'
# Any of the options after a ':' can be left out and they can be in any order.
# If you want to add any sub accounts just list the Parent THAT HAS ALREADY
# BEEN CREATED before the account line in this fashion...
# Parent - 'cs'
# Account - 'test':MaxNodesPerJob=1:MaxJobs=1:MaxTRESMins=cpu=1:FairShare=1:MaxWallDuration=1:Description='Test Account':Organization='Test'
# To add users to an account, add a line like this after a 'Parent -' line
# User - 'lipari':MaxNodesPerJob=2:MaxJobs=3:MaxTRESMins=cpu=4:FairShare=1:MaxWallDurationPerJob=1
Cluster - 'xcbc-example':Fairshare=1:QOS='normal'
Parent - 'root'
User - 'root':DefaultAccount='root':AdminLevel='Administrator':Fairshare=1
Account - 'test':Description='test users':Organization='xsede':Fairshare=1
Account - 'xcbc-users':Description='xsede users':Organization='xsede':Fairshare=1
Parent - 'test'
User - 'test-user':DefaultAccount='test':Fairshare=1
Parent - 'xcbc-users'
User - 'jecoulte':DefaultAccount='xcbc-users':Fairshare=1
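
This static copy mirrors the xcbc-example.j2 template later in this commit; the ohpc_install role renders that template to /etc/slurm/sacctmgr-heirarchy.cfg and loads it with:

    sacctmgr -i load /etc/slurm/sacctmgr-heirarchy.cfg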
CRI_XCBC-uab-dev/roles/ohpc_install/tasks/main.yml
0 → 100755
---
- name: install OpenHPC base, warewulf, and slurm server
  yum:
    state: latest
    name:
      - "ohpc-base"
      - "ohpc-warewulf"
      - "ohpc-slurm-server"

# - name: yum update
#   yum: name=* state=latest update_cache=yes

- name: add slurm user
  user: name=slurm state=present system=yes

- name: create slurm.conf
  template: src=slurm_conf.j2 dest=/etc/slurm/slurm.conf

- name: create slurmdbd.conf
  template: src=slurmdbd_conf.j2 dest=/etc/slurm/slurmdbd.conf

- name: put the sacctmgr config in /etc/slurm
  template: src=xcbc-example.j2 dest=/etc/slurm/sacctmgr-heirarchy.cfg

- name: set the gres.conf on the master
  copy: src=gres.conf dest=/etc/slurm/gres.conf

- name: Remove innodb log file
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /var/lib/mysql/ib_logfile0
    - /var/lib/mysql/ib_logfile1

- name: Update Mariadb setting
  lineinfile:
    dest: /etc/my.cnf
    insertafter: '\[mysqld\]'
    line: "{{ item }}"
    state: present
  with_items:
    - innodb_buffer_pool_size=1024M
    - innodb_log_file_size=64M
    - innodb_lock_wait_timeout=900

- name: restart mariadb
  service:
    name: mariadb
    state: restarted
    enabled: yes

- name: create slurm log folder
  file:
    path: /var/log/slurm
    state: directory
    owner: slurm
    group: slurm
    mode: 0755

- name: initialize slurmdb databases
  mysql_db:
    name: "{{ slurm_acct_db }}"
    state: present

- name: create slurmdb user
  mysql_user:
    name: "{{ slurmdb_sql_user }}"
    password: "{{ slurmdb_sql_pass }}"
    priv: "{{ slurm_acct_db }}.*:ALL"
    state: present

- name: start and enable munge
  service:
    name: munge
    state: started
    enabled: yes

- name: start and enable slurmdbd
  service:
    name: slurmdbd
    state: started
    enabled: yes

- name: insert rhel-xcbc WW template
  copy: src=include-rhel-xcbc dest="{{ template_path }}include-rhel-xcbc"

- name: fix the warewulf wwsh script... ARGH (line 29)
  lineinfile:
    dest: /bin/wwsh
    insertafter: '^\$ENV\{\"PATH\"\}'
    line: "delete @ENV{'PATH', 'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};"
    state: present

- name: fix the warewulf wwnodescan script... ARGH (line 96)
  lineinfile:
    dest: /bin/wwnodescan
    insertafter: '^\$ENV\{\"PATH\"\}'
    line: "delete @ENV{'PATH', 'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};"
    state: present

- name: load sacctmgr config
  command: sacctmgr -i load /etc/slurm/sacctmgr-heirarchy.cfg

- name: start and enable slurmctld
  service:
    name: slurmctld
    state: started
    enabled: yes
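
A minimal sketch of applying this role, assuming an inventory file and a site playbook named site.yaml that includes ohpc_install (both names are hypothetical; neither file is part of this diff):

    ansible-playbook -i inventory site.yaml --check    # dry run first
    ansible-playbook -i inventory site.yaml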
CRI_XCBC-uab-dev/roles/ohpc_install/templates/slurm_conf.j2
0 → 100755
#
# Example slurm.conf file. Please run configurator.html
# (in doc/html) to build a configuration file customized
# for your environment.
#
#
# slurm.conf file generated by configurator.html.
#
# See the slurm.conf man page for more information.
#
ClusterName={{ cluster_name }}
ControlMachine={{ inventory_hostname }}
#ControlAddr=
#BackupController=
#BackupAddr=
#
SlurmUser=slurm
SlurmdUser=root
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
StateSaveLocation=/tmp
SlurmdSpoolDir=/tmp/slurmd
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
ProctrackType=proctrack/pgid
#PluginDir=
#FirstJobId=
ReturnToService=2
#MaxJobCount=
#PlugStackConfig=
#PropagatePrioProcess=
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
#Prolog=
#Epilog=
#SrunProlog=
#SrunEpilog=
#TaskProlog=
#TaskEpilog=
#TaskPlugin=
#TrackWCKey=no
#TreeWidth=50
#TmpFS=
#UsePAM=
#
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
#
# SCHEDULING
SchedulerType=sched/backfill
#SchedulerAuth=
#SchedulerPort=
#SchedulerRootFilter=
SelectType=select/cons_res
SelectTypeParameters=CR_CPU
FastSchedule=0
#PriorityType=priority/multifactor
#PriorityDecayHalfLife=14-0
#PriorityUsageResetPeriod=14-0
#PriorityWeightFairshare=100000
#PriorityWeightAge=1000
#PriorityWeightPartition=10000
#PriorityWeightJobSize=1000
#PriorityMaxAge=1-0
#
# LOGGING
SlurmctldDebug=3
SlurmctldLogFile=/var/log/slurm/slurmctld.log
SlurmdDebug=3
SlurmdLogFile=/var/log/slurm/slurmd.log
JobCompType=jobcomp/none
#JobCompLoc=
#
# ACCOUNTING
JobAcctGatherType=jobacct_gather/linux
JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost={{ inventory_hostname }}
#AccountingStorageLoc=/var/log/slurm/slurmacct.log
AccountingStorageLoc={{ slurm_acct_db }}
AcctGatherNodeFreq=30
AccountingStorageEnforce=associations,limits
AccountingStoragePort={{ slurmdb_storage_port }}
#AccountingStoragePass=
#AccountingStorageUser=
#
#GENERAL RESOURCE
GresTypes={{ gres_types|default('""') }}
#
#EXAMPLE CONFIGURATION - copy, comment out, and edit
#
#COMPUTE NODES
NodeName=c0 Sockets=1 CoresPerSocket=1 State=UNKNOWN
#NodeName=compute-1 Sockets=1 CoresPerSocket=1 State=UNKNOWN
#NodeName=gpu-compute-1 Gres=gpu:gtx_TitanX:4 Sockets=2 CoresPerSocket=8 State=UNKNOWN
# PARTITIONS
#PartitionName=high Nodes=compute-[0-1] Default=YES MaxTime=INFINITE State=UP PriorityTier=10
#PartitionName=gpu Nodes=gpu-compute-1 Default=YES MaxTime=INFINITE State=UP PriorityTier=5 AllowGroups=slurmusers
PartitionName=low Nodes=c0 Default=YES MaxTime=2-00:00:00 State=UP
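
Once slurmctld is running against the rendered file, the configuration can be sanity-checked with standard Slurm commands (not part of this commit):

    scontrol show config | grep -E 'ClusterName|SelectType|AccountingStorageHost'
    sinfo    # the 'low' partition with node c0 should be listed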
CRI_XCBC-uab-dev/roles/ohpc_install/templates/slurmdbd_conf.j2
0 → 100755
#
# Example slurmdbd.conf file.
#
# See the slurmdbd.conf man page for more information.
#
# Archive info
#ArchiveJobs=yes
#ArchiveDir="/tmp"
#ArchiveSteps=yes
#ArchiveScript=
#JobPurge=12
#StepPurge=1
#
# Authentication info
AuthType=auth/munge
#AuthInfo=/var/run/munge/munge.socket.2
#
# slurmDBD info
DbdAddr=localhost
DbdHost=localhost
DbdPort={{ slurmdb_storage_port }}
SlurmUser=slurm
#MessageTimeout=300
DebugLevel=3
#DefaultQOS=normal,standby
LogFile=/var/log/slurm/slurmdbd.log
PidFile=/var/run/slurmdbd.pid
#PluginDir=/usr/lib/slurm
#PrivateData=accounts,users,usage,jobs
#TrackWCKey=yes
#
# Database info
StorageType=accounting_storage/mysql
StorageHost=localhost
StoragePort={{ slurmdb_port }}
StoragePass={{ slurmdb_sql_pass }}
StorageUser={{ slurmdb_sql_user }}
StorageLoc={{ slurm_acct_db }}
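
After slurmdbd starts with the rendered file, its registration can be checked with a standard command (not part of this commit):

    sacctmgr show cluster    # should list the cluster once slurmctld registers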
CRI_XCBC-uab-dev/roles/ohpc_install/templates/xcbc-example.j2
0 → 100755
# To edit this file start with a cluster line for the new cluster
# Cluster - 'cluster_name':MaxNodesPerJob=50
# Followed by Accounts you want in this fashion (root is created by default)...
# Parent - 'root'
# Account - 'cs':MaxNodesPerJob=5:MaxJobs=4:MaxTRESMins=cpu=20:FairShare=399:MaxWallDuration=40:Description='Computer Science':Organization='LC'
# Any of the options after a ':' can be left out and they can be in any order.
# If you want to add any sub accounts just list the Parent THAT HAS ALREADY
# BEEN CREATED before the account line in this fashion...
# Parent - 'cs'
# Account - 'test':MaxNodesPerJob=1:MaxJobs=1:MaxTRESMins=cpu=1:FairShare=1:MaxWallDuration=1:Description='Test Account':Organization='Test'
# To add users to an account, add a line like this after a 'Parent -' line
# User - 'lipari':MaxNodesPerJob=2:MaxJobs=3:MaxTRESMins=cpu=4:FairShare=1:MaxWallDurationPerJob=1
Cluster - '{{ cluster_name }}':Fairshare=1:QOS='normal'
Parent - 'root'
User - 'root':DefaultAccount='root':AdminLevel='Administrator':Fairshare=1
Account - 'test':Description='test users':Organization='xsede':Fairshare=1
Account - 'xcbc-users':Description='xsede users':Organization='xsede':Fairshare=1
Parent - 'test'
User - 'test-user':DefaultAccount='test':Fairshare=1
Parent - 'xcbc-users'
{% for user in cluster_users %}
User - '{{ user }}':DefaultAccount='xcbc-users':Fairshare=1
{% endfor %}
CRI_XCBC-uab-dev/roles/ohpc_jupyter/tasks/main.yml
0 → 100755
---
- name: Install Anaconda3 for jupyter.
  shell: |
    source /etc/profile.d/lmod.sh
    export EASYBUILD_PREFIX={{ easybuild_prefix }}
    module load EasyBuild
    eb Anaconda3-5.3.0.eb --try-toolchain-name=dummy -r --force
  become_user: build
  args:
    executable: /bin/bash

- name: Install nb_conda_kernels to manage jupyter kernels
  shell: |
    source /etc/profile.d/lmod.sh
    export EASYBUILD_PREFIX={{ easybuild_prefix }}
    module load Anaconda3
    conda install -y nb_conda_kernels
  become_user: root
  args:
    executable: /bin/bash
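
To confirm the result of these two tasks, the kernels Anaconda exposes to Jupyter can be listed with standard commands (assuming the Anaconda3 module built above):

    source /etc/profile.d/lmod.sh
    module load Anaconda3
    jupyter kernelspec list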
CRI_XCBC-uab-dev/roles/ohpc_matlab/tasks/main.yaml
0 → 100755
- name: install the latest version of libXtst
  yum:
    name: libXt
    state: latest

- name: Create directory
  file:
    path: "{{ matlab_clustershare }}"
    state: directory
    mode: 0755

- name: Download matlab
  get_url:
    url: "{{ matlab_download_url }}"
    dest: "{{ matlab_destination }}"

- name: Extract matlab
  unarchive:
    src: "{{ matlab_destination }}"
    dest: "{{ matlab_clustershare }}"
    remote_src: yes

- name: Create directory
  file:
    path: "{{ matlab_module_path }}/{{ matlab_module_appdir }}"
    state: directory
    mode: 0755

- name: Copy modulefile from template to module path
  template:
    src: "{{ matlab_module_file }}"
    dest: "{{ matlab_module_path }}/{{ matlab_module_appdir }}/{{ matlab_module_file }}"
CRI_XCBC-uab-dev/roles/ohpc_matlab/templates/r2018a
0 → 100755
#%Module
set ver {{ matlab_ver }}
set matlabroot {{ matlab_install_root }}
set url {{ matlab_docs_url }}
set msg "This module adds Matlab $ver to various paths\n\nSee $url for usage examples\n"
proc ModulesHelp { } {
    global msg
    puts stderr $msg
}
module-whatis $msg
setenv MATLAB $matlabroot
setenv MATLABROOT $matlabroot
setenv MATLAB_HOME $matlabroot
setenv MLM_LICENSE_FILE {{ matlab_license_file }}
prepend-path PATH $matlabroot/bin
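
Example usage once the modulefile is installed; the module name depends on matlab_module_appdir and the template filename, so the path below is illustrative:

    module load matlab/r2018a
    matlab -nodisplay -r "disp(version); exit"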
CRI_XCBC-uab-dev/roles/ohpc_sas/tasks/main.yaml
0 → 100755
- name: Create directory
  file:
    path: "{{ sas_clustershare }}/{{ sas_module_file }}/SASFoundation/{{ sas_module_file }}"
    state: directory
    mode: 0755

- name: Install SAS (Executable file to run xfce4desktop)
  template:
    src: "{{ sas_module_appdir }}"
    dest: "{{ sas_clustershare }}/{{ sas_module_file }}/SASFoundation/{{ sas_module_file }}/{{ sas_module_appdir }}"
    mode: a+x

- name: Create modules directory
  file:
    path: "{{ sas_module_path }}/{{ sas_module_appdir }}"
    state: directory
    mode: 0755

- name: Copy modulefile from template to module path
  template:
    src: "{{ sas_module_file }}"
    dest: "{{ sas_module_path }}/{{ sas_module_appdir }}/{{ sas_module_file }}"
CRI_XCBC-uab-dev/roles/ohpc_sas/templates/9.4
0 → 100755
#%Module1.0####################################################################
##
## mymodule modulefile
##
## Sets up the SAS environment
##
set ver 9.4
set name sas
#set loading [module-info mode load]
#set subname [lrange [split $name - ] 0 0 ]
proc ModulesHelp { } {
    global name ver
    puts stderr "\tThis module sets the environment for $name v$ver"
}
module-whatis "Set environment variables to use $name version $ver"
set base /export/apps/$name/$ver
## Add bin directories to the path
prepend-path PATH $base/SASFoundation/9.4
if { [ module-info mode load ] } {
puts stderr "Note: $name $ver environment loaded."
}
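
Example usage once this modulefile is in place (the module name assumes an app directory of sas; illustrative):

    module load sas/9.4
    which sas    # should resolve under /export/apps/sas/9.4/SASFoundation/9.4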
CRI_XCBC-uab-dev/roles/ohpc_sas/templates/sas
0 → 100755