---
#OpenHPC release version
  openhpc_release_rpm: "https://github.com/openhpc/ohpc/releases/download/v1.3.GA/ohpc-release-1.3-1.el7.x86_64.rpm"
#The full list of available versions for CentOS can be generated via
# curl -s https://github.com/openhpc/ohpc/releases/ | grep rpm | grep -v sle | grep -v strong  | sed 's/.*="\(.*\)".*".*".*/\1/'
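# A possibly more robust alternative is the GitHub releases API (untested sketch;
# assumes the public api.github.com endpoint and the el7 naming used above):
# curl -s https://api.github.com/repos/openhpc/ohpc/releases | grep browser_download_url | grep el7 | grep -v sle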
#
# Headnode Info
  public_interface: "eth0" # NIC that allows access to the public internet
  private_interface: "eth1" #NIC that allows access to compute nodes
  headnode_private_ip: "10.1.1.1"
  build_kernel_ver: '3.10.0-957.12.2.el7.x86_64' # `uname -r` at build time... for wwbootstrap

#Private network Info
  private_network: "10.1.1.0"  
  private_network_mask: "24"
  private_network_long_netmask: "255.255.255.0"
  compute_ip_minimum: "10.1.1.2"
  compute_ip_maximum: "10.1.1.255"
  gpu_ip_minimum: "10.1.1.128" #This could be more clever, like compute_ip_minimum + num_nodes...
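# A sketch of the "more clever" idea above, using Ansible's ipaddr filter
# (an assumption - it needs the python netaddr library and is not wired in anywhere):
#   gpu_ip_minimum: "{{ (private_network + '/' + private_network_mask) | ipaddr(2 + num_compute_nodes|int) | ipaddr('address') }}"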

#slurm.conf variables
  cluster_name: "ohpc"
#  gres_types: "gpu"

# sacct user list
  cluster_users:
    - centos   # include each username on separate line as a list

#Stateful compute or not?
  stateful_nodes: false

#Node Config Vars - for stateful nodes
  sda1: "mountpoint=/boot:dev=sda1:type=ext3:size=500"
  sda2: "dev=sda2:type=swap:size=500"
  sda3: "mountpoint=/:dev=sda3:type=ext3:size=fill"
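# These strings follow Warewulf's filesystem syntax (mountpoint/dev/type/size).
# A rough sketch of how they would be applied to a stateful node with wwsh
# (assumption - the actual wiring lives in the warewulf roles):
#   wwsh provision set c0 --filesystems="{{ sda1 }},{{ sda2 }},{{ sda3 }}" --bootloader=sda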

# GPU Node Vars
# Download the NVIDIA CUDA runfile installer and run it with only --extract=$path_to_CRI_XCBC/roles/gpu_build_vnfs/files to get these three installers.
  nvidia_driver_installer: "NVIDIA-Linux-x86_64-387.26.run"
  cuda_toolkit_installer: "cuda-linux.9.1.85-23083092.run"
  cuda_samples_installer: "cuda-samples.9.1.85-23083092-linux.run"
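# For example (assuming the CUDA 9.1.85 runfile that bundles driver 387.26):
#   sh cuda_9.1.85_387.26_linux.run --extract=$path_to_CRI_XCBC/roles/gpu_build_vnfs/files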


# WW Template Names for wwmkchroot
  template_path: "/usr/libexec/warewulf/wwmkchroot/"
  compute_template: "compute-nodes"
  gpu_template: "gpu-nodes"  
  login_template: "login-nodes"  

# Chroot variables
  compute_chroot_loc: "/opt/ohpc/admin/images/{{ compute_chroot }}"
  compute_chroot: centos7-compute
  gpu_chroot_loc: "/opt/ohpc/admin/images/{{ gpu_chroot }}"
  gpu_chroot: centos7-gpu
  login_chroot_loc: "/opt/ohpc/admin/images/{{ login_chroot }}"
  login_chroot: centos7-login 

# Node Inventory method - automatic, or manual
  node_inventory_auto: true

#Node naming variables - no need to change
  compute_node_prefix: "c"
  num_compute_nodes: 1
  gpu_node_prefix: "gpu-compute-"
  num_gpu_nodes: 1
  login_node_prefix: "login-"
  num_login_nodes: 0

#OpenOnDemand
  ood_nodename: "ood"
  ood_version: 1.5
  ood_ip_addr: 10.1.1.254
  ood_rpm_repo: "https://yum.osc.edu/ondemand/{{ ood_version }}/ondemand-release-web-{{ ood_version }}-1.el7.noarch.rpm"

#Node Inventory - not in the Ansible inventory sense! Just for WW and Slurm config.
# Someday I will need a role that can run wwnodescan, and add nodes to this file! Probably horrifying practice.
# There is a real difference between building from scratch, and using these for maintenance / node addition!
#
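# For reference, a manual Warewulf scan looks roughly like this (a sketch built
# from the variables above, not something these roles run for you):
#   wwnodescan --netdev={{ private_interface }} --ipaddr={{ compute_ip_minimum }} \
#              --netmask={{ private_network_long_netmask }} --vnfs={{ compute_chroot }} \
#              --bootstrap={{ build_kernel_ver }} c0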
  compute_private_nic: "eth0"
  compute_nodes:
   - { name: "compute-1", vnfs: '{{compute_chroot}}',  cpus: 1, sockets: 1, corespersocket: 1,  mac: "08:00:27:EC:E2:FF", ip: "10.0.0.254"}

  login_nodes:
   - { name: "login-1", vnfs: '{{login_chroot}}', cpus: 8, sockets: 2, corespersocket: 4,  mac: "00:26:b9:2e:21:ed", ip: "10.2.255.137"}
 
  gpu_nodes:
   - { name: "gpu-compute-1", vnfs: '{{gpu_chroot}}', gpus: 4, gpu_type: "gtx_TitanX", cpus: 16, sockets: 2, corespersocket: 8,  mac: "0c:c4:7a:6e:9d:6e", ip: "10.2.255.47"}
 
  viz_nodes:
   - { name: "viz-node-0-0", vnfs: gpu_chroot, gpus: 2, gpu_type: nvidia_gtx_780, cpus: 8, sockets: 2, corespersocket: 4,  mac: "foo", ip: "bar"}

#Slurm Accounting Variables - little need to change these
  slurm_acct_db: "slurmdb"
  slurmdb_storage_port: "7031"
  slurmdb_port: "1234"
  slurmdb_sql_pass: "password" # placeholder - change before deployment; could force this to be a hash...
  slurmdb_sql_user: slurm

#automatic variables for internal use
# Don't edit these!
  compute_node_glob: "{{ compute_node_prefix }}[0-{{ num_compute_nodes|int - 1}}]"
  gpu_node_glob: "{{ gpu_node_prefix }}[0-{{ num_gpu_nodes|int - 1}}]"
  node_glob_bash: "{{ compute_node_prefix }}{0..{{ num_compute_nodes|int - 1}}}"
  gpu_node_glob_bash: "{{ gpu_node_prefix }}{0..{{ num_gpu_nodes|int - 1}}}"
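# e.g. with compute_node_prefix "c" and num_compute_nodes 4, compute_node_glob
# renders to "c[0-3]" (Slurm/Warewulf range syntax) and node_glob_bash to
# "c{0..3}" (bash brace expansion); the gpu versions work the same way.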

#Jupyter related
  jupyter_provision: false

#EasyBuild variables
  cluster_shared_folder: "/export"
  easybuild_prefix: "{{ cluster_shared_folder }}/eb"
  easybuild_tmpdir: "/tmp"
  easybuild_buildpath: "/tmp/build"
  easybuild_sourcepath: "/tmp/source"

#matlab install related
  matlab_provision: false
  matlab_download_url: "https://uab.box.com/shared/static/y01qu7oo1gpne6j2s6nqwcuee63epivo.gz"
  matlab_clustershare: "/opt/ohpc/pub/apps/matlab/"
  matlab_destination: "/tmp/matlab.tar.gz"
  # module file vars
  matlab_install_root: "/opt/ohpc/pub-master/apps/matlab/M2/"
  matlab_docs_url: "http://{{ ood_nodename }}"
  matlab_license_file: "{{ matlab_install_root }}/licenses/licenses.lic"
  matlab_module_path: "{{ easybuild_prefix }}/modules/all"
  matlab_module_appdir: "matlab"
  matlab_module_file: "r2018a"
  matlab_ver: "{{ matlab_module_file }}"

#SAS install related
  sas_provision: false
  sas_clustershare: "/export/apps/sas/"
  sas_module_path: "{{ easybuild_prefix }}/modules/all"
  sas_module_appdir: "sas"
  sas_module_file: "9.4"
  sas_ver: "{{ sas_module_file }}"

#Rstudio related
  rstudio_provision: false
  singularity_ver: '2.4.2'
  r_versions:
    - { full: '3.5.1', short: '3.5' }
    - { full: '3.4.4', short: '3.4' }

#Copr Repos
  enable_copr: true
  copr_repos:
    - { repo_name: "louistw/mod_wsgi-3.4-18-httpd24", host: ["{{ ood_nodename }}"] }
    - { repo_name: "louistw/slurm-17.11.11-ohpc-1.3.6", host: ["{{ cluster_name }}", "{{ ood_nodename }}"] }
    - { repo_name: "atlurie/shibboleth-3.0-ood", host: ["{{ ood_nodename }}"] }

# Shibboleth SSO
  enable_shib: false

# User Registration
  enable_user_reg: false
  user_register_app: "flask_user_reg"
  user_register_app_path: "/var/www/ood/register/{{ user_register_app }}"
  user_register_app_repo: "https://gitlab.rc.uab.edu/mmoo97/flask_user_reg.git"
  mod_wsgi_pkg_name: "uab-httpd24-mod_wsgi"
  RegUser_app_user: "reggie"
  RegUser_app_user_full_name: "RegUser of user register app"
  RegUser_app_user_passwd: "qweasd" # placeholder password - change before deployment

# User Create Scripts
  enable_user_create_scripts: false
  user_create_scripts: "ohpc_user_create"
  user_create_scripts_path: "/opt/{{ user_create_scripts }}"
  user_create_script_repo: "https://gitlab.rc.uab.edu/tr27p/ohpc_user_create.git"