@@ -92,7 +92,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
### Added
- Initial release!
[Unreleased]:
https://github.com/OSC/bc_osc_matlab/compare/v0.11.0...HEAD
[0.11.0]: https://github.com/OSC/bc_osc_matlab/compare/v0.10.0...v0.11.0
[0.10.0]: https://github.com/OSC/bc_osc_matlab/compare/v0.9.0...v0.10.0
......
const table = {
  "interactive": { "max_cpu": 48, "max_hour": 2, "max_gpu": 0 },
  "express": { "max_cpu": 48, "max_hour": 2, "max_gpu": 0 },
  "short": { "max_cpu": 48, "max_hour": 12, "max_gpu": 0 },
  "pascalnodes": { "max_cpu": 28, "max_hour": 12, "max_gpu": 4 },
  "pascalnodes-medium": { "max_cpu": 28, "max_hour": 48, "max_gpu": 4 },
  "medium": { "max_cpu": 48, "max_hour": 50, "max_gpu": 0 },
  "long": { "max_cpu": 48, "max_hour": 150, "max_gpu": 0 },
  "intel-dcb": { "max_cpu": 24, "max_hour": 150, "max_gpu": 0 },
  "amd-hdr100": { "max_cpu": 128, "max_hour": 150, "max_gpu": 0 },
  "largemem": { "max_cpu": 24, "max_hour": 50, "max_gpu": 0 },
  "largemem-long": { "max_cpu": 24, "max_hour": 150, "max_gpu": 0 },
  "amperenodes": { "max_cpu": 128, "max_hour": 12, "max_gpu": 2 },
  "amperenodes-medium": { "max_cpu": 128, "max_hour": 48, "max_gpu": 2 },
}

// Partitions whose names match this pattern expose GPUs.
const gpu_part_regex = /pascal|ampere/;

// Lower a numeric field's "max" attribute and clamp its current value to it.
function set_max_value(form_id, value) {
  const form_element = $(form_id);
  form_element.attr({'max': value});
  if (form_element.val() > value)
    form_element.val(value);
}

// Re-apply the partition limits whenever the partition selection changes.
function set_partition_change_handler() {
  let partition_select = $("#batch_connect_session_context_bc_partition");
  partition_select.change(function(e) {
    toggle_gpu_visibility(e);
  });
}

// Show or hide the GPU field and clamp the CPU, GPU, and hour fields to the
// limits of the selected partition.
function toggle_gpu_visibility(event) {
  const partition = event.target.value;
  const cpu_selector = '#batch_connect_session_context_bc_num_slots';
  const gpu_selector = '#batch_connect_session_context_bc_num_gpus';
  const hour_selector = '#batch_connect_session_context_bc_num_hours';

  toggle_visibility_of_form_group(gpu_selector, gpu_part_regex.test(partition));
  set_max_value(cpu_selector, table[partition]["max_cpu"]);
  set_max_value(gpu_selector, table[partition]["max_gpu"]);
  set_max_value(hour_selector, table[partition]["max_hour"]);
}

// Walk up from a form element to its enclosing .form-group and show or hide it.
function toggle_visibility_of_form_group(form_id, show) {
  let form_element = $(form_id);
  let parent = form_element;
  while (
    (! parent[0].classList.contains('form-group')) &&
    (! parent.is('html')) // ensure that we don't loop infinitely
  ) {
    parent = parent.parent();
  }
  // If parent is HTML then something has gone wrong and visibility should not be changed
  if ( parent.is('html') ) {
    return;
  }
  if (show) {
    parent.show();
  } else {
    parent.hide();
  }
}

// Apply the limits for the initially selected partition, then install the
// change handler.
toggle_gpu_visibility(
  // Fake the event
  { target: document.querySelector('#batch_connect_session_context_bc_partition') }
);
set_partition_change_handler();
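The partition limits hard-coded in `table` above mirror Cheaha's Slurm configuration and will drift if the partitions change. A quick way to cross-check them from a login node (a sketch only, assuming the standard Slurm client tools are available) is:

# Compare each partition's time limit, CPUs per node, and GRES against
# the max_hour / max_cpu / max_gpu values in the table above.
sinfo --format="%P %l %c %G"

# Or inspect a single partition in detail, e.g. amperenodes:
scontrol show partition amperenodes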
<%-
  groups = OodSupport::User.new.groups.sort_by(&:id).tap { |groups|
    groups.unshift(groups.delete(OodSupport::Process.group))
  }.map(&:name).grep(/^P./)
  partitions = OodAppkit.clusters[:SLURM_CLUSTER].custom_config[:partitions]
-%>
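This ERB header runs when the form is rendered: it collects the user's groups (primary group first), keeps the names matching /^P./, and reads the partition list from the cluster's custom config. To preview which groups a user will see, a rough shell equivalent of that filter (illustrative only; it assumes Cheaha's convention that project group names start with "P") is:

# List the current user's groups, one per line, keeping only those that
# begin with "P" -- the same filter as grep(/^P./) above.
id -Gn | tr ' ' '\n' | grep '^P.'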
---
cluster:
  - "owens"
  - "pitzer"
form:
  - version
  - account
  - bc_num_hours
  - bc_num_slots
  - num_cores
  - node_type
  - bc_vnc_resolution
  - bc_email_on_started
cluster: "SLURM_CLUSTER"
attributes:
  num_cores:
    help: |
      Number of cores on node type (4 GB per core unless requesting whole
      node). Leave blank if requesting full node.
  custom_environment:
    widget: text_area
    label: Environment Setup
    value: |
      # If you would like to load other modules
      # or add other things in your environment please list below
      #
      # format:
      # module load example_module/VERSION example_module2
      # export PATH=....
  bc_num_hours:
    value: 1
  bc_num_slots:
    widget: "number_field"
    label: "Number of CPUs"
    value: 1
    min: 1
    max: 24
    step: 1
  bc_num_mems:
    widget: "number_field"
    label: Memory per CPU (GB)
    value: 4
    min: 1
    max: 128
    step: 1
  bc_num_gpus:
    widget: "number_field"
    label: Number of GPUs
    value: 1
    min: 0
    max: 4
    step: 1
  bc_num_slots: "1"
  bc_vnc_resolution:
    required: true
  account:
    label: "Project"
  bc_partition:
    widget: select
    label: Partition
    options:
      <%- groups.each do |group| %>
      - "<%= group %>"
      <%- end %>
  node_type:
    widget: select
    label: "Node type"
    help: |
      - **Standard Compute** <br>
        These are standard HPC machines. Owens has 648 of these nodes with 28
        cores and 128 GB of memory. Pitzer has 224 of these nodes with 40 cores
        and 340 of these nodes with 48 cores. All Pitzer nodes have 192 GB of
        RAM. Choosing "any" as the node type will decrease your wait time.
      - **GPU Enabled** <br>
        These are HPC machines with GPUs. Owens has 160 nodes with 1 [NVIDIA Tesla P100 GPU]
        and Pitzer has 74 nodes with 2 [NVIDIA Tesla V100 GPUs]. They have the
        same CPU and memory characteristics as standard compute. However,
        Pitzer's 40-core machines have 2 GPUs with 16 GB of RAM, and Pitzer's
        48-core machines have 2 GPUs with 32 GB of RAM. Dense GPU types have
        4 GPUs with 16 GB of RAM.
      - **Large Memory** <br>
        These are HPC machines with very large amounts of memory. Owens has 16
        hugemem nodes with 48 cores and 1.5 TB of RAM. Pitzer has 4 hugemem
        nodes with 3 TB of RAM and 80 cores. Pitzer also has 12 Largemem nodes,
        which have 48 cores and 768 GB of RAM.

      [NVIDIA Tesla P100 GPU]: http://www.nvidia.com/object/tesla-p100.html
      [NVIDIA Tesla V100 GPUs]: https://www.nvidia.com/en-us/data-center/v100/
    options:
      <%- partitions.each do |p| -%>
      - [ "<%= p %>", "<%= p %>" ]
      <%- end -%>
      - [
          "any", "any",
          data-max-num-cores-for-cluster-owens: 28,
          data-max-num-cores-for-cluster-pitzer: 48,
        ]
      - [
          "40 core", "any-40core",
          data-max-num-cores-for-cluster-pitzer: 40,
          data-option-for-cluster-owens: false,
        ]
      - [
          "48 core", "any-48core",
          data-max-num-cores-for-cluster-pitzer: 48,
          data-option-for-cluster-owens: false,
        ]
      - [
          "any gpu", "gpu",
          data-max-num-cores-for-cluster-owens: 28,
          data-max-num-cores-for-cluster-pitzer: 48,
        ]
      - [
          "40 core gpu", "gpu-40core",
          data-max-num-cores-for-cluster-pitzer: 40,
          data-option-for-cluster-owens: false,
        ]
      - [
          "48 core gpu", "gpu-48core",
          data-max-num-cores-for-cluster-pitzer: 48,
          data-option-for-cluster-owens: false,
        ]
      - [
          "largemem", "largemem",
          data-min-num-cores-for-cluster-pitzer: 48,
          data-max-num-cores-for-cluster-pitzer: 48,
          data-option-for-cluster-owens: false,
        ]
      - [
          "hugemem", "hugemem",
          data-min-num-cores-for-cluster-owens: 48,
          data-max-num-cores-for-cluster-owens: 48,
          data-min-num-cores-for-cluster-pitzer: 80,
          data-max-num-cores-for-cluster-pitzer: 80,
        ]
      - [
          "debug", "debug",
          data-max-num-cores-for-cluster-owens: 28,
          data-max-num-cores-for-cluster-pitzer: 48,
          data-option-for-cluster-owens: false,
          data-option-for-cluster-pitzer: false,
        ]
  version:
    widget: select
    label: "MATLAB version"
    help: "This defines the version of MATLAB you want to load."
    options:
      - [
          "R2022a", "matlab/r2022a",
        ]
      - [
          "R2020a", "matlab/r2020a",
        ]
      - [
          "R2019b", "matlab/r2019b",
        ]
      - [
          "R2019a", "matlab/r2019a",
          data-option-for-cluster-owens: false,
        ]
      - [
          "R2018b", "matlab/r2018b"
        ]
      - [
          "R2018a", "matlab/r2018a"
        ]
      - [
          "R2017a", "matlab/r2017a",
          data-option-for-cluster-pitzer: false,
        ]
      - [
          "R2016b", "matlab/r2016b",
          data-option-for-cluster-pitzer: false,
        ]
      - [
          "R2015b", "matlab/r2015b",
          data-option-for-cluster-pitzer: false,
        ]
      - [ "R2023a", "rc/matlab/R2023a" ]
      - [ "R2022a", "rc/matlab/R2022a" ]
      - [ "R2021b", "rc/matlab/R2021b" ]
      - [ "R2021a", "rc/matlab/R2021a" ]
      - [ "R2020a", "rc/matlab/R2020a" ]
      - [ "R2019b", "rc/matlab/R2019b" ]
      - [ "R2019a", "rc/matlab/R2019a" ]
      - [ "R2018a", "rc/matlab/R2018a" ]
      - [ "R2017b", "rc/matlab/R2017b" ]
      - [ "R2017a", "rc/matlab/R2017a" ]
      - [ "R2016b", "rc/matlab/R2016b" ]
      - [ "R2016a", "rc/matlab/R2016a" ]
      - [ "R2015a", "rc/matlab/R2015a" ]
      - [ "R2013a", "rc/matlab/R2013a" ]
      - [ "R2012a", "rc/matlab/R2012a" ]
  cuda_toolkit: "cuda10.0/toolkit"
form:
  - custom_environment
  - version
  - bc_num_hours
  - bc_partition
  - bc_num_gpus
  - bc_num_slots
  - bc_num_mems
  - bc_email_on_started
  - cuda_toolkit
@@ -4,8 +4,8 @@ category: Interactive Apps
subcategory: GUIs
role: batch_connect
description: |
  This app will launch a [MATLAB] GUI on the [Cheaha cluster]. You will be able
  to interact with the MATLAB GUI through a VNC session.

  [MATLAB]: https://www.mathworks.com/products/matlab.html
  [Cheaha cluster]: https://uabrc.github.io
#!/usr/bin/env bash
# Set working directory to home directory
cd "${HOME}"
#
# Launch Xfce Window Manager and Panel
#
(
export SEND_256_COLORS_TO_REMOTE=1
export XDG_CONFIG_HOME="<%= session.staged_root.join("config") %>"
export XDG_DATA_HOME="<%= session.staged_root.join("share") %>"
export XDG_CACHE_HOME="$(mktemp -d)"
module reset
set -x
xfwm4 --compositor=off --daemon --sm-client-disable
xsetroot -solid "#D3D3D3"
xfsettingsd --sm-client-disable
xfce4-panel --sm-client-disable
) &
#
# Start MATLAB
#
<%- if context.bc_partition == "pascalnodes" -%>
# Load CUDA toolkit
module load <%= context.cuda_toolkit %>
<%- end -%>
# Load the required environment
module load <%= context.version %>
# Launch MATLAB
module list
set -x
matlab -desktop
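On the GPU partitions it can be useful to confirm that a GPU was actually granted before MATLAB starts. A minimal check that could be added just before the launch (an assumption: nvidia-smi is on the PATH inside the job on GPU nodes) is:

# Print the allocated GPU(s); warn if none is visible to the job.
nvidia-smi --query-gpu=name,memory.total --format=csv || echo "WARNING: no GPU visible in this job"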
<%-
  ppn = num_cores.blank? ? 28 : num_cores.to_i
  nodes = bc_num_slots.blank? ? 1 : bc_num_slots.to_i

  case node_type
  when "hugemem"
    ppn = 48
    partition = bc_num_slots.to_i > 1 ? "hugemem-parallel" : "hugemem"
    slurm_args = [ "--nodes", "#{nodes}", "--ntasks-per-node", "#{ppn}", "--partition", partition ]
  when "vis"
    slurm_args = [ "--nodes", "#{nodes}", "--ntasks-per-node", "#{ppn}", "--gpus-per-node", "1", "--gres", "vis" ]
  else
    slurm_args = [ "--nodes", "#{nodes}", "--ntasks-per-node", "#{ppn}" ]
  end

  # Derive a notification address from the username if it is not already an email.
  email = ENV['USER']
  if !email.include? '@'
    email = email + '@uab.edu'
  end
-%>
---
batch_connect:
  template: "vnc"
script:
  accounting_id: "<%= account %>"
  job_environment:
    USER: "<%= ENV['USER'] %>"
  native:
    <%- slurm_args.each do |arg| %>
    - "<%= arg %>"
    <%- end %>
    - "-N 1"
    - "-n <%= bc_num_slots.blank? ? 1 : bc_num_slots.to_i %>"
    - "--mem-per-cpu=<%= bc_num_mems.blank? ? 4 : bc_num_mems.to_i %>G"
    - "--partition=<%= bc_partition %>"
    - "--time=<%= bc_num_hours.blank? ? 1 : bc_num_hours.to_i %>:00:00"
    - "--job-name=ood-matlab-<%= version.split("/")[-1] %>"
    <%- if bc_partition.include? "pascalnodes" or bc_partition.include? "amperenodes" -%>
    - "--gres=gpu:<%= bc_num_gpus.blank? ? 1 : bc_num_gpus.to_i %>"
    <%- end -%>
    <%- if bc_email_on_started == "1" -%>
    - "--mail-type=BEGIN"
    - "--mail-user=<%= email %>"
    <%- end -%>
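For reference, the native arguments above amount to a Slurm request roughly like the following sbatch line (illustrative values only: a 4-core, 8 GB-per-CPU, 2-hour session on amperenodes with one GPU and email notification; Open OnDemand submits through its Slurm adapter rather than calling sbatch like this, and blazerid@uab.edu stands in for the derived email):

# Approximate equivalent of the generated native arguments.
sbatch -N 1 -n 4 --mem-per-cpu=8G --partition=amperenodes \
  --time=2:00:00 --job-name=ood-matlab-R2022a \
  --gres=gpu:1 --mail-type=BEGIN --mail-user=blazerid@uab.edu \
  job_script.sh  # hypothetical batch script standing in for the generated VNC template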
#!/usr/bin/env bash
<%- gpu = context.node_type.include?("vis") -%>
# Clean the environment
module purge
# Set working directory to home directory
cd "${HOME}"
@@ -17,7 +12,7 @@ cd "${HOME}"
export XDG_CONFIG_HOME="<%= session.staged_root.join("config") %>"
export XDG_DATA_HOME="<%= session.staged_root.join("share") %>"
export XDG_CACHE_HOME="$(mktemp -d)"
module reset
set -x
xfwm4 --compositor=off --daemon --sm-client-disable
xsetroot -solid "#D3D3D3"
@@ -29,17 +24,15 @@ cd "${HOME}"
# Start MATLAB
#
<%- if context.bc_partition == "pascalnodes" -%>
# Load CUDA toolkit
module load <%= context.cuda_toolkit %>
<%- end -%>
# Load the required environment
module load <%= context.version %>
<%= context.custom_environment.gsub(/\r\n?/, "\n") %>
# Launch MATLAB
<%- if gpu -%>
module load intel/16.0.3 virtualgl
module list
set -x
vglrun matlab -desktop -nosoftwareopengl
<%- else -%>
module list
set -x
matlab -desktop
<%- end -%>
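When the "vis" node type path is taken, VirtualGL should be rendering on the node's GPU rather than in software. A quick way to confirm that from a terminal inside the session (a sketch, assuming glxinfo is installed on the node) is:

# The renderer string should name the node's GPU, not a software
# rasterizer such as llvmpipe.
vglrun glxinfo | grep "OpenGL renderer"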