Excerpt
General computational fluid dynamics solver (cell-centered FVM). GPUs are supported.
...
Info
To obtain and check out a product license, please read Ansys Suite first.
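The batch scripts below request an Ansys license token with the Slurm option -L ansys. If the cluster exposes the license counts to Slurm (this is site-specific and assumed here), you can check availability before submitting:

Code block
scontrol show licenses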
Documentation and Tutorials
...
Code block
#!/bin/bash
#SBATCH -t 00:10:00
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=40
#SBATCH -L ansys
#SBATCH -p medium ### on Emmy
#SBATCH --mail-type=ALL
#SBATCH --output="cavity.log.%j"
#SBATCH --job-name=cavity_on_cpu
module load ansys/2019r2
srun hostname -s > hostfile   # one line per MPI task, read by Fluent via -cnf=hostfile
echo "Running on nodes: ${SLURM_JOB_NODELIST}"
fluent 2d -g -t${SLURM_NTASKS} -ssh -mpi=intel -pib -cnf=hostfile << EOFluentInput >cavity.out.$SLURM_JOB_ID
; this is an Ansys journal file aka text user interface (TUI) file
file/read-case initial_run.cas.h5
parallel/partition/method/cartesian-axes 2
file/auto-save/append-file-name time-step 6
file/auto-save/case-frequency if-case-is-modified
file/auto-save/data-frequency 10
file/auto-save/retain-most-recent-files yes
solve/initialize/initialize-flow
solve/iterate 100
exit
yes
EOFluentInput
echo '#################### Fluent finished ############'
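Assuming the script above is saved as cavity_on_cpu.slurm (the file name is only an example), it can be submitted and followed like this:

Code block
sbatch cavity_on_cpu.slurm        # submit; Slurm prints the job ID
squeue -u $USER                   # check the queue state of the job
tail -f cavity.out.<jobid>        # follow the Fluent transcript redirected in the script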
...
Code block
#!/bin/bash
#SBATCH -t 00:59:00
#SBATCH --nodes=1
#SBATCH --partition=gpu-a100:shared ### on GPU-cluster of NHR@ZIB
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:1 # number of GPUs per node - ignored if exclusive partition with 4 GPUs
#SBATCH --gpu-bind=single:1 # bind each process to its own GPU (single:<tasks_per_gpu>)
#SBATCH -L ansys
#SBATCH --output="slurm-log.%j"
module add gcc openmpi/gcc.11 ansys/2023r2_mlx_openmpiCUDAaware # external OpenMPI is CUDA-aware
hostlist=$(srun hostname -s | sort | uniq -c | awk '{printf $2":"$1","}') # build a "node:tasks," list for Fluent's -cnf option
echo "Running on nodes: $hostlist"
cat <<EOF >tui_input.jou
file/read-cas nozzle_gpu_supported.cas.h5
solve/initialize/hyb-initialization
solve/iterate 1000 yes
file/write-case-data outputfile1
file/export cgns outputfile2 full-domain yes yes
pressure temperature x-velocity y-velocity mach-number
quit
exit
EOF
fluent 3ddp -g -cnf=$hostlist -t${SLURM_NTASKS} -gpu -nm -i tui_input.jou \
-mpi=openmpi -pib -mpiopt="--report-bindings --rank-by core" >/dev/null 2>&1
echo '#################### Fluent finished ############'
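Before launching Fluent it can be useful to confirm that the requested GPU is actually visible inside the allocation. A minimal check (a sketch; it assumes nvidia-smi is available on the compute node) is:

Code block
srun --ntasks=1 nvidia-smi --query-gpu=name,memory.total --format=csv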
...
Code block
#!/bin/bash
#SBATCH -t 00:10:00
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
#SBATCH -L ansys
#SBATCH -p gpu-a100 ### on GPU-cluster of NHR@ZIB; on Emmy the A100 partition is simply called gpu
#SBATCH --output="slurm.log.%j"
#SBATCH --job-name=cavity_on_gpu
module add gcc openmpi/gcc.11 # external OpenMPI is CUDA-aware
module add ansys/2023r2_mlx_openmpiCUDAaware
hostlist=$(srun hostname -s | sort | uniq -c | awk '{printf $2":"$1","}')
echo "Running on nodes: $hostlist"
cat <<EOF >fluent.jou
; this is an Ansys journal file aka text user interface (TUI) file
parallel/gpgpu/show
file/read-case initial_run.cas.h5
solve/set/flux-type yes
solve/iterate 100
file/write-case-data outputfile
ok
exit
EOF
fluent 2d -g -t${SLURM_NTASKS} -gpgpu=4 -mpi=openmpi -pib -cnf=$hostlist -i fluent.jou >/dev/null 2>&1
echo '#################### Fluent finished ############'
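For the -gpgpu offload mode the number of MPI tasks per node should be divisible by the number of GPUs per node (here 4 tasks share 4 GPUs). A small pre-flight check along these lines could be added to the script; this is only a sketch using Slurm's environment variables:

Code block
NGPUS=4                                   # must match the -gpgpu=4 option in the script
if (( SLURM_NTASKS_PER_NODE % NGPUS != 0 )); then
    echo "ntasks-per-node (${SLURM_NTASKS_PER_NODE}) is not a multiple of ${NGPUS} GPUs" >&2
    exit 1
fi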
...