General computational fluid dynamics solver (cell-centered FVM). GPUs are supported. |
...
Info: To obtain and check out a product license, please read the Ansys Suite page first.
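
If the cluster tracks Ansys licenses as Slurm license tokens (the batch scripts below request one with -L ansys), you can check availability before submitting. A minimal sketch; the license name "ansys" is inferred from the -L flag used in the examples and may differ on your site:

# List the license tokens known to Slurm, including how many are in use
scontrol show licenses ansys
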
Documentation and Tutorials
...
Code block: Fluent on CPUs

#!/bin/bash
#SBATCH -t 00:10:00
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=96
#SBATCH -L ansys
#SBATCH -p standard96:test
#SBATCH --mail-type=ALL
#SBATCH --output="cavity.log.%j"
#SBATCH --job-name=cavity_on_cpu

module load ansys/2023r2

srun hostname -s > hostfile
echo "Running on nodes: ${SLURM_JOB_NODELIST}"

fluent 2d -g -t${SLURM_NTASKS} -ssh -mpi=intel -pib -cnf=hostfile <<EOFluentInput >cavity.out.$SLURM_JOB_ID
; this is an Ansys journal file aka text user interface (TUI) file
file/read-case initial_run.cas.h5
parallel/partition/method/cartesian-axes 2
file/auto-save/append-file-name time-step 6
file/auto-save/case-frequency if-case-is-modified
file/auto-save/data-frequency 10
file/auto-save/retain-most-recent-files yes
solve/initialize/initialize-flow
solve/iterate 100
exit
yes
EOFluentInput

echo '#################### Fluent finished ############'
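
To run this example, save the script to a file (the name cavity_on_cpu.slurm below is only an illustration) and submit it with sbatch. The two output files correspond to the --output option and the TUI redirect in the script:

sbatch cavity_on_cpu.slurm    # submit; prints the job ID
squeue -u $USER               # watch the queue state of the job
tail -f cavity.log.<jobid>    # Slurm log (--output above)
tail -f cavity.out.<jobid>    # Fluent TUI transcript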
...
Code block: Fluent on GPUs

#!/bin/bash
#SBATCH -t 00:10:00
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
#SBATCH -L ansys
#SBATCH -p gpu-a100   # GPU partition of NHR@ZIB (on Emmy, the corresponding partition is called gpu)
#SBATCH --output="slurm.log.%j"
#SBATCH --job-name=cavity_on_gpu
module add gcc openmpi/gcc.11 # external OpenMPI is CUDA-aware
module add ansys/2023r2_mlx_openmpiCUDAaware
hostlist=$(srun hostname -s | sort | uniq -c | awk '{printf $2":"$1","}')
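# The line above turns the allocated node list into "host1:4,host2:4," pairs
# (host:ranks-per-host), which is the format passed to Fluent via -cnf below.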
echo "Running on nodes: $hostlist"
cat <<EOF >fluent.jou
; this is an Ansys journal file aka text user interface (TUI) file
parallel/gpgpu/show
file/read-case initial_run.cas.h5
solve/set/flux-type yes
solve/iterate 100
file/write-case-data outputfile
ok
exit
EOF
fluent 2d -g -t${SLURM_NTASKS} -gpgpu=4 -mpi=openmpi -pib -cnf=$hostlist -i fluent.jou >/dev/null 2>&1
echo '#################### Fluent finished ############'
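
Note that the fluent call above discards all output. If you want to verify that the GPUs were actually detected (the journal runs parallel/gpgpu/show before reading the case), redirect the transcript to a file instead; a sketch, with a hypothetical transcript name:

fluent 2d -g -t${SLURM_NTASKS} -gpgpu=4 -mpi=openmpi -pib -cnf=$hostlist -i fluent.jou >cavity.out.$SLURM_JOB_ID 2>&1
# then inspect cavity.out.<jobid> for the parallel/gpgpu/show listing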
...