pyLOM.utils#

Module contents#

pyLOM.utils.raiseError(errmsg)[source]#

Raise a controlled error and abort execution on all processes.

pyLOM.utils.raiseWarning(warnmsg, allranks=False)[source]#

Raise a controlled warning without aborting execution. By default only the root rank prints the warning; set allranks=True to print it on every rank.
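
A minimal usage sketch (the check and the messages are illustrative):

    import pyLOM.utils

    ndim = 5
    if ndim > 3:
        # Warns without stopping; by default only the root rank prints
        pyLOM.utils.raiseWarning('ndim > 3 is untested')
    if ndim < 1:
        # Aborts execution on all processes with a controlled message
        pyLOM.utils.raiseError('ndim must be at least 1')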

pyLOM.utils.round(value, precision)[source]#

Truncate an array to a given precision.
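
A short sketch; the exact rounding/truncation rule is defined by the implementation, so treat the comment as indicative only:

    import numpy as np
    import pyLOM.utils

    X  = np.array([1.23456, 7.89012])
    Xr = pyLOM.utils.round(X, 3)  # keeps about 3 digits of precision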

pyLOM.utils.cr(ch_name, suff=0)[source]#

Decorator that times every call of the decorated function under the chrono (CR) channel ch_name.

pyLOM.utils.cr_nvtx(ch_name, suff=0, color='green')[source]#

Decorator that times the decorated function under the chrono (CR) channel ch_name and additionally marks it as an NVTX range (with the given color) for NVIDIA profilers.
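
Both decorators are used the same way; a sketch (the channel names are arbitrary labels chosen by the caller):

    import pyLOM.utils

    @pyLOM.utils.cr('demo.square')
    def square(x):
        # Every call is timed and accumulated under 'demo.square'
        return x * x

    @pyLOM.utils.cr_nvtx('demo.cube', color='green')
    def cube(x):
        # Timed as above, plus an NVTX range for NVIDIA profilers
        return x * x * x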

pyLOM.utils.cr_start(ch_name, suff)[source]#

Start the chrono of a channel

pyLOM.utils.cr_stop(ch_name, suff)[source]#

Stop the chrono of a channel

pyLOM.utils.cr_info(rank=-1)[source]#

Print the accumulated timing information of all channels, ordered by the largest total time.
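
The start/stop pair times an arbitrary code region and cr_info reports the accumulated results; a sketch (we assume the default rank=-1 makes every rank report):

    import pyLOM.utils

    pyLOM.utils.cr_start('demo.loop', 0)
    total = sum(i * i for i in range(100000))  # timed region
    pyLOM.utils.cr_stop('demo.loop', 0)

    pyLOM.utils.cr_info()  # print accumulated timings, largest first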

pyLOM.utils.nvtxp(ch_name, color='blue')[source]#

Decorator that marks the decorated function as an NVTX range (with the given color) for NVIDIA profilers.

pyLOM.utils.mem(ch_name, suff=0)[source]#

Decorator that tracks the memory usage of the decorated function under the channel ch_name.

pyLOM.utils.mem_start(ch_name, suff)[source]#

Start the memory counter of a channel

pyLOM.utils.mem_stop(ch_name, suff)[source]#

Stop the memory counter of a channel

pyLOM.utils.mem_info(rank=-1, units='kB')[source]#

Print the accumulated memory information of all channels in the requested units, ordered by the largest total.
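
The memory counters mirror the chrono API; a sketch:

    import pyLOM.utils

    pyLOM.utils.mem_start('demo.alloc', 0)
    X = [0.0] * 1_000_000  # memory-tracked region
    pyLOM.utils.mem_stop('demo.alloc', 0)

    pyLOM.utils.mem_info(units='kB')  # print accumulated memory usage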

pyLOM.utils.worksplit(istart, iend, whoAmI, nWorkers=MPI_SIZE)[source]#

Divide the work among the processors.
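
A sketch, assuming worksplit returns the (istart, iend) bounds assigned to worker whoAmI (here hard-coded to worker 0 for illustration):

    import pyLOM.utils

    istart, iend = pyLOM.utils.worksplit(0, 1000, 0)
    for i in range(istart, iend):
        pass  # process element i on this worker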

pyLOM.utils.writesplit(npoints, write_master)[source]#

Divide the write array among the processors.

pyLOM.utils.is_rank_or_serial(root=0)[source]#

Return True if the current rank is root, or always True in a serial run.

pyLOM.utils.pprint(rank, *args, **kwargs)[source]#

Print alternative for parallel codes. It works like Python’s print but takes a rank argument: pass the rank that should print, or a negative value so that every rank prints.
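
A sketch combining both helpers:

    import pyLOM.utils

    # Only the root rank (or a serial run) executes this block
    if pyLOM.utils.is_rank_or_serial(0):
        print('-- setup complete --')

    pyLOM.utils.pprint(0, 'printed by rank 0 only')
    pyLOM.utils.pprint(-1, 'printed by every rank')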

pyLOM.utils.mpi_barrier()[source]#

Implements the barrier

pyLOM.utils.mpi_send(f, dest, tag=0)[source]#

Implements the send operation

pyLOM.utils.mpi_recv(**kwargs)[source]#

Implements the receive operation

pyLOM.utils.mpi_sendrecv(buff, **kwargs)[source]#

Implements the sendrecv operation
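
A point-to-point sketch for a two-rank run; since mpi_recv takes **kwargs, we assume it forwards the usual source/tag keywords to the underlying MPI call:

    import pyLOM.utils

    msg = {'step': 10, 'time': 0.5}
    if pyLOM.utils.is_rank_or_serial(0):
        pyLOM.utils.mpi_send(msg, dest=1, tag=0)     # rank 0 sends
    else:
        msg = pyLOM.utils.mpi_recv(source=0, tag=0)  # rank 1 receives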

pyLOM.utils.mpi_scatter(sendbuff, root=0, do_split=False)[source]#

Send an array among the processors and split if necessary.

pyLOM.utils.mpi_gather(sendbuff, root=0, all=False)[source]#

Gather an array from all the processors.

pyLOM.utils.mpi_reduce(sendbuff, root=0, op='sum', all=False)[source]#

Reduce an array from all the processors.

pyLOM.utils.mpi_bcast(sendbuff, root=0)[source]#

Implements the broadcast operation
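
A sketch of the collectives, assuming mpi_scatter splits the root’s array across ranks, mpi_gather and mpi_reduce collect results back on root (or on every rank with all=True), and mpi_bcast copies the root’s value everywhere:

    import numpy as np
    import pyLOM.utils

    X  = np.arange(8, dtype=np.double)
    Xl = pyLOM.utils.mpi_scatter(X, root=0, do_split=True)   # local chunk
    Xg = pyLOM.utils.mpi_gather(Xl, root=0)                  # back on root
    s  = pyLOM.utils.mpi_reduce(Xl.sum(), root=0, op='sum')  # global sum
    n  = pyLOM.utils.mpi_bcast(X.shape[0], root=0)           # same value on all ranks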

pyLOM.utils.gpu_device(id=MPI_RANK, gpu_per_node=4)[source]#

Set up the GPU to be used by this process.
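
A sketch; from the signature, the default appears to map MPI rank r to device r modulo gpu_per_node, but check the implementation for the exact policy:

    import pyLOM.utils

    # Bind this process to one of the 4 GPUs of its node
    pyLOM.utils.gpu_device(gpu_per_node=4)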

pyLOM.utils.gpu_to_cpu(X)[source]#

Move an array from GPU to CPU

pyLOM.utils.cpu_to_gpu(X)[source]#

Move an array from CPU to GPU

pyLOM.utils.ascontiguousarray(X)[source]#

Return a contiguous (C order) copy of the array in memory.
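
A round-trip sketch; we assume cpu_to_gpu returns a device (e.g. cupy) array and gpu_to_cpu returns a host numpy array:

    import numpy as np
    import pyLOM.utils

    X  = np.random.rand(1000, 3)
    Xg = pyLOM.utils.cpu_to_gpu(X)         # host -> device
    Y  = pyLOM.utils.gpu_to_cpu(Xg)        # device -> host
    Yc = pyLOM.utils.ascontiguousarray(Y)  # ensure C-contiguous layout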