Basilisk CFD
Adaptive Cartesian mesh PDE framework
Loading...
Searching...
No Matches
multigrid-mpi.h File Reference

Go to the source code of this file.

Data Structures

struct  MpiBoundary
 

Macros

#define MULTIGRID_MPI   1
 

Functions

macro2 foreach_slice_x (int start, int end, int l)
 
macro2 foreach_slice_y (int start, int end, int l)
 
 for (int _d=0;_d< 2;_d++) static void *snd_x(int i
 
 foreach_slice_x (i, i+GHOSTS, level) for(int _s=0
 
 add_boundary (b)
 
trace double z_indexing (scalar index, bool leaves)
 

Variables

int dst
 
int int tag
 
int int int level = mpi_boundary_level
 
int int int scalar *list
 
int int int scalar MPI_Request *req
 
size_t size = 0
 
double *buf = (double *) malloc (size)
 
double *b = buf
 
_s< 1;_s++) for(scalar sb=s;sb.i< s.i+s.block;sb.i++, b++) memcpy(b, &sb[], sizeof(real));MPI_Isend(buf, size, MPI_BYTE, dst, tag, MPI_COMM_WORLD, req);return buf;}for(int _d=0;_d< 2;_d++) static void rcv_x(int i, int src, int tag, int level, scalar *list){ if(src==MPI_PROC_NULL) return;size_t size=0;for(int _s=0;_s< 1;_s++) size+=s.block;size *=pow((1<< level)+2 *GHOSTS, 2 - 1) *GHOSTS *sizeof(real);double *buf=(double *) malloc(size), *b=buf;MPI_Status s;MPI_Recv(buf, size, MPI_BYTE, src, tag, MPI_COMM_WORLD, &s);foreach_slice_x(i, i+GHOSTS, level) for(int _s=0;_s< 1;_s++) for(scalar sb=s;sb.i< s.i+s.block;sb.i++, b++) memcpy(&sb[], b, sizeof(real));free(buf);}tracestatic void mpi_boundary_level(const Boundary *b, scalar *list, int level){ scalar *list1=NULL;for(int _s=0;_s< 1;_s++) if(!is_constant(s) &&s.block > 0) list1=list_add(list1, s);if(!list1) return;prof_start("mpi_boundary_level");if(level< 0) level=depth();MpiBoundary *mpi=(MpiBoundary *) b;struct { int x, y, z;} dir={0, 1, 2};for(int _d=0;_d< 2;_d++) { int left, right;MPI_Cart_shift(mpi->cartcomm, dir.x, 1, &left, &right);MPI_Request reqs[2];void *buf[2];int npl=(1<< level)+2 *GHOSTS, nr=0;if((buf[0]=snd_x(npl - 2 *GHOSTS, right, 0, level, list1, &reqs[nr]))) nr++;if((buf[1]=snd_x(2, left, 1, level, list1, &reqs[nr]))) nr++;rcv_x(0, left, 0, level, list1);rcv_x(npl - GHOSTS, right, 1, level, list1);MPI_Status stats[nr];MPI_Waitall(nr, reqs, stats);free(buf[0]);free(buf[1]);} free(list1);prof_stop();}static void mpi_boundary_destroy(Boundary *b){ MpiBoundary *m=(MpiBoundary *) b;MPI_Comm_free(&m->cartcomm);free(m);}static void mpi_dimensions_error(int n){ fprintf(stderr, "%s:%d: error: the number of MPI processes must be equal to ", __FILE__, LINENO);if(n > 1) fprintf(stderr, "%dx", n);fprintf(stderr, "%d^i\n", 1<< 2);exit(1);}Boundary *mpi_boundary_new(){ MpiBoundary *m=qcalloc(1, MpiBoundary);int n=1;for(int _d=0;_d< 2;_d++) n *=Dimensions.x;if(npe() % n) mpi_dimensions_error(n);int j=npe()/n, 
i=0;while(j > 1) { if(j %(1<< 2)) mpi_dimensions_error(n);j/=1<< 2;i++;} for(int _d=0;_d< 2;_d++) Dimensions.x *=1<< i;MPI_Dims_create(npe(), 2, &Dimensions.x);MPI_Cart_create(MPI_COMM_WORLD, 2, &Dimensions.x, &Period.x, 0, &m->cartcomm);MPI_Cart_coords(m->cartcomm, pid(), 2, mpi_coords);struct { int x, y, z;} dir={0, 1, 2};for(int _d=0;_d< 2;_d++) { int l, r;MPI_Cart_shift(m->cartcomm, dir.x, 1, &l, &r);if(l !=MPI_PROC_NULL) periodic_boundary(left);if(r !=MPI_PROC_NULL) periodic_boundary(right);} Dimensions_scale=Dimensions.x;N/=Dimensions.x;int r=0;while(N > 1) N/=2, r++;grid-> depth = grid->maxdepth = r
 
 N = Dimensions.x*(1 << r)
 
grid->n = 1 << 2*depth()
 
grid->tn = npe()*grid->n
 
b->destroy = mpi_boundary_destroy
 

Macro Definition Documentation

◆ MULTIGRID_MPI

#define MULTIGRID_MPI   1

Definition at line 3 of file multigrid-mpi.h.

Function Documentation

◆ add_boundary()

add_boundary ( b  )

◆ for()

for ( )

◆ foreach_slice_x() [1/2]

foreach_slice_x ( i  ,
i + GHOSTS,
level   
)
pure virtual

◆ foreach_slice_x() [2/2]

macro2 foreach_slice_x ( int  start,
int  end,
int  l 
)

Definition at line 19 of file multigrid-mpi.h.

References GHOSTS, Point::i, l, Point::level, point, SET_DIMENSIONS, and x.

◆ foreach_slice_y()

macro2 foreach_slice_y ( int  start,
int  end,
int  l 
)

Definition at line 30 of file multigrid-mpi.h.

References GHOSTS, Point::i, l, Point::level, point, SET_DIMENSIONS, and x.

◆ z_indexing()

trace double z_indexing ( scalar  index,
bool  leaves 
)

Definition at line 233 of file multigrid-mpi.h.

References _i, boundary, cell(), depth, dimension, i, index, leaves, npe, and x.

Referenced by balance(), and tag().

Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ b

return b = buf

Definition at line 96 of file multigrid-mpi.h.

Referenced by adapt_wavelet(), add_boundary(), alloc_children(), apply_bc(), box_boundary_level(), box_boundary_level(), box_boundary_level_normal(), box_boundary_level_normal(), box_boundary_level_tangent(), bubbles_are_close(), buffer_new(), buffer_ref(), buffer_unref(), cartesian_init_scalar(), cartesian_init_tensor(), clamp(), coeffs1(), coeffs2(), dphidt(), eigenvalues(), embed_force(), embed_fraction_refine(), fine(), for(), for(), fraction_refine(), free_boundaries(), gpu_errors_create_buffer(), gpu_errors_delete_buffer(), gpu_errors_flush_buffer(), gpu_errors_init_buffer(), gpu_errors_scan_buffer(), gpu_errors_scan_bytes(), h_relax(), h_residual(), heavier_than(), if(), implicit_horizontal_diffusion(), inc_create_buffer(), inc_delete_buffer(), inc_flush_buffer(), inc_init_buffer(), inc_scan_buffer(), inc_scan_bytes(), init_grid(), line_center(), matrix(), matrix1(), max(), mem_assign(), mem_assign(), mem_free1d(), mempool_alloc(), mg_solve(), min(), minmod3(), minmodremap(), mpi_boundary_destroy(), mpi_boundary_level(), mpi_boundary_restriction(), msolve(), new_bid(), no_coalescence(), OMP_PARALLEL(), periodic_function(), pmaxsort(), post_create_buffer(), post_delete_buffer(), post_flush_buffer(), post_init_buffer(), post_scan_buffer(), post_scan_bytes(), ptotalsort(), quad_x(), quad_y(), quadratic(), rcv_pid_send(), rectangle_fraction(), relax(), relax_diffusion(), relax_GN(), relax_nh(), relax_nh(), relax_nh1(), relax_psi(), relax_viscosity(), remap_c(), residual(), residual_diffusion(), residual_GN(), residual_nh(), residual_nh3(), residual_psi(), residual_viscosity(), scalar_depends_from(), segBoxOverlap(), solve(), sort_long(), union_bound(), update_green_naghdi(), vertex_buffer_glColor3f(), vertical_diffusion(), vertical_viscosity(), volumez(), yy_get_next_buffer(), and zarea().

◆ buf

◆ depth

_s< 1; _s++) for (scalar sb = s; sb.i < s.i + s.block; sb.i++, b++) memcpy (b, &sb[], sizeof(real)); MPI_Isend (buf, size, MPI_BYTE, dst, tag, MPI_COMM_WORLD, req); return buf;}for (int _d = 0; _d < 2; _d++)static void rcv_x (int i, int src, int tag, int level, scalar * list){ if (src == MPI_PROC_NULL) return; size_t size = 0; for (int _s = 0; _s < 1; _s++) size += s.block; size *= pow((1 << level) + 2*GHOSTS, 2 - 1)*GHOSTS*sizeof(real); double * buf = (double *) malloc (size), * b = buf; MPI_Status s; MPI_Recv (buf, size, MPI_BYTE, src, tag, MPI_COMM_WORLD, &s); foreach_slice_x (i, i + GHOSTS, level) for (int _s = 0; _s < 1; _s++) for (scalar sb = s; sb.i < s.i + s.block; sb.i++, b++) memcpy (&sb[], b, sizeof(real)); free (buf);}tracestatic void mpi_boundary_level (const Boundary * b, scalar * list, int level){ scalar * list1 = NULL; for (int _s = 0; _s < 1; _s++) if (!is_constant(s) && s.block > 0) list1 = list_add (list1, s); if (!list1) return; prof_start ("mpi_boundary_level"); if (level < 0) level = depth(); MpiBoundary * mpi = (MpiBoundary *) b; struct { int x, y, z; } dir = {0,1,2}; for (int _d = 0; _d < 2; _d++) { int left, right; MPI_Cart_shift (mpi->cartcomm, dir.x, 1, &left, &right); MPI_Request reqs[2]; void * buf[2]; int npl = (1 << level) + 2*GHOSTS, nr = 0; if ((buf[0] = snd_x (npl - 2*GHOSTS, right, 0, level, list1, &reqs[nr]))) nr++; if ((buf[1] = snd_x (2, left, 1, level, list1, &reqs[nr]))) nr++; rcv_x (0, left, 0, level, list1); rcv_x (npl - GHOSTS, right, 1, level, list1); MPI_Status stats[nr]; MPI_Waitall (nr, reqs, stats); free (buf[0]); free (buf[1]); } free (list1); prof_stop();}static void mpi_boundary_destroy (Boundary * b){ MpiBoundary * m = (MpiBoundary *) b; MPI_Comm_free (&m->cartcomm); free (m);}static void mpi_dimensions_error (int n){ fprintf (stderr, "%s:%d: error: the number of MPI processes must be equal to ", __FILE__, LINENO); if (n > 1) fprintf (stderr, "%dx", n); fprintf (stderr, "%d^i\n", 1 << 2); exit (1); }Boundary * 
mpi_boundary_new(){ MpiBoundary * m = qcalloc (1, MpiBoundary); int n = 1; for (int _d = 0; _d < 2; _d++) n *= Dimensions.x; if (npe() % n) mpi_dimensions_error (n); int j = npe()/n, i = 0; while (j > 1) { if (j % (1 << 2)) mpi_dimensions_error (n); j /= 1 << 2; i++; } for (int _d = 0; _d < 2; _d++) Dimensions.x *= 1 << i; MPI_Dims_create (npe(), 2, &Dimensions.x); MPI_Cart_create (MPI_COMM_WORLD, 2, &Dimensions.x, &Period.x, 0, &m->cartcomm); MPI_Cart_coords (m->cartcomm, pid(), 2, mpi_coords); struct { int x, y, z; } dir = {0,1,2}; for (int _d = 0; _d < 2; _d++) { int l, r; MPI_Cart_shift (m->cartcomm, dir.x, 1, &l, &r); if (l != MPI_PROC_NULL) periodic_boundary (left); if (r != MPI_PROC_NULL) periodic_boundary (right); } Dimensions_scale = Dimensions.x; N /= Dimensions.x; int r = 0; while (N > 1) N /= 2, r++; grid-> depth = grid->maxdepth = r

Definition at line 218 of file multigrid-mpi.h.

Referenced by z_indexing().

◆ destroy

Definition at line 226 of file multigrid-mpi.h.

◆ dst

int dst

Definition at line 87 of file multigrid-mpi.h.

Referenced by gpu_reduction(), main(), str_append(), and vertical_diffusion().

◆ level

◆ list

int int int scalar* list

Definition at line 87 of file multigrid-mpi.h.

◆ N

N = Dimensions.x*(1 << r)

Definition at line 219 of file multigrid-mpi.h.

◆ n

grid->n = 1 << 2*depth()

Definition at line 220 of file multigrid-mpi.h.

◆ req

Initial value:
{
return NULL
int x
Definition common.h:76
int dst

Definition at line 88 of file multigrid-mpi.h.

◆ size

◆ tag

int int tag

◆ tn

grid->tn = npe()*grid->n

Definition at line 221 of file multigrid-mpi.h.