\tableofcontents
\end{frame}

+ \section{Boost.MPI}
+
+ \begin{frame}{Boost.MPI}
+ Boost.MPI is part of the Boost C++ libraries and provides C++ bindings for the Message Passing Interface (MPI).
+
+ Boost.MPI makes it easier to write distributed applications in C++ by wrapping the complex MPI API with C++-friendly abstractions, improving safety and reducing the amount of boilerplate code.
+
+ Key features of Boost.MPI:
+ \begin{itemize}
+ \item Simplified use of MPI through C++ bindings.
+ \item Support for complex data types through Boost.Serialization.
+ \item Easier management of distributed tasks and communication.
+ \item Compatible with common MPI implementations such as MPICH, Open MPI, MS MPI, etc.
+ \end{itemize}
+
+ Note: C API mapping to Boost.MPI: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi/c_mapping.html}{link}
+
+ {\footnotesize For more details see the Boost.MPI docs: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{link}}
+ \end{frame}
+
+ \begin{frame}[fragile]{Boost.MPI example}
+ \lstset{style=CStyle, caption=Hello World example with Boost.MPI}
+ \begin{lstlisting}
+ #include <boost/mpi.hpp>
+ #include <boost/serialization/string.hpp> // serialization support for std::string
+ #include <iostream>
+ #include <string>
+
+ // Namespace alias for convenience
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[]) {
+   // Initialize the MPI environment
+   mpi::environment env(argc, argv);
+   mpi::communicator world;
+
+   // Get the rank (ID) of the current process and the total number of processes
+   int rank = world.rank();
+   int size = world.size();
+
+   if (rank == 0) {
+     // If this is the root process (rank 0), send a message to another process
+     std::string message = "Hello from process 0";
+     world.send(1, 0, message); // Send to process 1
+     std::cout << "Process 0 sent: " << message << std::endl;
+   } else if (rank == 1) {
+     // If this is process 1, receive the message
+     std::string received_message;
+     world.recv(0, 0, received_message); // Receive from process 0
+     std::cout << "Process 1 received: " << received_message << std::endl;
+   }
+
+   return 0;
+ }
+ \end{lstlisting}
+ \end{frame}
+
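+ \begin{frame}{Building and running the example}
+ One typical way to build and launch the example above, assuming the source is saved as \texttt{hello.cpp} and that Boost.MPI and Boost.Serialization are installed (library names and commands may differ between systems and Boost builds):
+ \begin{itemize}
+ \item Compile and link: \texttt{mpic++ hello.cpp -o hello -lboost\_mpi -lboost\_serialization}
+ \item Launch with two processes: \texttt{mpirun -np 2 ./hello}
+ \end{itemize}
+
+ {\footnotesize The example expects at least two processes: with a single process the send to rank 1 has no matching receiver.}
+ \end{frame}
+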
\section{Advanced Send/Receive API}

\begin{frame}{Why Using \texttt{MPI\_Send} and \texttt{MPI\_Recv} Is Not Enough?}
@@ -79,7 +134,11 @@ \section{Advanced Send/Receive API}
\begin{frame}{\texttt{MPI\_Isend}}
Non-Blocking Send function. Initiates a send operation that returns immediately.

- \texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+ \texttt{boost::mpi::request boost::mpi::communicator::isend(int dest, int tag, const T* values, int n);}
+ }

Parameters:
@@ -92,13 +151,17 @@ \section{Advanced Send/Receive API}
\item comm: Communicator
\item request: Communication request handle
\end{itemize}
- Usage: Allows the sender to proceed with computation while the message is being sent.
+ { \footnotesize Usage: Allows the sender to proceed with computation while the message is being sent.}
\end{frame}
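+ \begin{frame}[fragile]{\texttt{MPI\_Isend} usage sketch}
+ A minimal sketch of the usual non-blocking send pattern with the C API: post the send, overlap it with other work, then complete it with \texttt{MPI\_Wait}. Illustrative only; error handling is omitted and the receiver simply uses a blocking \texttt{MPI\_Recv}.
+ \lstset{style=CStyle, caption={Non-blocking send with MPI\_Isend (sketch)}}
+ \begin{lstlisting}
+ #include <mpi.h>
+ #include <iostream>
+
+ int main(int argc, char* argv[]) {
+   MPI_Init(&argc, &argv);
+
+   int rank = 0;
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+   int data = 42;
+   if (rank == 0) {
+     MPI_Request request;
+     // Start the send; the call returns immediately
+     MPI_Isend(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
+     // ... overlap useful computation here ...
+     MPI_Wait(&request, MPI_STATUS_IGNORE); // complete the send
+   } else if (rank == 1) {
+     // A plain blocking receive matches the non-blocking send
+     MPI_Recv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+     std::cout << "Process 1 received " << data << std::endl;
+   }
+
+   MPI_Finalize();
+   return 0;
+ }
+ \end{lstlisting}
+ \end{frame}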
\begin{frame}{\texttt{MPI\_Irecv}}
Non-Blocking Receive function. Initiates a receive operation that returns immediately.

- \texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+ \texttt{boost::mpi::request boost::mpi::communicator::irecv(int source, int tag, T\& value);}
+ }

Parameters:
@@ -111,7 +174,7 @@ \section{Advanced Send/Receive API}
\item comm: Communicator
\item request: Communication request handle
\end{itemize}
- Usage: Allows the receiver to proceed with computation while waiting for the message.
+ { \footnotesize Usage: Allows the receiver to proceed with computation while waiting for the message.}
\end{frame}
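+ \begin{frame}[fragile]{Non-blocking send/receive with Boost.MPI (sketch)}
+ A minimal sketch of the same pattern with Boost.MPI: \texttt{isend} and \texttt{irecv} return \texttt{request} objects that are later completed with \texttt{wait()}. Illustrative only; it assumes at least two processes and omits error handling.
+ \lstset{style=CStyle, caption={Non-blocking communication with Boost.MPI (sketch)}}
+ \begin{lstlisting}
+ #include <boost/mpi.hpp>
+ #include <iostream>
+
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[]) {
+   mpi::environment env(argc, argv);
+   mpi::communicator world;
+
+   int value = 0;
+   if (world.rank() == 0) {
+     value = 42;
+     // Post the non-blocking send and keep working while it completes
+     mpi::request req = world.isend(1, 0, value);
+     // ... overlap useful computation here ...
+     req.wait(); // make sure the send has completed
+   } else if (world.rank() == 1) {
+     // Post the non-blocking receive, then wait for the data to arrive
+     mpi::request req = world.irecv(0, 0, value);
+     req.wait();
+     std::cout << "Process 1 received " << value << std::endl;
+   }
+   return 0;
+ }
+ \end{lstlisting}
+ \end{frame}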
\section{Synchronization}
@@ -137,7 +200,11 @@ \section{Synchronization}
\begin{frame}{\texttt{MPI\_Barrier}}
Global Synchronization function. It blocks processes until all of them have reached the barrier.

- \texttt{int MPI\_Barrier(MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Barrier(MPI\_Comm comm);} \\
+ \texttt{void boost::mpi::communicator::barrier();}
+ }

Usage:
@@ -175,7 +242,11 @@ \section{Collective operations}
\begin{frame}{Broadcast (\texttt{MPI\_Bcast})}
Send data from one process to all other processes.

- \texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);} \\
+ \texttt{void broadcast(const communicator\& comm, T\& value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

\begin{minipage}[t]{0.6\textwidth}
Parameters:
@@ -201,7 +272,11 @@ \section{Collective operations}
Can be seen as the opposite operation to broadcast.

- \texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);} \\
+ \texttt{void reduce(const communicator\& comm, const T\& in\_value, T\& out\_value, Op op, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

\begin{minipage}[t]{0.2\textwidth}
Supported operations:
@@ -224,7 +299,11 @@ \section{Collective operations}
\begin{frame}{\texttt{MPI\_Gather}}
Collect data from all processes to a single root process.

- \texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+ \texttt{void gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

\begin{minipage}[t]{0.6\textwidth}
Parameters:
@@ -245,7 +324,11 @@ \section{Collective operations}
\begin{frame}{\texttt{MPI\_Scatter}}
Distribute distinct chunks of data from root to all processes.

- \texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+ \texttt{void scatter(const communicator\& comm, const std::vector<T>\& in\_values, T\& out\_value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

\begin{minipage}[t]{0.6\textwidth}
Parameters:
@@ -266,15 +349,24 @@ \section{Collective operations}
\begin{frame}{\texttt{MPI\_Allgather}}
Gathers data from all processes and distributes the combined data to all processes.

- \texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+ \texttt{void all\_gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

Usage of this function reduces the need for separate gather and broadcast operations.
\end{frame}
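+ \begin{frame}[fragile]{\texttt{all\_gather} with Boost.MPI (sketch)}
+ A minimal sketch of \texttt{all\_gather} with Boost.MPI: every process contributes one value and every process receives the combined vector, so no separate broadcast is required. Illustrative only; error handling is omitted.
+ \lstset{style=CStyle, caption={all\_gather with Boost.MPI (sketch)}}
+ \begin{lstlisting}
+ #include <boost/mpi.hpp>
+ #include <boost/mpi/collectives.hpp> // all_gather
+ #include <iostream>
+ #include <vector>
+
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[]) {
+   mpi::environment env(argc, argv);
+   mpi::communicator world;
+
+   // Every process contributes its own rank ...
+   std::vector<int> ranks;
+   mpi::all_gather(world, world.rank(), ranks);
+
+   // ... and every process ends up with the complete list
+   std::cout << "Process " << world.rank() << " sees "
+             << ranks.size() << " ranks" << std::endl;
+   return 0;
+ }
+ \end{lstlisting}
+ \end{frame}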
\begin{frame}{All-to-All (\texttt{MPI\_Alltoall})}
Description: Each process sends data to and receives data from all other processes. It can be seen as transposing a matrix distributed across processes.

- \texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+ \texttt{void all\_to\_all(const communicator\& comm, const std::vector<T>\& in\_values, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }

Note: This operation is communication-intensive.
\end{frame}
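+ \begin{frame}[fragile]{Collective operations with Boost.MPI (sketch)}
+ A minimal sketch combining two of the collectives above with Boost.MPI: the root broadcasts a value to every process, and the ranks are then summed onto the root with \texttt{reduce} and \texttt{std::plus}. Illustrative only; error handling is omitted.
+ \lstset{style=CStyle, caption={broadcast and reduce with Boost.MPI (sketch)}}
+ \begin{lstlisting}
+ #include <boost/mpi.hpp>
+ #include <boost/mpi/collectives.hpp> // broadcast, reduce, ...
+ #include <functional> // std::plus
+ #include <iostream>
+
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[]) {
+   mpi::environment env(argc, argv);
+   mpi::communicator world;
+
+   // Root picks a value; broadcast delivers it to every process
+   int value = (world.rank() == 0) ? 100 : 0;
+   mpi::broadcast(world, value, 0);
+
+   // Sum the ranks of all processes onto the root
+   if (world.rank() == 0) {
+     int sum = 0;
+     mpi::reduce(world, world.rank(), sum, std::plus<int>(), 0);
+     std::cout << "Value: " << value << ", sum of ranks: " << sum << std::endl;
+   } else {
+     mpi::reduce(world, world.rank(), std::plus<int>(), 0);
+   }
+   return 0;
+ }
+ \end{lstlisting}
+ \end{frame}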
@@ -304,6 +396,7 @@ \section{Collective operations}
\begin{frame}{References}
\begin{enumerate}
\item MPI Standard: \href{https://www.mpi-forum.org/docs/}{https://www.mpi-forum.org/docs/}
+ \item Boost.MPI chapter of the Boost documentation: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{https://www.boost.org/doc/libs/1\_86\_0/doc/html/mpi.html}
\item Open MPI v4.0.7 documentation: \href{https://www.open-mpi.org/doc/v4.0/}{https://www.open-mpi.org/doc/v4.0/}
\end{enumerate}
\end{frame}