Commit 5031a6d

Merge pull request #27 from aobolensk/03-mpi-boost

2 parents 0584d68 + e6df232 commit 5031a6d
File tree

2 files changed: +108 -14 lines

03-mpi-api/03-mpi-api.tex

Lines changed: 104 additions & 11 deletions
@@ -64,6 +64,61 @@
 \tableofcontents
 \end{frame}

+\section{Boost.MPI}
+
+\begin{frame}{Boost.MPI}
+Boost.MPI is a part of the Boost C++ libraries that provides C++ bindings for the Message Passing Interface (MPI).
+
+Boost.MPI makes it easier to write distributed applications in C++ by wrapping the complex MPI API with C++-friendly abstractions, improving safety and reducing the amount of boilerplate code.
+
+Key Features of Boost.MPI:
+\begin{itemize}
+\item Simplified use of MPI with C++ bindings.
+\item Supports complex data types through Boost.Serialization.
+\item Easier management of distributed tasks and communication.
+\item Compatible with common MPI implementations like MPICH, OpenMPI, MS MPI, etc.
+\end{itemize}
+
+Note: C API mapping to Boost.MPI: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi/c_mapping.html}{link}
+
+{\footnotesize For more details see the Boost.MPI docs: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{link}}
+\end{frame}
+
+\begin{frame}[fragile]{Boost.MPI example}
+\lstset{style=CStyle, caption=Hello World example with Boost MPI}
+\begin{lstlisting}
+#include <boost/mpi.hpp>
+#include <iostream>
+
+// Namespace alias for convenience
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[]) {
+  // Initialize the MPI environment
+  mpi::environment env(argc, argv);
+  mpi::communicator world;
+
+  // Get the rank (ID) of the current process and the total number of processes
+  int rank = world.rank();
+  int size = world.size();
+
+  if (rank == 0) {
+    // If this is the root process (rank 0), send a message to another process
+    std::string message = "Hello from process 0";
+    world.send(1, 0, message); // Send to process 1
+    std::cout << "Process 0 sent: " << message << std::endl;
+  } else if (rank == 1) {
+    // If this is process 1, receive the message
+    std::string received_message;
+    world.recv(0, 0, received_message); // Receive from process 0
+    std::cout << "Process 1 received: " << received_message << std::endl;
+  }
+
+  return 0;
+}
+\end{lstlisting}
+\end{frame}
+
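For reference, a typical way to build and run the example above (the compiler wrapper and Boost library names vary between installations, so treat the exact command as an assumption):

mpic++ hello.cpp -o hello -lboost_mpi -lboost_serialization
mpirun -np 2 ./hello

Boost.MPI serializes std::string through Boost.Serialization, which is why both libraries are linked; the example needs at least two processes, since rank 0 sends to rank 1.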
 \section{Advanced Send/Receive API}

 \begin{frame}{Why Is Using \texttt{MPI\_Send} and \texttt{MPI\_Recv} Not Enough?}
@@ -79,7 +134,11 @@ \section{Advanced Send/Receive API}
 \begin{frame}{\texttt{MPI\_Isend}}
 Non-Blocking Send function. Initiates a send operation that returns immediately.

-\texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);}
+{
+\footnotesize
+\texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+\texttt{boost::mpi::request boost::mpi::communicator::isend(int dest, int tag, const T* values, int n);}
+}

 Parameters:

@@ -92,13 +151,17 @@ \section{Advanced Send/Receive API}
 \item comm: Communicator
 \item request: Communication request handle
 \end{itemize}
-Usage: Allows the sender to proceed with computation while the message is being sent.
+{\footnotesize Usage: Allows the sender to proceed with computation while the message is being sent.}
 \end{frame}

 \begin{frame}{\texttt{MPI\_Irecv}}
 Non-Blocking Receive function. Initiates a receive operation that returns immediately.

-\texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);}
+{
+\footnotesize
+\texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+\texttt{boost::mpi::request boost::mpi::communicator::irecv(int source, int tag, T\& value);}
+}

 Parameters:

@@ -111,7 +174,7 @@ \section{Advanced Send/Receive API}
 \item comm: Communicator
 \item request: Communication request handle
 \end{itemize}
-Usage: Allows the receiver to proceed with computation while waiting for the message.
+{\footnotesize Usage: Allows the receiver to proceed with computation while waiting for the message.}
 \end{frame}

 \section{Synchronization}
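Putting the two non-blocking calls together: a minimal Boost.MPI sketch (assuming at least two processes; only isend, irecv, and request::wait come from the slides, the surrounding code is illustrative):

#include <boost/mpi.hpp>
#include <iostream>
#include <string>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  if (world.rank() == 0) {
    std::string msg = "ping";
    mpi::request req = world.isend(1, 0, msg);  // Returns immediately
    // ... overlap useful computation with the transfer here ...
    req.wait();  // Block only when the send must be complete
  } else if (world.rank() == 1) {
    std::string msg;
    mpi::request req = world.irecv(0, 0, msg);  // Returns immediately
    // ... overlap useful computation with the transfer here ...
    req.wait();  // Ensure the message has arrived before using it
    std::cout << "Rank 1 received: " << msg << std::endl;
  }
  return 0;
}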
@@ -137,7 +200,11 @@ \section{Synchronization}
 \begin{frame}{\texttt{MPI\_Barrier}}
 Global Synchronization function. It blocks processes until all of them have reached the barrier.

-\texttt{int MPI\_Barrier(MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Barrier(MPI\_Comm comm);} \\
+\texttt{void boost::mpi::communicator::barrier();}
+}

 Usage:
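A minimal sketch of the Boost.MPI barrier shown above (the printed message is illustrative):

#include <boost/mpi.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // ... each rank finishes some local work of uneven duration ...
  world.barrier();  // No rank continues until every rank has reached this point

  if (world.rank() == 0) {
    std::cout << "All " << world.size() << " processes passed the barrier" << std::endl;
  }
  return 0;
}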

@@ -175,7 +242,11 @@ \section{Collective operations}
 \begin{frame}{Broadcast (\texttt{MPI\_Bcast})}
 Send data from one process to all other processes.

-\texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);} \\
+\texttt{void broadcast(const communicator\& comm, T\& value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 \begin{minipage}[t]{0.6\textwidth}
 Parameters:
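A minimal sketch of the Boost.MPI broadcast overload above (the variable name and the value 42 are illustrative):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int config = 0;
  if (world.rank() == 0) {
    config = 42;  // Initially only the root knows the value
  }

  mpi::broadcast(world, config, 0);  // Afterwards every rank holds the root's value
  std::cout << "Rank " << world.rank() << " sees config = " << config << std::endl;
  return 0;
}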
@@ -201,7 +272,11 @@ \section{Collective operations}

 Can be seen as the opposite operation to broadcast.

-\texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);} \\
+\texttt{void reduce(const communicator\& comm, const T\& in\_value, T\& out\_value, Op op, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 \begin{minipage}[t]{0.2\textwidth}
 Supported operations:
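A minimal sketch of the Boost.MPI reduce overload above, with std::plus<int>() as the operator (the operator and the contributed values are illustrative):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <functional>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int local = world.rank() + 1;  // Each rank contributes a different value
  int sum = 0;

  // Only rank 0 receives the combined result; `sum` stays 0 elsewhere
  mpi::reduce(world, local, sum, std::plus<int>(), 0);

  if (world.rank() == 0) {
    std::cout << "Sum over " << world.size() << " ranks: " << sum << std::endl;
  }
  return 0;
}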
@@ -224,7 +299,11 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_Gather}}
 Collect data from all processes to a single root process.

-\texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+\texttt{void gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 \begin{minipage}[t]{0.6\textwidth}
 Parameters:
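A minimal sketch of the Boost.MPI gather overload above (each rank contributes one int; the vector is filled only on the root):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int local = world.rank() * 10;      // This rank's contribution
  std::vector<int> all;               // Populated only on the root

  mpi::gather(world, local, all, 0);  // On root: all[i] is the value from rank i

  if (world.rank() == 0) {
    for (std::size_t i = 0; i < all.size(); ++i) {
      std::cout << "From rank " << i << ": " << all[i] << std::endl;
    }
  }
  return 0;
}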
@@ -245,7 +324,11 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_Scatter}}
 Distribute distinct chunks of data from root to all processes.

-\texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+\texttt{void scatter(const communicator\& comm, const std::vector<T>\& in\_values, T\& out\_value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 \begin{minipage}[t]{0.6\textwidth}
 Parameters:
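A minimal sketch of the Boost.MPI scatter overload above, the inverse direction of gather (root 0 prepares one element per rank; the values are illustrative):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::vector<int> chunks;
  if (world.rank() == 0) {
    for (int i = 0; i < world.size(); ++i) {
      chunks.push_back(i * i);  // One element per destination rank, prepared on the root only
    }
  }

  int mine = 0;
  mpi::scatter(world, chunks, mine, 0);  // Rank i receives chunks[i]
  std::cout << "Rank " << world.rank() << " got " << mine << std::endl;
  return 0;
}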
@@ -266,15 +349,24 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_AllGather}}
 Gather data from all processes and distribute the combined data to all processes.

-\texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+\texttt{void all\_gather(const communicator\& comm, const T\& in\_value,
+std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 Usage of this function reduces the need for separate gather and broadcast operations.
 \end{frame}
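A minimal sketch of the Boost.MPI all_gather overload above (every rank, not just a root, ends up with the full vector of contributions):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int local = 100 + world.rank();  // This rank's contribution
  std::vector<int> everyone;       // Filled on every rank

  mpi::all_gather(world, local, everyone);  // everyone[i] is the value from rank i

  std::cout << "Rank " << world.rank() << " sees " << everyone.size()
            << " values, first = " << everyone[0] << std::endl;
  return 0;
}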

 \begin{frame}{All-to-All (\texttt{MPI\_Alltoall})}
 Description: Each process sends data to and receives data from all other processes. It can be seen as transposing a matrix distributed across processes.

-\texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+{
+\footnotesize
+\texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+\texttt{void all\_to\_all(const communicator\& comm, const std::vector<T>\& in\_values, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+}

 Note: This operation is communication-intensive.
 \end{frame}
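A minimal sketch of the Boost.MPI all_to_all overload above (rank r prepares one element per destination; afterwards out[s] holds what rank s prepared for this rank, matching the matrix-transpose picture on the slide):

#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::vector<int> in(world.size());
  for (int d = 0; d < world.size(); ++d) {
    in[d] = world.rank() * 100 + d;  // Element this rank sends to rank d
  }

  std::vector<int> out;
  mpi::all_to_all(world, in, out);  // out[s] is the element received from rank s

  std::cout << "Rank " << world.rank() << " received from rank 0: " << out[0] << std::endl;
  return 0;
}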
@@ -304,6 +396,7 @@ \section{Collective operations}
 \begin{frame}{References}
 \begin{enumerate}
 \item MPI Standard: \href{https://www.mpi-forum.org/docs/}{https://www.mpi-forum.org/docs/}
+\item Boost.MPI chapter in the Boost documentation: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}
 \item Open MPI v4.0.7 documentation: \href{https://www.open-mpi.org/doc/v4.0/}{https://www.open-mpi.org/doc/v4.0/}
 \end{enumerate}
 \end{frame}

03-mpi-api/03-mpi-api.toc

Lines changed: 4 additions & 3 deletions
@@ -1,3 +1,4 @@
-\beamer@sectionintoc {1}{Advanced Send/Receive API}{3}{0}{1}
-\beamer@sectionintoc {2}{Synchronization}{6}{0}{2}
-\beamer@sectionintoc {3}{Collective operations}{8}{0}{3}
+\beamer@sectionintoc {1}{Boost.MPI}{3}{0}{1}
+\beamer@sectionintoc {2}{Advanced Send/Receive API}{5}{0}{2}
+\beamer@sectionintoc {3}{Synchronization}{8}{0}{3}
+\beamer@sectionintoc {4}{Collective operations}{10}{0}{4}
