/usr/include/dune/common/collectivecommunication.hh is in libdune-common-dev 2.2.1-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_COLLECTIVECOMMUNICATION_HH
#define DUNE_COLLECTIVECOMMUNICATION_HH
#include<iostream>
#include<complex>
#include<algorithm>
#include"exceptions.hh"
/*! \defgroup ParallelCommunication Parallel Communication
\ingroup Common
Dune offers an abstraction to the basic methods of parallel
communication. It allows one to switch parallel features on and off
without changing the code.
*/
/*!
\file
\brief An abstraction to the basic methods of parallel communication,
following the message-passing paradigm.
\ingroup ParallelCommunication
*/
namespace Dune
{
/* define some type that definitely differs from MPI_Comm */
struct No_Comm {};
/*! @brief Collective communication interface and sequential default implementation
CollectiveCommunication offers an abstraction to the basic methods
of parallel communication, following the message-passing
paradigm. It allows one to switch parallel features on and off without
changing the code. Currently only MPI and sequential code are
supported.
A CollectiveCommunication object is returned by all grids (also
the sequential ones) in order to allow code to be written in
a transparent way for sequential and parallel grids.
This class provides a default implementation for sequential grids.
The number of processes involved is 1; any sum, maximum, etc. simply
returns its input argument.
In specializations one can implement the real thing using appropriate
communication functions, e.g. there exists an implementation using
the Message Passing %Interface (MPI), see Dune::CollectiveCommunication<MPI_Comm>.
Moreover, the communication subsystem used by an implementation
is not visible in the interface, i.e. Dune grid implementations
are not restricted to MPI.
\ingroup ParallelCommunication
*/
template<typename C>
class CollectiveCommunication
{
public:
//! Construct default object
CollectiveCommunication()
{}
//! Construct from a communicator object; the argument is ignored by this sequential default implementation
CollectiveCommunication (const C&)
{}
//! Return the rank of this process; the rank is between 0 and size()-1
int rank () const
{
return 0;
}
//! Return the number of processes in the set; this is greater than 0
int size () const
{
return 1;
}
/** @brief Compute the sum of the argument over all processes and
return the result in every process. Assumes that T has an operator+
*/
template<typename T>
T sum (T& in) const // MPI does not know about const :-(
{
return in;
}
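// A minimal usage sketch, assuming a communicator object cc of this class and
// a hypothetical helper computeLocalContribution(); code written this way runs
// unchanged with the sequential default (size()==1, sum() returns its argument)
// and with a parallel specialization such as CollectiveCommunication<MPI_Comm>.
//
//   Dune::CollectiveCommunication<Dune::No_Comm> cc;
//   double local = computeLocalContribution();   // hypothetical helper
//   double global = cc.sum(local);                // == local when size()==1
//   if (cc.rank() == 0)
//     std::cout << "sum over " << cc.size() << " process(es): " << global << std::endl;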
/** @brief Compute the sum over all processes for each component of an array and return the result
in every process. Assumes that T has an operator+
*/
template<typename T>
int sum (T* inout, int len) const
{
return 0;
}
/** @brief Compute the product of the argument over all processes and
return the result in every process. Assumes that T has an operator*
*/
template<typename T>
T prod (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the product over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator*
*/
template<typename T>
int prod (T* inout, int len) const
{
return 0;
}
/** @brief Compute the minimum of the argument over all processes and
return the result in every process. Assumes that T has an operator<
*/
template<typename T>
T min (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the minimum over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator<
*/
template<typename T>
int min (T* inout, int len) const
{
return 0;
}
/** @brief Compute the maximum of the argument over all processes and
return the result in every process. Assumes that T has an operator<
*/
template<typename T>
T max (T& in) const // MPI does not know about const :-(
{
return in;
}
/** @brief Compute the maximum over all processes
for each component of an array and return the result
in every process. Assumes that T has an operator<
*/
template<typename T>
int max (T* inout, int len) const
{
return 0;
}
/** @brief Wait until all processes have arrived at this point in the program.
*/
int barrier () const
{
return 0;
}
/** @brief Distribute an array from the process with rank root to all other processes
*/
template<typename T>
int broadcast (T* inout, int len, int root) const
{
return 0;
}
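// A minimal broadcast sketch, assuming a communicator object cc of this class;
// the sequential default below leaves the buffer untouched, while an MPI
// specialization would copy the root's data to every other rank.
//
//   double values[3] = { 0.0, 0.0, 0.0 };
//   if (cc.rank() == 0) { values[0] = 1.0; values[1] = 2.0; values[2] = 3.0; }
//   cc.broadcast(values, 3, 0);   // afterwards every rank holds { 1, 2, 3 }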
/** @brief Gather arrays on root task.
*
* Each process sends its in array of length len to the root process
* (including the root itself). In the root process these arrays are stored in rank
* order in the out array which must have size len * number of processes.
* @param[in] in The send buffer with the data to send.
* @param[out] out The buffer to store the received data in. Might have length zero on non-root
* tasks.
* @param[in] len The number of elements to send on each task.
* @param[in] root The root task that gathers the data.
*/
template<typename T>
int gather (T* in, T* out, int len, int root) const // note: with only one process, out has the same size as in
{
for (int i=0; i<len; i++)
out[i] = in[i];
return 0;
}
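// A minimal gather sketch, assuming a communicator object cc of this class;
// the out buffer only needs to hold len*size() elements on the root task.
// With the sequential default below the call degenerates to a plain copy.
//
//   const int len = 2;
//   double in[len];
//   for (int i = 0; i < len; ++i) in[i] = cc.rank();        // local contribution
//   double* out = (cc.rank() == 0) ? new double[len * cc.size()] : 0;
//   cc.gather(in, out, len, 0);                              // rank-ordered on the root
//   delete[] out;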
/** @brief Scatter an array from a root task to all other tasks.
*
* The root process sends the elements with indices k*len to (k+1)*len-1 of its array to
* task k, which stores them at indices 0 to len-1.
* @param[in] send The array to scatter. Might have length zero on non-root
* tasks.
* @param[out] recv The buffer to store the received data in. Upon completion of the
* method, task k's recv buffer holds the k-th block of the root task's
* send buffer.
* @param[in] len The number of elements in the recv buffer.
* @param[in] root The root task that scatters the data.
*/
template<typename T>
int scatter (T* send, T* recv, int len, int root) const // note: with only one process, recv has the same size as send
{
for (int i=0; i<len; i++)
recv[i] = send[i];
return 0;
}
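// A minimal scatter sketch, assuming a communicator object cc of this class;
// block k of the root's send buffer ends up in the recv buffer of task k.
// With the sequential default below this is again a plain copy.
//
//   const int len = 2;
//   double recv[len];
//   double* send = (cc.rank() == 0) ? new double[len * cc.size()] : 0;
//   // ... on the root, fill send with one block of len entries per task ...
//   cc.scatter(send, recv, len, 0);
//   delete[] send;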
/**
* @brief Gathers data from all tasks and distribute it to all.
*
* The block of data sent from the jth process is received by every
* process and placed in the jth block of the buffer recvbuf.
*
* @param[in] sbuf The buffer with the data to send. Has to have the same length on
* each task.
* @param[in] count The number of elements to send by any process.
* @param[out] rbuf The receive buffer for the data. Has to be of size
* notasks*count, with notasks being the number of tasks in the communicator.
*/
template<typename T>
int allgather(T* sbuf, int count, T* rbuf) const
{
for(T* end=sbuf+count; sbuf < end; ++sbuf, ++rbuf)
*rbuf=*sbuf;
return 0;
}
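// A minimal allgather sketch, assuming a communicator object cc of this class;
// unlike gather, every process (not only the root) receives the rank-ordered
// concatenation of all send buffers.
//
//   double sbuf[1] = { static_cast<double>(cc.rank()) };
//   double* rbuf = new double[cc.size()];   // one entry per process, on every rank
//   cc.allgather(sbuf, 1, rbuf);
//   delete[] rbuf;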
/**
* @brief Compute a reduction (given by BinaryFunction) over all processes
* for each component of an array and return the result
* in every process.
*
* The template parameter BinaryFunction is the type of
* the binary function to use for the computation
*
* @param inout The array to compute on.
* @param len The number of components in the array
*/
template<typename BinaryFunction, typename Type>
int allreduce(Type* inout, int len) const
{
return 0;
}
/**
* @brief Compute a reduction (given by BinaryFunction) over all processes
* for each component of an array and return the result
* in every process.
*
* The template parameter BinaryFunction is the type of
* the binary function to use for the computation
*
* @param in The array to compute on.
* @param out The array to store the results in.
* @param len The number of components in the array
*/
template<typename BinaryFunction, typename Type>
void allreduce(Type* in, Type* out, int len) const
{
std::copy(in, in+len, out);
return;
}
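// A minimal allreduce sketch, assuming a communicator object cc of this class
// and std::plus<double> (from <functional>) as the binary reduction function;
// the sequential default below ignores the functor and simply copies in to out.
//
//   double in[2]  = { 1.0, 2.0 };
//   double out[2] = { 0.0, 0.0 };
//   cc.allreduce<std::plus<double> >(in, out, 2);  // out equals in on one process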
};
}
#endif