/usr/include/dune/grid/yaspgrid/partitioning.hh is in libdune-grid-dev 2.5.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#ifndef DUNE_GRID_YASPGRID_PARTITIONING_HH
#define DUNE_GRID_YASPGRID_PARTITIONING_HH
/** \file
* \brief This file provides tools to partition YaspGrids.
* If you want to write your own partitioner, inherit from YLoadBalance
* and implement the loadbalance() method. You can also browse this file
* for already available useful partitioners, like YaspFixedSizePartitioner.
*/
#include<array>
#include<dune/common/power.hh>
namespace Dune
{
/** \brief a base class for the yaspgrid partitioning strategy
* The name might be irritating. It will probably change to YaspPartitionerBase
* in a 3.0 release.
*/
template<int d>
class YLoadBalance
{
public:
// Integer tuple with one entry per grid dimension (grid sizes, processor counts).
typedef std::array<int, d> iTupel;
// Virtual destructor: instances are used polymorphically through base pointers.
virtual ~YLoadBalance() {}
/** \brief Compute a processor layout for a structured grid.
 *
 *  Pure virtual: derived classes implement the actual strategy.
 *
 *  \param[in]  size  number of grid cells per coordinate direction (first, unnamed argument)
 *  \param[in]  P     total number of processors to distribute over
 *  \param[out] dims  resulting number of processors per coordinate direction;
 *                    implementations are expected to make the product of the
 *                    entries equal P
 */
virtual void loadbalance(const iTupel&, int, iTupel&) const = 0;
};
/** \brief Implement the default load balance strategy of yaspgrid
*/
template<int d>
class YLoadBalanceDefault : public YLoadBalance<d>
{
public:
  typedef std::array<int, d> iTupel;
  virtual ~YLoadBalanceDefault() {}

  /** \brief Distribute a structured grid across a set of processors
   *
   *  Enumerates every factorization of P into d factors and selects the one
   *  that minimizes the largest per-direction cell count (an estimate of the
   *  maximum per-processor load along one axis). Factorizations that do not
   *  divide the grid size evenly are penalized by a factor of 3.
   *
   *  \param [in]  size Number of elements in each coordinate direction, for the entire grid
   *  \param [in]  P    Number of processors
   *  \param [out] dims Number of processors per coordinate direction; the
   *                    product of the entries equals P
   */
  virtual void loadbalance (const iTupel& size, int P, iTupel& dims) const
  {
    double opt=1E100;   // "no candidate seen yet"; any real candidate beats this
    iTupel trydims;
    optimize_dims(d-1,size,P,dims,trydims,opt);
  }

private:
  /** \brief Recursively enumerate factorizations of P over dimensions 0..i.
   *
   *  For each divisor k of P, assign k processors to direction i and recurse
   *  with P/k on the remaining directions; direction 0 receives whatever is
   *  left. Whenever a complete factorization is reached, its cost (largest
   *  per-direction load) is compared against the best seen so far, and
   *  \a dims / \a opt are updated if it is better.
   */
  void optimize_dims (int i, const iTupel& size, int P, iTupel& dims, iTupel& trydims, double &opt ) const
  {
    if (i>0) // test all subdivisions recursively
    {
      for (int k=1; k<=P; k++)
        if (P%k==0)
        {
          // P divisible by k: try k processors in direction i
          trydims[i] = k;
          optimize_dims(i-1,size,P/k,dims,trydims,opt);
        }
    }
    else
    {
      // found a possible combination: the remaining factor goes to direction 0
      trydims[0] = P;
      // cost of this combination = largest per-direction load
      double m = -1.0;
      for (int k=0; k<d; k++)
      {
        double mm=((double)size[k])/((double)trydims[k]);
        // Penalize uneven subdivisions. Exact integer modulo replaces the
        // former fmod(...) > 0.0001 test: for positive integer operands the
        // two are equivalent, but this needs no <cmath> (which this file
        // never included) and involves no floating-point tolerance.
        if (size[k] % trydims[k] != 0) mm*=3;
        if ( mm > m ) m = mm;
      }
      // keep the best (smallest-cost) factorization seen so far
      if (m<opt)
      {
        opt = m;
        dims = trydims;
      }
    }
  }
};
/** \brief Implement yaspgrid load balance strategy for P=x^{dim} processors
*/
template<int d>
class YLoadBalancePowerD : public YLoadBalance<d>
{
public:
  typedef std::array<int, d> iTupel;
  virtual ~YLoadBalancePowerD() {}

  /** \brief Partition onto P = n^d processors as an n x ... x n grid.
   *
   *  Performs a linear search for an integer n with n^d == P and assigns
   *  n processors to every coordinate direction.
   *
   *  \param [in]  size Number of elements per direction (unused by this strategy)
   *  \param [in]  P    Number of processors; must be a perfect d-th power
   *  \param [out] dims n in every entry on success
   *  \throws GridError if P is not a d-th power of an integer
   */
  virtual void loadbalance (const iTupel& size, int P, iTupel& dims) const
  {
    for (int candidate = 1; candidate <= P; ++candidate)
    {
      if (Power<d>::eval(candidate) == P)
      {
        dims.fill(candidate);
        return;
      }
    }
    DUNE_THROW(GridError, "Loadbalancing failed: your number of processes needs to be a " << d << "-th power.");
  }
};
/** \brief Implement partitioner that gets a fixed partitioning from an array
* If the given partitioning doesn't match the number of processors, the grid should
* be distributed to, an exception is thrown.
*/
template<int d>
class YaspFixedSizePartitioner : public YLoadBalance<d>
{
public:
  /** \brief Store the user-prescribed processor layout. */
  YaspFixedSizePartitioner(const std::array<int,d>& dims) : _dims(dims) {}

  virtual ~YaspFixedSizePartitioner() {}

  /** \brief Hand out the stored layout, verifying it matches P processors.
   *
   *  \param [in]  size Grid size per direction (ignored; unnamed)
   *  \param [in]  P    Number of processors the grid is distributed to
   *  \param [out] dims Copy of the layout passed at construction
   *  \throws Dune::Exception if the product of the stored entries differs from P
   */
  virtual void loadbalance(const std::array<int,d>&, int P, std::array<int,d>& dims) const
  {
    // product of the prescribed per-direction processor counts
    int nProcs = 1;
    for (const int entry : _dims)
      nProcs *= entry;
    if (nProcs != P)
      DUNE_THROW(Dune::Exception,"Your processor number doesn't match your partitioning information");
    dims = _dims;
  }

private:
  std::array<int,d> _dims;   // fixed layout supplied by the user
};
}
#endif