MueLu_TentativePFactory_kokkos_def.hpp
1 // @HEADER
2 //
3 // ***********************************************************************
4 //
5 // MueLu: A package for multigrid based preconditioning
6 // Copyright 2012 Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact
39 // Jonathan Hu (jhu@sandia.gov)
40 // Andrey Prokopenko (aprokop@sandia.gov)
41 // Ray Tuminaro (rstumin@sandia.gov)
42 //
43 // ***********************************************************************
44 //
45 // @HEADER
46 #ifndef MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP
47 #define MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP
48 
49 #ifdef HAVE_MUELU_KOKKOS_REFACTOR
50 
51 #include "Kokkos_UnorderedMap.hpp"
52 
53 #include "MueLu_TentativePFactory_kokkos_decl.hpp"
54 
55 #include "MueLu_Aggregates_kokkos.hpp"
56 #include "MueLu_AmalgamationFactory.hpp"
57 #include "MueLu_AmalgamationInfo.hpp"
58 #include "MueLu_CoarseMapFactory_kokkos.hpp"
59 #include "MueLu_MasterList.hpp"
60 #include "MueLu_NullspaceFactory_kokkos.hpp"
61 #include "MueLu_PerfUtils.hpp"
62 #include "MueLu_Monitor.hpp"
63 #include "MueLu_Utilities_kokkos.hpp"
64 
65 namespace MueLu {
66 
67  namespace { // anonymous
68 
69  template<class LocalOrdinal, class View>
70  class ReduceMaxFunctor{
71  public:
72  ReduceMaxFunctor(View view) : view_(view) { }
73 
74  KOKKOS_INLINE_FUNCTION
75  void operator()(const LocalOrdinal &i, LocalOrdinal& vmax) const {
76  if (vmax < view_(i))
77  vmax = view_(i);
78  }
79 
80  KOKKOS_INLINE_FUNCTION
81  void join (volatile LocalOrdinal& dst, const volatile LocalOrdinal& src) const {
82  if (dst < src) {
83  dst = src;
84  }
85  }
86 
87  KOKKOS_INLINE_FUNCTION
88  void init (LocalOrdinal& dst) const {
89  dst = 0;
90  }
91  private:
92  View view_;
93  };
94 
95  // local QR decomposition
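 // For each aggregate, this functor gathers the aggregate's rows of the fine
 // nullspace into a local m x n block, computes a QR factorization of that block,
 // writes the upper-triangular R into the coarse nullspace, and scatters the
 // entries of Q into the auxiliary CRS arrays (rowsAux/colsAux/valsAux) of the
 // tentative prolongator.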
96  template<class LOType, class GOType, class SCType,class DeviceType, class NspType, class aggRowsType, class maxAggDofSizeType, class agg2RowMapLOType, class statusType, class rowsType, class rowsAuxType, class colsAuxType, class valsAuxType>
97  class LocalQRDecompFunctor {
98  private:
99  typedef LOType LO;
100  typedef GOType GO;
101  typedef SCType SC;
102 
103  typedef typename DeviceType::execution_space execution_space;
104  typedef Kokkos::ArithTraits<SC> ATS;
105  typedef typename ATS::magnitudeType Magnitude;
106 
107  // shared_matrix: unmanaged team-scratch matrix type (needed by the shared_matrix uses below)
108  typedef Kokkos::View<SC**, typename execution_space::scratch_memory_space, Kokkos::MemoryUnmanaged> shared_matrix;
109 
110  private:
111 
112  NspType fineNS;
113  NspType coarseNS;
114  aggRowsType aggRows;
115  maxAggDofSizeType maxAggDofSize; //< maximum number of dofs in aggregate (max size of aggregate * numDofsPerNode)
116  agg2RowMapLOType agg2RowMapLO;
117  statusType statusAtomic;
118  rowsType rows;
119  rowsAuxType rowsAux;
120  colsAuxType colsAux;
121  valsAuxType valsAux;
122  bool doQRStep;
123  public:
124  LocalQRDecompFunctor(NspType fineNS_, NspType coarseNS_, aggRowsType aggRows_, maxAggDofSizeType maxAggDofSize_, agg2RowMapLOType agg2RowMapLO_, statusType statusAtomic_, rowsType rows_, rowsAuxType rowsAux_, colsAuxType colsAux_, valsAuxType valsAux_, bool doQRStep_) :
125  fineNS(fineNS_),
126  coarseNS(coarseNS_),
127  aggRows(aggRows_),
128  maxAggDofSize(maxAggDofSize_),
129  agg2RowMapLO(agg2RowMapLO_),
130  statusAtomic(statusAtomic_),
131  rows(rows_),
132  rowsAux(rowsAux_),
133  colsAux(colsAux_),
134  valsAux(valsAux_),
135  doQRStep(doQRStep_)
136  { }
137 
138  KOKKOS_INLINE_FUNCTION
139  void operator() ( const typename Kokkos::TeamPolicy<execution_space>::member_type & thread, size_t& nnz) const {
140  auto agg = thread.league_rank();
141 
142  // size of aggregate: number of DOFs in aggregate
143  auto aggSize = aggRows(agg+1) - aggRows(agg);
144 
145  const SC one = ATS::one();
146  const SC two = one + one;
147  const SC zero = ATS::zero();
148  const auto zeroM = ATS::magnitude(zero);
149 
150  int m = aggSize;
151  int n = fineNS.extent(1);
152 
153  // calculate row offset for coarse nullspace
154  Xpetra::global_size_t offset = agg * n;
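 // Here m is the number of fine DOFs in the aggregate (rows of the local block),
 // n is the nullspace dimension (columns), and rows [offset, offset+n) of
 // coarseNS are the block owned by this aggregate.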
155 
156  if (doQRStep) {
157 
158  // Extract the piece of the nullspace corresponding to the aggregate
159  shared_matrix r(thread.team_shmem(), m, n); // A (initially), R (at the end)
160  for (int j = 0; j < n; j++)
161  for (int k = 0; k < m; k++)
162  r(k,j) = fineNS(agg2RowMapLO(aggRows(agg)+k),j);
163 #if 0
164  printf("A\n");
165  for (int i = 0; i < m; i++) {
166  for (int j = 0; j < n; j++)
167  printf(" %5.3lf ", r(i,j));
168  printf("\n");
169  }
170 #endif
171 
172  // Calculate QR decomposition (standard)
173  shared_matrix q(thread.team_shmem(), m, m); // Q
174  if (m >= n) {
175  bool isSingular = false;
176 
177  // Initialize Q^T
178  auto qt = q;
179  for (int i = 0; i < m; i++) {
180  for (int j = 0; j < m; j++)
181  qt(i,j) = zero;
182  qt(i,i) = one;
183  }
184 
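 // Householder QR: for column k, form the normalized Householder vector
 //   u = (x - ||x|| e_1) / ||x - ||x|| e_1||   with x = r(k:m-1,k),
 // store u in r(k:m-1,k), apply H_k = I - 2 u u^T to the remaining columns of r
 // and accumulate it into Q^T, then overwrite column k of r with (||x||, 0, ..., 0)^T.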
185  for (int k = 0; k < n; k++) { // we go up to n (instead of n-1) so that the last column is normalized too
186  // FIXME_KOKKOS: use team
187  Magnitude s = zeroM, norm, norm_x;
188  for (int i = k+1; i < m; i++)
189  s += pow(ATS::magnitude(r(i,k)), 2);
190  norm = sqrt(pow(ATS::magnitude(r(k,k)), 2) + s);
191 
192  if (norm == zero) {
193  isSingular = true;
194  break;
195  }
196 
197  r(k,k) -= norm*one;
198 
199  norm_x = sqrt(pow(ATS::magnitude(r(k,k)), 2) + s);
200  if (norm_x == zeroM) {
201  // We have a single diagonal element in the column.
202  // No reflections required. Just need to restore r(k,k).
203  r(k,k) = norm*one;
204  continue;
205  }
206 
207  // FIXME_KOKKOS: use team
208  for (int i = k; i < m; i++)
209  r(i,k) /= norm_x;
210 
211  // Update R(k:m,k+1:n)
212  for (int j = k+1; j < n; j++) {
213  // FIXME_KOKKOS: use team in the loops
214  SC si = zero;
215  for (int i = k; i < m; i++)
216  si += r(i,k) * r(i,j);
217  for (int i = k; i < m; i++)
218  r(i,j) -= two*si * r(i,k);
219  }
220 
221  // Update Q^T (k:m,k:m)
222  for (int j = k; j < m; j++) {
223  // FIXME_KOKKOS: use team in the loops
224  SC si = zero;
225  for (int i = k; i < m; i++)
226  si += r(i,k) * qt(i,j);
227  for (int i = k; i < m; i++)
228  qt(i,j) -= two*si * r(i,k);
229  }
230 
231  // Fix R(k:m,k)
232  r(k,k) = norm*one;
233  for (int i = k+1; i < m; i++)
234  r(i,k) = zero;
235  }
236 
237 #if 0
238  // Q = (Q^T)^T
239  for (int i = 0; i < m; i++)
240  for (int j = 0; j < i; j++) {
241  SC tmp = qt(i,j);
242  qt(i,j) = qt(j,i);
243  qt(j,i) = tmp;
244  }
245 #endif
246 
247  // Build coarse nullspace using the upper triangular part of R
248  for (int j = 0; j < n; j++)
249  for (int k = 0; k <= j; k++)
250  coarseNS(offset+k,j) = r(k,j);
251 
252  if (isSingular) {
253  statusAtomic(1) = true;
254  return;
255  }
256 
257  } else {
258  // Special handling for m < n (i.e. single node aggregates in structural mechanics)
259 
260  // The local QR decomposition is not possible in the "overconstrained"
261  // case (i.e. number of columns in qr > number of rows), which
262  // corresponds to #DOFs in Aggregate < n. For usual problems this
263  // is only possible for single node aggregates in structural mechanics.
264  // (Similar problems may arise in discontinuous Galerkin problems...)
265  // We bypass the QR decomposition and use an identity block in the
266  // tentative prolongator for the single node aggregate and transfer the
267  // corresponding fine level null space information 1-to-1 to the coarse
268  // level null space part.
269 
270  // NOTE: The resulting tentative prolongation operator has
271  // (m*DofsPerNode-n) zero columns leading to a singular
272  // coarse level operator A. To deal with that one has the following
273  // options:
274  // - Use the "RepairMainDiagonal" flag in the RAPFactory (default:
275  // false) to add some identity block to the diagonal of the zero rows
276  // in the coarse level operator A, such that standard level smoothers
277  // can be used again.
278  // - Use special (projection-based) level smoothers, which can deal
279  // with singular matrices (very application specific)
280  // - Adapt the code below to avoid zero columns. However, we do not
281  // support a variable number of DOFs per node in MueLu/Xpetra which
282  // makes the implementation really hard.
283  //
284  // FIXME: do we need to check for singularity here somehow? Zero
285  // columns would be easy but linear dependency would require proper QR.
286 
287  // R = extended (by adding identity rows) qr
288  for (int j = 0; j < n; j++)
289  for (int k = 0; k < n; k++)
290  if (k < m)
291  coarseNS(offset+k,j) = r(k,j);
292  else
293  coarseNS(offset+k,j) = (k == j ? one : zero);
294 
295  // Q = I (rectangular)
296  for (int i = 0; i < m; i++)
297  for (int j = 0; j < n; j++)
298  q(i,j) = (j == i ? one : zero);
299  }
300 
301  // Process each row in the local Q factor and fill helper arrays to assemble P
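 // Each row owns n (= NSDim) reserved slots in colsAux/valsAux starting at
 // rowsAux(localRow); slots left untouched here keep their INVALID/zero
 // initialization and are removed in the later compression stage.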
302  for (int j = 0; j < m; j++) {
303  LO localRow = agg2RowMapLO(aggRows(agg)+j);
304  size_t rowStart = rowsAux(localRow);
305  size_t lnnz = 0;
306  for (int k = 0; k < n; k++) {
307  // skip zeros
308  if (q(j,k) != zero) {
309  colsAux(rowStart+lnnz) = offset + k;
310  valsAux(rowStart+lnnz) = q(j,k);
311  lnnz++;
312  }
313  }
314  rows(localRow+1) = lnnz;
315  nnz += lnnz;
316  }
317 
318 #if 0
319  printf("R\n");
320  for (int i = 0; i < m; i++) {
321  for (int j = 0; j < n; j++)
322  printf(" %5.3lf ", coarseNS(i,j));
323  printf("\n");
324  }
325 
326  printf("Q\n");
327  for (int i = 0; i < aggSize; i++) {
328  for (int j = 0; j < aggSize; j++)
329  printf(" %5.3lf ", q(i,j));
330  printf("\n");
331  }
332 #endif
333  } else {
335  // "no-QR" option //
337  // Local Q factor is just the fine nullspace support over the current aggregate.
338  // Local R factor is the identity.
339  // TODO I have not implemented any special handling for aggregates that are too
340  // TODO small to locally support the nullspace, as is done in the standard QR
341  // TODO case above.
342 
343  for (int j = 0; j < m; j++) {
344  LO localRow = agg2RowMapLO(aggRows(agg)+j);
345  size_t rowStart = rowsAux(localRow);
346  size_t lnnz = 0;
347  for (int k = 0; k < n; k++) {
348  const SC qr_jk = fineNS(localRow,k);
349  // skip zeros
350  if (qr_jk != zero) {
351  colsAux(rowStart+lnnz) = offset + k;
352  valsAux(rowStart+lnnz) = qr_jk;
353  lnnz++;
354  }
355  }
356  rows(localRow+1) = lnnz;
357  nnz += lnnz;
358  }
359 
360  for (int j = 0; j < n; j++)
361  coarseNS(offset+j,j) = one;
362 
363  }
364 
365  }
366 
367  // amount of shared memory
368  size_t team_shmem_size( int team_size ) const {
369  if (doQRStep) {
370  int m = maxAggDofSize;
371  int n = fineNS.extent(1);
372  return shared_matrix::shmem_size(m, n) + // r
373  shared_matrix::shmem_size(m, m); // q
374  } else
375  return 0;
376  }
377  };
378 
379  } // namespace anonymous
380 
381  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class DeviceType>
382  RCP<const ParameterList> TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::GetValidParameterList() const {
383  RCP<ParameterList> validParamList = rcp(new ParameterList());
384 
385 #define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
386  SET_VALID_ENTRY("tentative: calculate qr");
387  SET_VALID_ENTRY("tentative: build coarse coordinates");
388 #undef SET_VALID_ENTRY
389 
390  validParamList->set< RCP<const FactoryBase> >("A", Teuchos::null, "Generating factory of the matrix A");
391  validParamList->set< RCP<const FactoryBase> >("Aggregates", Teuchos::null, "Generating factory of the aggregates");
392  validParamList->set< RCP<const FactoryBase> >("Nullspace", Teuchos::null, "Generating factory of the nullspace");
393  validParamList->set< RCP<const FactoryBase> >("UnAmalgamationInfo", Teuchos::null, "Generating factory of UnAmalgamationInfo");
394  validParamList->set< RCP<const FactoryBase> >("CoarseMap", Teuchos::null, "Generating factory of the coarse map");
395  validParamList->set< RCP<const FactoryBase> >("Coordinates", Teuchos::null, "Generating factory of the coordinates");
396 
397  // Make sure we don't recursively validate options for the matrixmatrix kernels
398  ParameterList norecurse;
399  norecurse.disableRecursiveValidation();
400  validParamList->set<ParameterList> ("matrixmatrix: kernel params", norecurse, "MatrixMatrix kernel parameters");
401 
402  return validParamList;
403  }
404 
405  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class DeviceType>
406  void TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::DeclareInput(Level& fineLevel, Level& coarseLevel) const {
407 
408  const ParameterList& pL = GetParameterList();
409 
410  Input(fineLevel, "A");
411  Input(fineLevel, "Aggregates");
412  Input(fineLevel, "Nullspace");
413  Input(fineLevel, "UnAmalgamationInfo");
414  Input(fineLevel, "CoarseMap");
415  if( fineLevel.GetLevelID() == 0 &&
416  fineLevel.IsAvailable("Coordinates", NoFactory::get()) && // we have coordinates (provided by user app)
417  pL.get<bool>("tentative: build coarse coordinates") ) { // and we want coordinates on other levels
418  bTransferCoordinates_ = true; // then set the transfer coordinates flag to true
419  Input(fineLevel, "Coordinates");
420  } else if (bTransferCoordinates_) {
421  Input(fineLevel, "Coordinates");
422  }
423  }
424 
425  template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class DeviceType>
426  void TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::Build(Level& fineLevel, Level& coarseLevel) const {
427  return BuildP(fineLevel, coarseLevel);
428  }
429 
430  template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class DeviceType>
431  void TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::BuildP(Level& fineLevel, Level& coarseLevel) const {
432  FactoryMonitor m(*this, "Build", coarseLevel);
433 
434  auto A = Get< RCP<Matrix> > (fineLevel, "A");
435  auto aggregates = Get< RCP<Aggregates_kokkos> >(fineLevel, "Aggregates");
436  auto amalgInfo = Get< RCP<AmalgamationInfo> > (fineLevel, "UnAmalgamationInfo");
437  auto fineNullspace = Get< RCP<MultiVector> > (fineLevel, "Nullspace");
438  auto coarseMap = Get< RCP<const Map> > (fineLevel, "CoarseMap");
439  RCP<RealValuedMultiVector> fineCoords;
440  if(bTransferCoordinates_) {
441  fineCoords = Get< RCP<RealValuedMultiVector> >(fineLevel, "Coordinates");
442  }
443 
444  RCP<Matrix> Ptentative;
445  RCP<MultiVector> coarseNullspace;
446  RCP<RealValuedMultiVector> coarseCoords;
447 
448  if(bTransferCoordinates_) {
449  ArrayView<const GO> elementAList = coarseMap->getNodeElementList();
450  GO indexBase = coarseMap->getIndexBase();
451 
452  LO blkSize = 1;
453  if (rcp_dynamic_cast<const StridedMap>(coarseMap) != Teuchos::null)
454  blkSize = rcp_dynamic_cast<const StridedMap>(coarseMap)->getFixedBlockSize();
455 
456  Array<GO> elementList;
457  ArrayView<const GO> elementListView;
458  if (blkSize == 1) {
459  // Scalar system
460  // No amalgamation required
461  elementListView = elementAList;
462 
463  } else {
464  auto numElements = elementAList.size() / blkSize;
465 
466  elementList.resize(numElements);
467 
468  // Amalgamate the map
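 // e.g. with blkSize = 2 and indexBase = 0, DOF GIDs [10, 11, 14, 15]
 // amalgamate to node GIDs [5, 7]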
469  for (LO i = 0; i < Teuchos::as<LO>(numElements); i++)
470  elementList[i] = (elementAList[i*blkSize]-indexBase)/blkSize + indexBase;
471 
472  elementListView = elementList;
473  }
474 
475  auto uniqueMap = fineCoords->getMap();
476  auto coarseCoordMap = MapFactory::Build(coarseMap->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(),
477  elementListView, indexBase, coarseMap->getComm());
478  coarseCoords = RealValuedMultiVectorFactory::Build(coarseCoordMap, fineCoords->getNumVectors());
479 
480  // Create overlapped fine coordinates to reduce global communication
481  RCP<RealValuedMultiVector> ghostedCoords = fineCoords;
482  if (aggregates->AggregatesCrossProcessors()) {
483  auto nonUniqueMap = aggregates->GetMap();
484  auto importer = ImportFactory::Build(uniqueMap, nonUniqueMap);
485 
486  ghostedCoords = RealValuedMultiVectorFactory::Build(nonUniqueMap, fineCoords->getNumVectors());
487  ghostedCoords->doImport(*fineCoords, *importer, Xpetra::INSERT);
488  }
489 
490  // The good news is that this graph has already been constructed for the
491  // TentativePFactory and was cached in Aggregates. So this is a no-op.
492  auto aggGraph = aggregates->GetGraph();
493  auto numAggs = aggGraph.numRows();
494 
495  auto fineCoordsView = fineCoords ->template getLocalView<DeviceType>();
496  auto coarseCoordsView = coarseCoords->template getLocalView<DeviceType>();
497 
498  // Fill in coarse coordinates
499  {
500  SubFactoryMonitor m2(*this, "AverageCoords", coarseLevel);
501 
502  const auto dim = fineCoords->getNumVectors();
503 
504  typename AppendTrait<decltype(fineCoordsView), Kokkos::RandomAccess>::type fineCoordsRandomView = fineCoordsView;
505  for (size_t j = 0; j < dim; j++) {
506  Kokkos::parallel_for("MueLu::TentativeP::BuildCoords", Kokkos::RangePolicy<local_ordinal_type, execution_space>(0, numAggs),
507  KOKKOS_LAMBDA(const LO i) {
508  // A row in this graph represents all node ids in the aggregate
509  // Therefore, averaging is very easy
510 
511  auto aggregate = aggGraph.rowConst(i);
512 
513  double sum = 0.0; // do not use Scalar here (Stokhos)
514  for (size_t colID = 0; colID < static_cast<size_t>(aggregate.length); colID++)
515  sum += fineCoordsRandomView(aggregate(colID),j);
516 
517  coarseCoordsView(i,j) = sum / aggregate.length;
518  });
519  }
520  }
521  }
522 
523  if (!aggregates->AggregatesCrossProcessors())
524  BuildPuncoupled(coarseLevel, A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace, coarseLevel.GetLevelID());
525  else
526  BuildPcoupled (A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace);
527 
528  // If available, use striding information of fine level matrix A for range
529  // map and coarseMap as domain map; otherwise use plain range map of
530  // Ptent = plain range map of A for range map and coarseMap as domain map.
531  // NOTE:
532  // The latter is not really safe, since there is no striding information
533  // for the range map. This is not really a problem, since striding
534  // information is always available on the intermediate levels and the
535  // coarsest levels.
536  if (A->IsView("stridedMaps") == true)
537  Ptentative->CreateView("stridedMaps", A->getRowMap("stridedMaps"), coarseMap);
538  else
539  Ptentative->CreateView("stridedMaps", Ptentative->getRangeMap(), coarseMap);
540 
541  if(bTransferCoordinates_) {
542  Set(coarseLevel, "Coordinates", coarseCoords);
543  }
544  Set(coarseLevel, "Nullspace", coarseNullspace);
545  Set(coarseLevel, "P", Ptentative);
546 
547  if (IsPrint(Statistics1)) {
548  RCP<ParameterList> params = rcp(new ParameterList());
549  params->set("printLoadBalancingInfo", true);
550  GetOStream(Statistics1) << PerfUtils::PrintMatrixInfo(*Ptentative, "Ptent", params);
551  }
552  }
553 
554  template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class DeviceType>
555  void TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::
556  BuildPuncoupled(Level& coarseLevel, RCP<Matrix> A, RCP<Aggregates_kokkos> aggregates, RCP<AmalgamationInfo> amalgInfo, RCP<MultiVector> fineNullspace,
557  RCP<const Map> coarseMap, RCP<Matrix>& Ptentative, RCP<MultiVector>& coarseNullspace, const int levelID) const {
558  auto rowMap = A->getRowMap();
559  auto colMap = A->getColMap();
560 
561  const size_t numRows = rowMap->getNodeNumElements();
562  const size_t NSDim = fineNullspace->getNumVectors();
563 
564  typedef Kokkos::ArithTraits<SC> ATS;
565  typedef typename ATS::magnitudeType Magnitude;
566  const SC zero = ATS::zero(), one = ATS::one();
567 
568  const LO INVALID = Teuchos::OrdinalTraits<LO>::invalid();
569 
570  typename Aggregates_kokkos::local_graph_type aggGraph;
571  {
572  SubFactoryMonitor m2(*this, "Get Aggregates graph", coarseLevel);
573  aggGraph = aggregates->GetGraph();
574  }
575  auto aggRows = aggGraph.row_map;
576  auto aggCols = aggGraph.entries;
577 
578  // Aggregates map is based on the amalgamated column map
579  // We can skip global-to-local conversion if LIDs in row map are
580  // same as LIDs in column map
581  bool goodMap;
582  {
583  SubFactoryMonitor m2(*this, "Check good map", coarseLevel);
584  goodMap = isGoodMap(*rowMap, *colMap);
585  }
586  // FIXME_KOKKOS: need to proofread later code for bad maps
587  TEUCHOS_TEST_FOR_EXCEPTION(!goodMap, Exceptions::RuntimeError,
588  "MueLu: TentativePFactory_kokkos: for now works only with good maps "
589  "(i.e. \"matching\" row and column maps)");
590 
591  // STEP 1: do unamalgamation
592  // The non-kokkos version uses member functions from the AmalgamationInfo
593  // container class to unamalgamate the data. In contrast, the kokkos
594  // version of TentativePFactory does the unamalgamation here and only uses
595  // the data of the AmalgamationInfo container class
596 
597  // Extract information for unamalgamation
598  LO fullBlockSize, blockID, stridingOffset, stridedBlockSize;
599  GO indexBase;
600  amalgInfo->GetStridingInformation(fullBlockSize, blockID, stridingOffset, stridedBlockSize, indexBase);
601  GO globalOffset = amalgInfo->GlobalOffset();
602 
603  // Extract aggregation info (already in Kokkos host views)
604  auto procWinner = aggregates->GetProcWinner() ->template getLocalView<DeviceType>();
605  auto vertex2AggId = aggregates->GetVertex2AggId()->template getLocalView<DeviceType>();
606  const size_t numAggregates = aggregates->GetNumAggregates();
607 
608  int myPID = aggregates->GetMap()->getComm()->getRank();
609 
610  // Create Kokkos::View (on the device) to store the aggregate dof sizes
611  // Later used to get aggregate dof offsets
612  // NOTE: This zeros itself on construction
613  typedef typename Aggregates_kokkos::aggregates_sizes_type::non_const_type AggSizeType;
614  AggSizeType aggDofSizes;
615 
616  if (stridedBlockSize == 1) {
617  SubFactoryMonitor m2(*this, "Calc AggSizes", coarseLevel);
618 
619  // FIXME_KOKKOS: use ViewAllocateWithoutInitializing + set a single value
620  aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates+1);
621 
622  auto sizesConst = aggregates->ComputeAggregateSizes();
623  Kokkos::deep_copy(Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(1), numAggregates+1)), sizesConst);
624 
625  } else {
626  SubFactoryMonitor m2(*this, "Calc AggSizes", coarseLevel);
627 
628  // FIXME_KOKKOS: use ViewAllocateWithoutInitializing + set a single value
629  aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);
630 
631  auto nodeMap = aggregates->GetMap()->getLocalMap();
632  auto dofMap = colMap->getLocalMap();
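 // Count how many DOFs of each aggregate are actually present in the column map.
 // For example, with fullBlockSize = 3, indexBase = 0 and zero offsets, node GID 5
 // corresponds to DOF GIDs 15, 16 and 17 in the formula below.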
633 
634  Kokkos::parallel_for("MueLu:TentativePF:Build:compute_agg_sizes", range_type(0,numAggregates),
635  KOKKOS_LAMBDA(const LO agg) {
636  auto aggRowView = aggGraph.rowConst(agg);
637 
638  size_t size = 0;
639  for (LO colID = 0; colID < aggRowView.length; colID++) {
640  GO nodeGID = nodeMap.getGlobalElement(aggRowView(colID));
641 
642  for (LO k = 0; k < stridedBlockSize; k++) {
643  GO dofGID = (nodeGID - indexBase) * fullBlockSize + k + indexBase + globalOffset + stridingOffset;
644 
645  if (dofMap.getLocalElement(dofGID) != INVALID)
646  size++;
647  }
648  }
649  aggDofSizes(agg+1) = size;
650  });
651  }
652 
653  // Find maximum dof size for aggregates
654  // Later used to reserve enough scratch space for local QR decompositions
655  LO maxAggSize = 0;
656  ReduceMaxFunctor<LO,decltype(aggDofSizes)> reduceMax(aggDofSizes);
657  Kokkos::parallel_reduce("MueLu:TentativePF:Build:max_agg_size", range_type(0, aggDofSizes.extent(0)), reduceMax, maxAggSize);
658 
659  // parallel_scan (exclusive)
660  // The aggDofSizes View then contains the aggregate dof offsets
661  Kokkos::parallel_scan("MueLu:TentativePF:Build:aggregate_sizes:stage1_scan", range_type(0,numAggregates+1),
662  KOKKOS_LAMBDA(const LO i, LO& update, const bool& final_pass) {
663  update += aggDofSizes(i);
664  if (final_pass)
665  aggDofSizes(i) = update;
666  });
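 // Example: per-aggregate sizes stored as [0, 3, 2, 4] become offsets [0, 3, 5, 9],
 // so the DOFs of aggregate k occupy positions [aggDofSizes(k), aggDofSizes(k+1))
 // in agg2RowMapLO below.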
667 
668  // Create Kokkos::View on the device to store mapping
669  // between (local) aggregate id and row map ids (LIDs)
670  Kokkos::View<LO*, DeviceType> agg2RowMapLO(Kokkos::ViewAllocateWithoutInitializing("agg2row_map_LO"), numRows);
671  {
672  SubFactoryMonitor m2(*this, "Create Agg2RowMap", coarseLevel);
673 
674  AggSizeType aggOffsets(Kokkos::ViewAllocateWithoutInitializing("aggOffsets"), numAggregates);
675  Kokkos::deep_copy(aggOffsets, Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(0), numAggregates)));
676 
677  Kokkos::parallel_for("MueLu:TentativePF:Build:createAgg2RowMap", range_type(0, vertex2AggId.extent(0)),
678  KOKKOS_LAMBDA(const LO lnode) {
679  if (procWinner(lnode, 0) == myPID) {
680  // No need for atomics, it's one-to-one
681  auto aggID = vertex2AggId(lnode,0);
682 
683  auto offset = Kokkos::atomic_fetch_add( &aggOffsets(aggID), stridedBlockSize );
684  // FIXME: I think this may be wrong
685  // We unconditionally add the whole block here. When we calculated
686  // aggDofSizes, we did the isLocalElement check. Something's fishy.
687  for (LO k = 0; k < stridedBlockSize; k++)
688  agg2RowMapLO(offset + k) = lnode*stridedBlockSize + k;
689  }
690  });
691  }
692 
693  // STEP 2: prepare local QR decomposition
694  // Reserve memory for tentative prolongation operator
695  coarseNullspace = MultiVectorFactory::Build(coarseMap, NSDim);
696 
697  // Pull out the nullspace vectors so that we can have random access (on the device)
698  auto fineNS = fineNullspace ->template getLocalView<DeviceType>();
699  auto coarseNS = coarseNullspace->template getLocalView<DeviceType>();
700 
701  size_t nnz = 0; // actual number of nnz
702 
703  typedef typename Xpetra::Matrix<SC,LO,GO,NO>::local_matrix_type local_matrix_type;
704  typedef typename local_matrix_type::row_map_type::non_const_type rows_type;
705  typedef typename local_matrix_type::index_type::non_const_type cols_type;
706  typedef typename local_matrix_type::values_type::non_const_type vals_type;
707 
708 
709  // Device View for status (error messages...)
710  typedef Kokkos::View<int[10], DeviceType> status_type;
711  status_type status("status");
712 
713  typename AppendTrait<decltype(fineNS), Kokkos::RandomAccess>::type fineNSRandom = fineNS;
714  typename AppendTrait<status_type, Kokkos::Atomic> ::type statusAtomic = status;
715 
716  rows_type rows;
717  cols_type cols;
718  vals_type vals;
719 
720  const ParameterList& pL = GetParameterList();
721  const bool& doQRStep = pL.get<bool>("tentative: calculate qr");
722  if (!doQRStep) {
723  GetOStream(Runtime1) << "TentativePFactory : bypassing local QR phase" << std::endl;
724  if (NSDim>1)
725  GetOStream(Warnings0) << "TentativePFactory : for nontrivial nullspace, this may degrade performance" << std::endl;
726  }
727 
728  if (NSDim == 1) {
729  // 1D is special, as it is the easiest. We don't even need to do the QR,
730  // just normalize an array. Plus, no worries about small aggregates. In
731  // addition, we do not worry about compression. It is unlikely that
732  // nullspace will have zeros. If it does, a prolongator row would be
733  // zero and we'll get singularity anyway.
734  SubFactoryMonitor m2(*this, "Stage 1 (LocalQR)", coarseLevel);
735 
736  nnz = numRows;
737 
738  // FIXME_KOKKOS: use ViewAllocateWithoutInitializing + set a single value
739  rows = rows_type("Ptent_rows", numRows+1);
740  cols = cols_type(Kokkos::ViewAllocateWithoutInitializing("Ptent_cols"), numRows);
741  vals = vals_type(Kokkos::ViewAllocateWithoutInitializing("Ptent_vals"), numRows);
742 
743  // Set up team policy with numAggregates teams and one thread per team.
744  // Each team handles a slice of the data associated with one aggregate
745  // and performs a local QR decomposition (in this case real QR is
746  // unnecessary).
747  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
748 
749  if (doQRStep) {
750  Kokkos::parallel_for("MueLu:TentativePF:BuildUncoupled:main_loop", policy,
751  KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type &thread) {
752  auto agg = thread.league_rank();
753 
754  // size of the aggregate (number of DOFs in aggregate)
755  LO aggSize = aggRows(agg+1) - aggRows(agg);
756 
757  // Extract the piece of the nullspace corresponding to the aggregate, and
758  // put it in the flat array, "localQR" (in column major format) for the
759  // QR routine. Trivial in 1D.
760  auto norm = ATS::magnitude(zero);
761 
762  // Calculate QR by hand
763  // FIXME: shouldn't there be stridedblock here?
764  // FIXME_KOKKOS: shouldn't there be stridedblock here?
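 // For a single nullspace vector x restricted to the aggregate, the QR
 // factorization reduces to Q = x/||x|| and R = ||x||, so only the 2-norm of
 // the local piece is needed.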
765  for (decltype(aggSize) k = 0; k < aggSize; k++) {
766  auto dnorm = ATS::magnitude(fineNSRandom(agg2RowMapLO(aggRows(agg)+k),0));
767  norm += dnorm*dnorm;
768  }
769  norm = sqrt(norm);
770 
771  if (norm == zero) {
772  // zero column; terminate the execution
773  statusAtomic(1) = true;
774  return;
775  }
776 
777  // R = norm
778  coarseNS(agg, 0) = norm;
779 
780  // Q = localQR(:,0)/norm
781  for (decltype(aggSize) k = 0; k < aggSize; k++) {
782  LO localRow = agg2RowMapLO(aggRows(agg)+k);
783  SC localVal = fineNSRandom(agg2RowMapLO(aggRows(agg)+k),0) / norm;
784 
785  rows(localRow+1) = localRow+1;
786  cols(localRow) = agg;
787  vals(localRow) = localVal;
788 
789  }
790  });
791 
792  typename status_type::HostMirror statusHost = Kokkos::create_mirror_view(status);
793  Kokkos::deep_copy(statusHost, status);
794  for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
795  if (statusHost(i)) {
796  std::ostringstream oss;
797  oss << "MueLu::TentativePFactory::MakeTentative: ";
798  switch (i) {
799  case 0: oss << "!goodMap is not implemented"; break;
800  case 1: oss << "fine level NS part has a zero column"; break;
801  }
802  throw Exceptions::RuntimeError(oss.str());
803  }
804 
805  } else {
806  Kokkos::parallel_for("MueLu:TentativePF:BuildUncoupled:main_loop_noqr", policy,
807  KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type &thread) {
808  auto agg = thread.league_rank();
809 
810  // size of the aggregate (number of DOFs in aggregate)
811  LO aggSize = aggRows(agg+1) - aggRows(agg);
812 
813  // R = norm
814  coarseNS(agg, 0) = one;
815 
816  // Q = localQR(:,0)/norm
817  for (decltype(aggSize) k = 0; k < aggSize; k++) {
818  LO localRow = agg2RowMapLO(aggRows(agg)+k);
819  SC localVal = fineNSRandom(agg2RowMapLO(aggRows(agg)+k),0);
820 
821  rows(localRow+1) = localRow+1;
822  cols(localRow) = agg;
823  vals(localRow) = localVal;
824 
825  }
826  });
827  }
828 
829  } else { // NSDim > 1
830  // FIXME_KOKKOS: This code branch is completely unoptimized.
831  // Work to do:
832  // - Optimize QR decomposition
833  // - Remove INVALID usage similarly to CoalesceDropFactory_kokkos by
834  // packing new values in the beginning of each row
835  // We do use auxiliary views in this case, so keep a second rows view for
836  // counting nonzeros in rows
837 
838  // NOTE: the allocation (initialization) of these views takes noticeable time
839  size_t nnzEstimate = numRows * NSDim;
840  rows_type rowsAux("Ptent_aux_rows", numRows+1);
841  cols_type colsAux("Ptent_aux_cols", nnzEstimate);
842  vals_type valsAux("Ptent_aux_vals", nnzEstimate);
843  rows = rows_type("Ptent_rows", numRows+1);
844  {
845  // Stage 0: fill in views.
846  SubFactoryMonitor m2(*this, "Stage 0 (InitViews)", coarseLevel);
847 
848  // The main thing to notice is the initialization of colsAux with INVALID. These
849  // values will later be used to compress the arrays
850  Kokkos::parallel_for("MueLu:TentativePF:BuildPuncoupled:for1", range_type(0, numRows+1),
851  KOKKOS_LAMBDA(const LO row) {
852  rowsAux(row) = row*NSDim;
853  });
854  Kokkos::parallel_for("MueLu:TentativePF:BuildUncoupled:for2", range_type(0, nnzEstimate),
855  KOKKOS_LAMBDA(const LO j) {
856  colsAux(j) = INVALID;
857  valsAux(j) = zero;
858  });
859  }
860 
861  {
862  SubFactoryMonitor m2 = SubFactoryMonitor(*this, doQRStep ? "Stage 1 (LocalQR)" : "Stage 1 (Fill coarse nullspace and tentative P)", coarseLevel);
863  // Set up team policy with numAggregates teams and one thread per team.
864  // Each team handles a slice of the data associated with one aggregate
865  // and performs a local QR decomposition
866  const Kokkos::TeamPolicy<execution_space> policy(numAggregates,1); // numAggregates teams with 1 thread each
867  LocalQRDecompFunctor<LocalOrdinal, GlobalOrdinal, Scalar, DeviceType, decltype(fineNSRandom),
868  decltype(aggDofSizes /*aggregate sizes in dofs*/), decltype(maxAggSize), decltype(agg2RowMapLO),
869  decltype(statusAtomic), decltype(rows), decltype(rowsAux), decltype(colsAux),
870  decltype(valsAux)>
871  localQRFunctor(fineNSRandom, coarseNS, aggDofSizes, maxAggSize, agg2RowMapLO, statusAtomic,
872  rows, rowsAux, colsAux, valsAux, doQRStep);
873  Kokkos::parallel_reduce("MueLu:TentativePF:BuildUncoupled:main_qr_loop", policy, localQRFunctor, nnz);
874  }
875 
876  typename status_type::HostMirror statusHost = Kokkos::create_mirror_view(status);
877  Kokkos::deep_copy(statusHost, status);
878  for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
879  if (statusHost(i)) {
880  std::ostringstream oss;
881  oss << "MueLu::TentativePFactory::MakeTentative: ";
882  switch(i) {
883  case 0: oss << "!goodMap is not implemented"; break;
884  case 1: oss << "fine level NS part has a zero column"; break;
885  }
886  throw Exceptions::RuntimeError(oss.str());
887  }
888 
889  // Compress the cols and vals by ignoring INVALID column entries that correspond
890  // to 0 in QR.
891 
892  // The real cols and vals are constructed using calculated (not estimated) nnz
893  cols = decltype(cols)("Ptent_cols", nnz);
894  vals = decltype(vals)("Ptent_vals", nnz);
895  {
896  // Stage 2: compress the arrays
897  SubFactoryMonitor m2(*this, "Stage 2 (CompressRows)", coarseLevel);
898 
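 // rows(i+1) currently holds the nonzero count of row i (with rows(0) == 0);
 // the scan below turns these counts into CRS row offsets.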
899  Kokkos::parallel_scan("MueLu:TentativePF:Build:compress_rows", range_type(0,numRows+1),
900  KOKKOS_LAMBDA(const LO i, LO& upd, const bool& final) {
901  upd += rows(i);
902  if (final)
903  rows(i) = upd;
904  });
905  }
906 
907  {
908  SubFactoryMonitor m2(*this, "Stage 2 (CompressCols)", coarseLevel);
909 
910  // FIXME_KOKKOS: this can be sped up by moving correct cols and vals values
911  // to the beginning of rows. See CoalesceDropFactory_kokkos for
912  // example.
913  Kokkos::parallel_for("MueLu:TentativePF:Build:compress_cols_vals", range_type(0,numRows),
914  KOKKOS_LAMBDA(const LO i) {
915  LO rowStart = rows(i);
916 
917  size_t lnnz = 0;
918  for (auto j = rowsAux(i); j < rowsAux(i+1); j++)
919  if (colsAux(j) != INVALID) {
920  cols(rowStart+lnnz) = colsAux(j);
921  vals(rowStart+lnnz) = valsAux(j);
922  lnnz++;
923  }
924  });
925  }
926  }
927 
928  GetOStream(Runtime1) << "TentativePFactory : aggregates do not cross process boundaries" << std::endl;
929 
930  {
931  // Stage 3: construct Xpetra::Matrix
932  SubFactoryMonitor m2(*this, "Stage 3 (LocalMatrix+FillComplete)", coarseLevel);
933 
934  local_matrix_type lclMatrix = local_matrix_type("A", numRows, coarseMap->getNodeNumElements(), nnz, vals, rows, cols);
935 
936  // Managing labels & constants for ESFC
937  RCP<ParameterList> FCparams;
938  if (pL.isSublist("matrixmatrix: kernel params"))
939  FCparams = rcp(new ParameterList(pL.sublist("matrixmatrix: kernel params")));
940  else
941  FCparams = rcp(new ParameterList);
942 
943  // By default, we don't need global constants for TentativeP
944  FCparams->set("compute global constants", FCparams->get("compute global constants", false));
945  FCparams->set("Timer Label", std::string("MueLu::TentativeP-") + toString(levelID));
946 
947  auto PtentCrs = CrsMatrixFactory::Build(lclMatrix, rowMap, coarseMap, coarseMap, A->getDomainMap());
948  Ptentative = rcp(new CrsMatrixWrap(PtentCrs));
949  }
950  }
951 
952  template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class DeviceType>
953  void TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::
954  BuildPcoupled(RCP<Matrix> A, RCP<Aggregates_kokkos> aggregates, RCP<AmalgamationInfo> amalgInfo, RCP<MultiVector> fineNullspace,
955  RCP<const Map> coarseMap, RCP<Matrix>& Ptentative, RCP<MultiVector>& coarseNullspace) const {
956  throw Exceptions::RuntimeError("MueLu: Construction of coupled tentative P is not implemented");
957  }
958 
959  template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class DeviceType>
960  bool TentativePFactory_kokkos<Scalar,LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType>>::
961  isGoodMap(const Map& rowMap, const Map& colMap) const {
962  auto rowLocalMap = rowMap.getLocalMap();
963  auto colLocalMap = colMap.getLocalMap();
964 
965  const size_t numRows = rowLocalMap.getNodeNumElements();
966  const size_t numCols = colLocalMap.getNodeNumElements();
967 
968  if (numCols < numRows)
969  return false;
970 
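 // Count the local indices at which the row map and the column map disagree on
 // the global index; the maps "match" only if there is no such index.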
971  size_t numDiff = 0;
972  Kokkos::parallel_reduce("MueLu:TentativePF:isGoodMap", range_type(0, numRows),
973  KOKKOS_LAMBDA(const LO i, size_t &diff) {
974  diff += (rowLocalMap.getGlobalElement(i) != colLocalMap.getGlobalElement(i));
975  }, numDiff);
976 
977  return (numDiff == 0);
978  }
979 
980 } //namespace MueLu
981 
982 #define MUELU_TENTATIVEPFACTORY_KOKKOS_SHORT
983 #endif // HAVE_MUELU_KOKKOS_REFACTOR
984 #endif // MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP