ViennaCL - The Vienna Computing Library  1.6.1
Free open-source GPU-accelerated linear algebra and solver library.
amg_base.hpp
1 #ifndef VIENNACL_LINALG_DETAIL_AMG_AMG_BASE_HPP_
2 #define VIENNACL_LINALG_DETAIL_AMG_AMG_BASE_HPP_
3 
4 /* =========================================================================
5  Copyright (c) 2010-2014, Institute for Microelectronics,
6  Institute for Analysis and Scientific Computing,
7  TU Wien.
8  Portions of this software are copyright by UChicago Argonne, LLC.
9 
10  -----------------
11  ViennaCL - The Vienna Computing Library
12  -----------------
13 
14  Project Head: Karl Rupp rupp@iue.tuwien.ac.at
15 
16  (A list of authors and contributors can be found in the PDF manual)
17 
18  License: MIT (X11), see file LICENSE in the base directory
19 ============================================================================= */
20 
27 #include <boost/numeric/ublas/operation.hpp>
28 #include <boost/numeric/ublas/vector.hpp>
29 #include <cmath>
30 #include <set>
31 #include <list>
32 #include <algorithm>
33 
34 #include <map>
35 #ifdef VIENNACL_WITH_OPENMP
36 #include <omp.h>
37 #endif
38 
39 #include "amg_debug.hpp"
40 
41 #define VIENNACL_AMG_COARSE_RS 1
42 #define VIENNACL_AMG_COARSE_ONEPASS 2
43 #define VIENNACL_AMG_COARSE_RS0 3
44 #define VIENNACL_AMG_COARSE_RS3 4
45 #define VIENNACL_AMG_COARSE_AG 5
46 #define VIENNACL_AMG_INTERPOL_DIRECT 1
47 #define VIENNACL_AMG_INTERPOL_CLASSIC 2
48 #define VIENNACL_AMG_INTERPOL_AG 3
49 #define VIENNACL_AMG_INTERPOL_SA 4
50 
51 namespace viennacl
52 {
53 namespace linalg
54 {
55 namespace detail
56 {
57 namespace amg
58 {
61 class amg_tag
62 {
63 public:
76  amg_tag(unsigned int coarse = 1,
77  unsigned int interpol = 1,
78  double threshold = 0.25,
79  double interpolweight = 0.2,
80  double jacobiweight = 1,
81  unsigned int presmooth = 1,
82  unsigned int postsmooth = 1,
83  unsigned int coarselevels = 0)
84  : coarse_(coarse), interpol_(interpol),
85  threshold_(threshold), interpolweight_(interpolweight), jacobiweight_(jacobiweight),
86  presmooth_(presmooth), postsmooth_(postsmooth), coarselevels_(coarselevels) {}
87 
88  // Getter-/Setter-Functions
89  void set_coarse(unsigned int coarse) { coarse_ = coarse; }
90  unsigned int get_coarse() const { return coarse_; }
91 
92  void set_interpol(unsigned int interpol) { interpol_ = interpol; }
93  unsigned int get_interpol() const { return interpol_; }
94 
95  void set_threshold(double threshold) { if (threshold > 0 && threshold <= 1) threshold_ = threshold; }
96  double get_threshold() const { return threshold_; }
97 
98  void set_as(double jacobiweight) { if (jacobiweight > 0 && jacobiweight <= 2) jacobiweight_ = jacobiweight; }
99  double get_interpolweight() const { return interpolweight_; }
100 
101  void set_interpolweight(double interpolweight) { if (interpolweight > 0 && interpolweight <= 2) interpolweight_ = interpolweight; }
102  double get_jacobiweight() const { return jacobiweight_; }
103 
104  void set_presmooth(unsigned int presmooth) { presmooth_ = presmooth; }
105  unsigned int get_presmooth() const { return presmooth_; }
106 
107  void set_postsmooth(unsigned int postsmooth) { postsmooth_ = postsmooth; }
108  unsigned int get_postsmooth() const { return postsmooth_; }
109 
110  void set_coarselevels(unsigned int coarselevels) { coarselevels_ = coarselevels; }
111  unsigned int get_coarselevels() const { return coarselevels_; }
112 
113 private:
114  unsigned int coarse_, interpol_;
115  double threshold_, interpolweight_, jacobiweight_;
116  unsigned int presmooth_, postsmooth_, coarselevels_;
117 };
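// Usage sketch (illustrative addition, not part of the original header): configuring the tag
// with the coarsening and interpolation constants defined at the top of this file. The wrapper
// function name is hypothetical and only keeps the snippet self-contained.
inline void example_amg_tag_usage()
{
  // Classical Ruge-Stueben coarsening with direct interpolation, strength threshold 0.25,
  // interpolation weight 0.2, Jacobi weight 1.0, one pre- and one post-smoothing sweep,
  // number of coarse levels left at its default of 0.
  amg_tag tag(VIENNACL_AMG_COARSE_RS, VIENNACL_AMG_INTERPOL_DIRECT,
              0.25, 0.2, 1.0, 1, 1, 0);

  tag.set_presmooth(2);     // two pre-smoothing sweeps instead of one
  tag.set_threshold(0.3);   // the setter only accepts values with 0 < threshold <= 1
}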
118 
123 template<typename InternalT, typename IteratorT, typename NumericT>
124 class amg_nonzero_scalar
125 {
126 private:
127  InternalT *m_;
128  IteratorT iter_;
129  unsigned int i_,j_;
130  NumericT s_;
131 
132 public:
134 
142  amg_nonzero_scalar(InternalT *m,
143  IteratorT & iter,
144  unsigned int i,
145  unsigned int j,
146  NumericT s = 0): m_(m), iter_(iter), i_(i), j_(j), s_(s) {}
147 
151  NumericT operator=(const NumericT value)
152  {
153  s_ = value;
154  // Only write if scalar is nonzero
155  if (s_ <= 0 && s_ >= 0) return s_;
156  // Write to m_ using iterator iter_ or indices (i_,j_)
157  m_->addscalar(iter_,i_,j_,s_);
158  return s_;
159  }
160 
164  NumericT operator+=(const NumericT value)
165  {
166  // If zero is added, then no change necessary
167  if (value <= 0 && value >= 0)
168  return s_;
169 
170  s_ += value;
171  // Remove entry if resulting scalar is zero
172  if (s_ <= 0 && s_ >= 0)
173  {
174  m_->removescalar(iter_,i_);
175  return s_;
176  }
177  //Write to m_ using iterator iter_ or indices (i_,j_)
178  m_->addscalar(iter_,i_,j_,s_);
179  return s_;
180  }
181  NumericT operator++(int)
182  {
183  s_++;
184  if (s_ == 0)
185  m_->removescalar(iter_,i_);
186  m_->addscalar (iter_,i_,j_,s_);
187  return s_;
188  }
189  NumericT operator++()
190  {
191  s_++;
192  if (s_ == 0)
193  m_->removescalar(iter_,i_);
194  m_->addscalar(iter_,i_,j_,s_);
195  return s_;
196  }
197  operator NumericT(void) { return s_; }
198 };
199 
202 template<typename InternalT>
203 class amg_sparsevector_iterator
204 {
205 private:
206  typedef amg_sparsevector_iterator<InternalT> self_type;
207  typedef typename InternalT::mapped_type ScalarType;
208 
209  InternalT & internal_vec_;
210  typename InternalT::iterator iter_;
211 
212 public:
213 
218  amg_sparsevector_iterator(InternalT & vec, bool begin=true): internal_vec_(vec)
219  {
220  if (begin)
221  iter_ = internal_vec_.begin();
222  else
223  iter_ = internal_vec_.end();
224  }
225 
226  bool operator == (self_type other)
227  {
228  if (iter_ == other.iter_)
229  return true;
230  else
231  return false;
232  }
233  bool operator != (self_type other)
234  {
235  if (iter_ != other.iter_)
236  return true;
237  else
238  return false;
239  }
240 
241  self_type const & operator ++ () const { iter_++; return *this; }
242  self_type & operator ++ () { iter_++; return *this; }
243  self_type const & operator -- () const { iter_--; return *this; }
244  self_type & operator -- () { iter_--; return *this; }
245  ScalarType const & operator * () const { return (*iter_).second; }
246  ScalarType & operator * () { return (*iter_).second; }
247  unsigned int index() const { return (*iter_).first; }
248  unsigned int index() { return (*iter_).first; }
249 };
250 
253 template<typename NumericT>
254 class amg_sparsevector
255 {
256 public:
257  typedef NumericT value_type;
258 
259 private:
260  // A map is used internally which saves all non-zero elements with pairs of (index,value)
261  typedef std::map<unsigned int, NumericT> InternalType;
263  typedef amg_nonzero_scalar<amg_sparsevector<NumericT>, typename InternalType::iterator, NumericT> NonzeroScalarType;
264 
265  // Size is only a dummy variable: it is not needed for the internal map structure, but provides a vector-compatible interface.
266  unsigned int size_;
267  InternalType internal_vector_;
268 
269 public:
270  typedef amg_sparsevector_iterator<InternalType> iterator;
271  typedef typename InternalType::const_iterator const_iterator;
272 
273 public:
277  amg_sparsevector(unsigned int size = 0): size_(size) {}
278 
279  void resize(unsigned int size) { size_ = size; }
280  unsigned int size() const { return size_;}
281 
282  // Returns the number of non-zero entries in the vector, which equals the size of the underlying map.
283  unsigned int internal_size() const { return static_cast<unsigned int>(internal_vector_.size()); }
284  // Delete underlying map.
285  void clear() { internal_vector_.clear(); }
286  // Remove entry at position i.
287  void remove(unsigned int i) { internal_vector_.erase(i); }
288 
289  // Add s to the entry at position i
290  void add(unsigned int i, NumericT s)
291  {
292  typename InternalType::iterator iter = internal_vector_.find(i);
293  // If there is no entry at position i, add new entry at that position
294  if (iter == internal_vector_.end())
295  addscalar(iter,i,i,s);
296  else
297  {
298  s += (*iter).second;
299  // If new value is zero, then erase the entry, otherwise write new value
300  if (s < 0 || s > 0)
301  (*iter).second = s;
302  else
303  internal_vector_.erase(iter);
304  }
305  }
306 
307  // Write to the map. Is called from non-zero scalar type.
308  template<typename IteratorT>
309  void addscalar(IteratorT & iter, unsigned int i, unsigned int /* j */, NumericT s)
310  {
311  // Don't write if value is zero
312  if (s <= 0 && s >= 0)
313  return;
314 
315  // If entry is already present, overwrite value, otherwise make new entry
316  if (iter != internal_vector_.end())
317  (*iter).second = s;
318  else
319  internal_vector_[i] = s;
320  }
321 
322  // Remove value from the map. Is called from non-zero scalar type.
323  template<typename IteratorT>
324  void removescalar(IteratorT & iter, unsigned int /* i */) { internal_vector_.erase(iter); }
325 
326  // Bracket operator. Returns non-zero scalar type with actual values of the respective entry which calls addscalar/removescalar after value is altered.
327  NonzeroScalarType operator [] (unsigned int i)
328  {
329  typename InternalType::iterator it = internal_vector_.find(i);
330  // If value is already present then build non-zero scalar with actual value, otherwise 0.
331  if (it != internal_vector_.end())
332  return NonzeroScalarType(this,it,i,i,(*it).second);
333  else
334  return NonzeroScalarType(this,it,i,i,0);
335  }
336 
337  // Use internal data structure directly for read-only access. No need to use non-zero scalar as no write access possible.
338  NumericT operator [] (unsigned int i) const
339  {
340  const_iterator it = internal_vector_.find(i);
341 
342  if (it != internal_vector_.end())
343  return (*it).second;
344  else
345  return 0;
346  }
347 
348  // Iterator functions.
349  iterator begin() { return iterator(internal_vector_); }
350  const_iterator begin() const { return internal_vector_.begin(); }
351  iterator end() { return iterator(internal_vector_, false); }
352  const_iterator end() const { return internal_vector_.end(); }
353 
354  // checks whether value at index i is nonzero. More efficient than doing [] == 0.
355  bool isnonzero(unsigned int i) const { return internal_vector_.find(i) != internal_vector_.end(); }
356 
357  // Copies data into a ublas vector type.
358  operator boost::numeric::ublas::vector<NumericT>(void)
359  {
360  boost::numeric::ublas::vector<NumericT> vec (size_);
361  for (iterator iter = begin(); iter != end(); ++iter)
362  vec [iter.index()] = *iter;
363  return vec;
364  }
365 };
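// Usage sketch (illustrative addition, not part of the original header): only non-zero entries
// are stored in the underlying map, and writing a zero removes an entry again. The wrapper
// function name is hypothetical.
inline void example_amg_sparsevector_usage()
{
  amg_sparsevector<double> v(10);   // logical size 10, no entries stored yet

  v.add(3, 1.5);                    // creates the entry at index 3
  v[7] = 2.0;                       // writes through the non-zero scalar proxy
  v.add(3, -1.5);                   // entry 3 becomes zero and is erased again

  double sum = 0;
  for (amg_sparsevector<double>::iterator it = v.begin(); it != v.end(); ++it)
    sum += *it;                     // iterates over the stored entries only (here: index 7)

  bool has_entry = v.isnonzero(7);  // true, and does not create an entry
  (void)sum; (void)has_entry;
}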
366 
372 template<typename NumericT>
373 class amg_sparsematrix
374 {
375 public:
376  typedef NumericT value_type;
377 private:
378  typedef std::map<unsigned int,NumericT> RowType;
379  typedef std::vector<RowType> InternalType;
381 
382  // Adapter is used for certain functionality, especially iterators.
383  typedef viennacl::tools::sparse_matrix_adapter<NumericT> AdapterType;
384  typedef viennacl::tools::const_sparse_matrix_adapter<NumericT> ConstAdapterType;
385 
386  // Non-zero scalar is used to write to the matrix.
387  typedef amg_nonzero_scalar<amg_sparsematrix<NumericT>, typename RowType::iterator, NumericT> NonzeroScalarType;
388 
389  // Holds matrix coefficients.
390  InternalType internal_mat_;
391  // Holds the matrix coefficients of the transposed matrix, if built.
392  // Note: Only internal_mat_ is written to by operators and methods; internal_mat_trans_ is built from internal_mat_ using do_trans().
393  InternalType internal_mat_trans_;
394  // Saves sizes.
395  vcl_size_t s1_, s2_;
396 
397  // True if the transpose of the matrix is used (for calculations, iteration, etc.).
398  bool transposed_mode_;
399  // True if the transpose has already been built (stored in internal_mat_trans_) and is up to date (no changes to internal_mat_ since).
400  bool transposed_;
401 
402 public:
403  typedef typename AdapterType::iterator1 iterator1;
404  typedef typename AdapterType::iterator2 iterator2;
405  typedef typename ConstAdapterType::const_iterator1 const_iterator1;
406  typedef typename ConstAdapterType::const_iterator2 const_iterator2;
407 
409  amg_sparsematrix()
410  {
411  transposed_mode_ = false;
412  transposed_ = false;
413  }
414 
419  amg_sparsematrix(unsigned int i, unsigned int j)
420  {
421  AdapterType a(internal_mat_, i, j);
422  a.resize(i,j,false);
423  AdapterType a_trans(internal_mat_trans_, j, i);
424  a_trans.resize(j,i,false);
425  s1_ = i;
426  s2_ = j;
427  a.clear();
428  a_trans.clear();
429  transposed_mode_ = false;
430  transposed_ = false;
431  }
432 
437  amg_sparsematrix(std::vector<std::map<unsigned int, NumericT> > const & mat)
438  {
439  AdapterType a (internal_mat_, mat.size(), mat.size());
440  AdapterType a_trans (internal_mat_trans_, mat.size(), mat.size());
441  a.resize(mat.size(), mat.size());
442  a_trans.resize(mat.size(), mat.size());
443 
444  internal_mat_ = mat;
445  s1_ = s2_ = mat.size();
446 
447  transposed_mode_ = false;
448  transposed_ = false;
449  }
450 
455  template<typename MatrixT>
456  amg_sparsematrix(MatrixT const & mat)
457  {
458  AdapterType a(internal_mat_, mat.size1(), mat.size2());
459  AdapterType a_trans(internal_mat_trans_, mat.size2(), mat.size1());
460  a.resize(mat.size1(), mat.size2());
461  a_trans.resize(mat.size2(), mat.size1());
462  s1_ = mat.size1();
463  s2_ = mat.size2();
464  a.clear();
465  a_trans.clear();
466 
467  for (typename MatrixT::const_iterator1 row_iter = mat.begin1(); row_iter != mat.end1(); ++row_iter)
468  {
469  for (typename MatrixT::const_iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
470  {
471  if (std::fabs(*col_iter) > 0) // *col_iter != 0, but without floating point comparison warnings
472  {
473  unsigned int x = static_cast<unsigned int>(col_iter.index1());
474  unsigned int y = static_cast<unsigned int>(col_iter.index2());
475  a(x,y) = *col_iter;
476  a_trans(y,x) = *col_iter;
477  }
478  }
479  }
480  transposed_mode_ = false;
481  transposed_ = true;
482  }
483 
484  // Build the transpose of the current matrix.
485  void do_trans()
486  {
487  // Do it only once if called in a parallel section
488  #ifdef VIENNACL_WITH_OPENMP
489  #pragma omp critical
490  #endif
491  {
492  // Only build the transpose if it has not been built yet or is not up to date
493  if (!transposed_)
494  {
495  // Mode has to be set to standard mode temporarily
496  bool save_mode = transposed_mode_;
497  transposed_mode_ = false;
498 
499  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
500  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
501  internal_mat_trans_[col_iter.index2()][static_cast<unsigned int>(col_iter.index1())] = *col_iter;
502 
503  transposed_mode_ = save_mode;
504  transposed_ = true;
505  }
506  }
507  } //do_trans()
508 
509  // Set transposed mode (true=transposed, false=regular)
510  void set_trans(bool mode)
511  {
512  transposed_mode_ = mode;
513  if (mode)
514  do_trans();
515  }
516 
517  bool get_trans() const { return transposed_mode_; }
518 
519  // Checks whether coefficient (i,j) is non-zero. More efficient than using (i,j) == 0.
520  bool isnonzero (unsigned int i, unsigned int j) const
521  {
522  if (!transposed_mode_)
523  {
524  if (internal_mat_[i].find(j) != internal_mat_[i].end())
525  return true;
526  else
527  return false;
528  }
529  else
530  {
531  if (internal_mat_[j].find(i) != internal_mat_[j].end())
532  return true;
533  else
534  return false;
535  }
536  } //isnonzero()
537 
538  // Add s to value at (i,j)
539  void add(unsigned int i, unsigned int j, NumericT s)
540  {
541  // If zero is added then do nothing.
542  if (s <= 0 && s >= 0)
543  return;
544 
545  typename RowType::iterator col_iter = internal_mat_[i].find(j);
546  // If there is no entry at position (i,j), then make new entry.
547  if (col_iter == internal_mat_[i].end())
548  addscalar(col_iter,i,j,s);
549  else
550  {
551  s += (*col_iter).second;
552  // Update value and erase entry if value is zero.
553  if (s < 0 || s > 0)
554  (*col_iter).second = s;
555  else
556  internal_mat_[i].erase(col_iter);
557  }
558  transposed_ = false;
559  } //add()
560 
561  // Write to the internal data structure. Is called from non-zero scalar type.
562  template<typename IteratorT>
563  void addscalar(IteratorT & iter, unsigned int i, unsigned int j, NumericT s)
564  {
565  // Don't write if value is zero
566  if (s >= 0 && s <= 0)
567  return;
568 
569  if (iter != internal_mat_[i].end())
570  (*iter).second = s;
571  else
572  internal_mat_[i][j] = s;
573 
574  transposed_ = false;
575  }
576 
577  // Remove entry from internal data structure. Is called from non-zero scalar type.
578  template<typename IteratorT>
579  void removescalar(IteratorT & iter, unsigned int i)
580  {
581  internal_mat_[i].erase(iter);
582  transposed_ = false;
583  }
584 
585  // Return non-zero scalar at position (i,j). Value is written to the non-zero scalar and updated via addscalar()/removescalar().
586  NonzeroScalarType operator()(unsigned int i, unsigned int j)
587  {
588  typename RowType::iterator iter;
589 
590  if (!transposed_mode_)
591  {
592  iter = internal_mat_[i].find(j);
593  if (iter != internal_mat_[i].end())
594  return NonzeroScalarType(this,iter,i,j,(*iter).second);
595  else
596  return NonzeroScalarType(this,iter,i,j,0);
597  }
598  else
599  {
600  iter = internal_mat_[j].find(i);
601  if (iter != internal_mat_[j].end())
602  return NonzeroScalarType(this,iter,j,i,(*iter).second);
603  else
604  return NonzeroScalarType(this,iter,j,i,0);
605  }
606  }
607 
608  // For read-only access return the actual value directly. Non-zero datatype not needed as no write access possible.
609  NumericT operator()(unsigned int i, unsigned int j) const
610  {
611  typename RowType::const_iterator iter;
612 
613  if (!transposed_mode_)
614  {
615  iter = internal_mat_[i].find(j);
616  if (iter != internal_mat_[i].end())
617  return (*iter).second;
618  else
619  return 0;
620  }
621  else
622  {
623  iter = internal_mat_[j].find(i);
624  if (iter != internal_mat_[j].end())
625  return (*iter).second;
626  else
627  return 0;
628  }
629  }
630 
631  void resize(unsigned int i, unsigned int j, bool preserve = true)
632  {
633  AdapterType a (internal_mat_);
634  a.resize(i,j,preserve);
635  AdapterType a_trans (internal_mat_trans_);
636  a_trans.resize(j,i,preserve);
637  s1_ = i;
638  s2_ = j;
639  }
640 
641  void clear()
642  {
643  AdapterType a(internal_mat_, s1_, s2_);
644  a.clear();
645  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
646  a_trans.clear();
647  transposed_ = true;
648  }
649 
650  vcl_size_t size1()
651 {
652  if (!transposed_mode_)
653  return s1_;
654  else
655  return s2_;
656  }
657 
658  vcl_size_t size1() const
659 {
660  if (!transposed_mode_)
661  return s1_;
662  else
663  return s2_;
664  }
665 
666 
667  vcl_size_t size2()
668 {
669  if (!transposed_mode_)
670  return s2_;
671  else
672  return s1_;
673  }
674 
675  vcl_size_t size2() const
676 {
677  if (!transposed_mode_)
678  return s2_;
679  else
680  return s1_;
681  }
682 
683  iterator1 begin1(bool trans = false)
684  {
685  if (!trans && !transposed_mode_)
686  {
687  AdapterType a(internal_mat_, s1_, s2_);
688  return a.begin1();
689  }
690  else
691  {
692  do_trans();
693  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
694  return a_trans.begin1();
695  }
696  }
697 
698  iterator1 end1(bool trans = false)
699  {
700  if (!trans && !transposed_mode_)
701  {
702  AdapterType a(internal_mat_, s1_, s2_);
703  return a.end1();
704  }
705  else
706  {
707  //do_trans();
708  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
709  return a_trans.end1();
710  }
711  }
712 
713  iterator2 begin2(bool trans = false)
714  {
715  if (!trans && !transposed_mode_)
716  {
717  AdapterType a(internal_mat_, s1_, s2_);
718  return a.begin2();
719  }
720  else
721  {
722  do_trans();
723  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
724  return a_trans.begin2();
725  }
726  }
727 
728  iterator2 end2(bool trans = false)
729  {
730  if (!trans && !transposed_mode_)
731  {
732  AdapterType a(internal_mat_, s1_, s2_);
733  return a.end2();
734  }
735  else
736  {
737  //do_trans();
738  AdapterType a_trans(internal_mat_trans_, s2_, s1_);
739  return a_trans.end2();
740  }
741  }
742 
743  const_iterator1 begin1() const
744  {
745  // The const_iterator of the transpose can only be used if the transposed matrix has already been built and is up to date.
746  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
747  ConstAdapterType a_const(internal_mat_, s1_, s2_);
748  return a_const.begin1();
749  }
750 
751  const_iterator1 end1(bool trans = false) const
752  {
753  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
754  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
755  return a_const.end1();
756  }
757 
758  const_iterator2 begin2(bool trans = false) const
759  {
760  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
761  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
762  return a_const.begin2();
763  }
764 
765  const_iterator2 end2(bool trans = false) const
766  {
767  assert((!transposed_mode_ || (transposed_mode_ && transposed_)) && bool("Error: Cannot build const_iterator when transposed has not been built yet!"));
768  ConstAdapterType a_const(internal_mat_, trans ? s2_ : s1_, trans ? s1_ : s2_);
769  return a_const.end2();
770  }
771 
772  // Returns pointer to the internal data structure. Improves performance of copy operation to GPU.
773  std::vector<std::map<unsigned int, NumericT> > * get_internal_pointer()
774  {
775  if (!transposed_mode_)
776  return &internal_mat_;
777 
778  if (!transposed_)
779  do_trans();
780  return &internal_mat_trans_;
781  }
782 
783  operator boost::numeric::ublas::compressed_matrix<NumericT>(void)
784  {
785  boost::numeric::ublas::compressed_matrix<NumericT> mat;
786  mat.resize(size1(), size2(), false);
787  mat.clear();
788 
789  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
790  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
791  mat(col_iter.index1(), col_iter.index2()) = *col_iter;
792 
793  return mat;
794  }
795 
796  operator boost::numeric::ublas::matrix<NumericT>(void)
797  {
798  boost::numeric::ublas::matrix<NumericT> mat;
799  mat.resize(size1(), size2(), false);
800  mat.clear();
801 
802  for (iterator1 row_iter = begin1(); row_iter != end1(); ++row_iter)
803  for (iterator2 col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
804  mat(col_iter.index1(), col_iter.index2()) = *col_iter;
805 
806  return mat;
807  }
808 };
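// Usage sketch (illustrative addition, not part of the original header): element access,
// in-place updates and the transposed view of the vector-of-maps matrix. The wrapper
// function name is hypothetical.
inline void example_amg_sparsematrix_usage()
{
  std::vector<std::map<unsigned int, double> > stl_mat(3);
  stl_mat[0][0] = 4.0; stl_mat[1][1] = 4.0; stl_mat[2][2] = 4.0;

  amg_sparsematrix<double> a(stl_mat);   // copies the vector-of-maps structure

  a(0, 1) = -1.0;                  // writes through the non-zero scalar proxy
  a.add(0, 1, -0.5);               // a(0,1) is now -1.5
  bool nz = a.isnonzero(0, 1);     // true, and does not create an entry

  a.set_trans(true);               // transposed view: indices are swapped on access
  double a10 = a(1, 0);            // reads the original a(0,1), i.e. -1.5
  a.set_trans(false);

  (void)nz; (void)a10;
}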
809 
815 class amg_point
816 {
817 private:
818  typedef amg_sparsevector<amg_point*> ListType;
819 
820  unsigned int index_;
821  unsigned int influence_;
822  // Determines whether point is undecided.
823  bool undecided_;
824  // Determines whether the point is a C point (true) or an F point (false).
825  bool cpoint_;
826  unsigned int coarse_index_;
827  // Index offset of parallel coarsening. In that case a point acts as if it had an index of index_-offset_ and treats other points as if they had an index of index+offset_
828  unsigned int offset_;
829  // Aggregate the point belongs to.
830  unsigned int aggregate_;
831 
832  // Holds all points influencing this point.
833  ListType influencing_points_;
834  // Holds all points that are influenced by this point.
835  ListType influenced_points_;
836 
837 public:
838  typedef ListType::iterator iterator;
839  typedef ListType::const_iterator const_iterator;
840 
843  amg_point (unsigned int index, unsigned int size): index_(index), influence_(0), undecided_(true), cpoint_(false), coarse_index_(0), offset_(0), aggregate_(0)
844  {
845  influencing_points_ = ListType(size);
846  influenced_points_ = ListType(size);
847  }
848 
849  void set_offset(unsigned int offset) { offset_ = offset; }
850  unsigned int get_offset() { return offset_; }
851  void set_index(unsigned int index) { index_ = index+offset_; }
852  unsigned int get_index() const { return index_-offset_; }
853  unsigned int get_influence() const { return influence_; }
854  void set_aggregate(unsigned int aggregate) { aggregate_ = aggregate; }
855  unsigned int get_aggregate () { return aggregate_; }
856 
857  bool is_cpoint() const { return cpoint_ && !undecided_; }
858  bool is_fpoint() const { return !cpoint_ && !undecided_; }
859  bool is_undecided() const { return undecided_; }
860 
861  // Returns number of influencing points
862  unsigned int number_influencing() const { return influencing_points_.internal_size(); }
863  // Returns true if *point is influencing this point
864  bool is_influencing(amg_point* point) const { return influencing_points_.isnonzero(point->get_index()+offset_); }
865  // Add *point to influencing points
866  void add_influencing_point(amg_point* point) { influencing_points_[point->get_index()+offset_] = point; }
867  // Add *point to influenced points
868  void add_influenced_point(amg_point* point) { influenced_points_[point->get_index()+offset_] = point; }
869 
870  // Clear influencing points
871  void clear_influencing() { influencing_points_.clear(); }
872  // Clear influenced points
873  void clear_influenced() {influenced_points_.clear(); }
874 
875 
876  unsigned int get_coarse_index() const { return coarse_index_; }
877  void set_coarse_index(unsigned int index) { coarse_index_ = index; }
878 
879  // Calculates the initial influence measure equal to the number of influenced points.
880  void calc_influence() { influence_ = influenced_points_.internal_size(); }
881 
882  // Add to influence measure.
883  unsigned int add_influence(unsigned int add)
884  {
885  influence_ += add;
886  return influence_;
887  }
888  // Make this point a C point. Only call via amg_pointvector.
889  void make_cpoint()
890  {
891  undecided_ = false;
892  cpoint_ = true;
893  influence_ = 0;
894  }
895  // Make this point an F point. Only call via amg_pointvector.
896  void make_fpoint()
897  {
898  undecided_ = false;
899  cpoint_ = false;
900  influence_ = 0;
901  }
902  // Switch point from F to C point. Only call via amg_pointvector.
903  void switch_ftoc() { cpoint_ = true; }
904 
905  // Iterator handling for influencing and influenced points.
906  iterator begin_influencing() { return influencing_points_.begin(); }
907  iterator end_influencing() { return influencing_points_.end(); }
908  const_iterator begin_influencing() const { return influencing_points_.begin(); }
909  const_iterator end_influencing() const { return influencing_points_.end(); }
910  iterator begin_influenced() { return influenced_points_.begin(); }
911  iterator end_influenced() { return influenced_points_.end(); }
912  const_iterator begin_influenced() const { return influenced_points_.begin(); }
913  const_iterator end_influenced() const { return influenced_points_.end(); }
914 };
915 
918 struct classcomp
919 {
920  // Function returns true if l comes before r in the ordering.
921  bool operator() (amg_point* l, amg_point* r) const
922  {
923  // The set is ordered by ascending influence measure, so the point with the highest influence sits at the end.
924  // For equal influence measures, the point with the lowest index comes last (and is therefore retrieved first).
925  return (l->get_influence() < r->get_influence() || (l->get_influence() == r->get_influence() && l->get_index() > r->get_index()));
926  }
927 };
928 
934 class amg_pointvector
935 {
936 private:
937  // Type for the sorted list
938  typedef std::set<amg_point*,classcomp> ListType;
939  // Type for the vector of pointers
940  typedef std::vector<amg_point*> VectorType;
941 
942  VectorType pointvector_;
943  ListType pointlist_;
944  unsigned int size_;
945  unsigned int c_points_, f_points_;
946 
947 public:
948  typedef VectorType::iterator iterator;
949  typedef VectorType::const_iterator const_iterator;
950 
954  amg_pointvector(unsigned int size = 0): size_(size)
955  {
956  pointvector_ = VectorType(size);
957  c_points_ = f_points_ = 0;
958  }
959 
960  // Construct all the points dynamically and save pointers into vector.
961  void init_points()
962  {
963  for (unsigned int i=0; i<size(); ++i)
964  pointvector_[i] = new amg_point(i,size());
965  }
966  // Delete all the points.
968  {
969  for (unsigned int i=0; i<size(); ++i)
970  delete pointvector_[i];
971  }
972  // Add point to the vector. Note: User has to make sure that point at point->get_index() does not exist yet, otherwise it will be overwritten!
973  void add_point(amg_point *point)
974  {
975  pointvector_[point->get_index()] = point;
976  if (point->is_cpoint()) c_points_++;
977  else if (point->is_fpoint()) f_points_++;
978  }
979 
980  // Update C and F count for point *point.
981  // Necessary if C and F points were constructed outside this data structure (e.g. by parallel coarsening RS0 or RS3).
982  void update_cf(amg_point *point)
983  {
984  if (point->is_cpoint()) c_points_++;
985  else if (point->is_fpoint()) f_points_++;
986  }
987  // Clear the C and F point count.
988  void clear_cf() { c_points_ = f_points_ = 0; }
989 
990  // Clear the influencing and influenced point lists of all points.
992  {
993  for (iterator iter = pointvector_.begin(); iter != pointvector_.end(); ++iter)
994  {
995  (*iter)->clear_influencing();
996  (*iter)->clear_influenced();
997  }
998  }
999 
1000  amg_point* operator[](unsigned int i) const { return pointvector_[i]; }
1001  iterator begin() { return pointvector_.begin(); }
1002  iterator end() { return pointvector_.end(); }
1003  const_iterator begin() const { return pointvector_.begin(); }
1004  const_iterator end() const { return pointvector_.end(); }
1005 
1006  void resize(unsigned int size)
1007  {
1008  size_ = size;
1009  pointvector_ = VectorType(size);
1010  }
1011  unsigned int size() const { return size_; }
1012 
1013  // Returns number of C points
1014  unsigned int get_cpoints() const { return c_points_; }
1015  // Returns number of F points
1016  unsigned int get_fpoints() const { return f_points_; }
1017 
1018  // Does the initial sorting of points into the list. Sorting is automatically done by the std::set data type.
1019  void sort()
1020  {
1021  for (iterator iter = begin(); iter != end(); ++iter)
1022  pointlist_.insert(*iter);
1023  }
1024  // Returns the point with the highest influence measure
1026  {
1027  // No points remaining? Return NULL such that coarsening will stop.
1028  if (pointlist_.size() == 0)
1029  return NULL;
1030  // If point with highest influence measure (end of the list) has measure of zero, then no further C points can be constructed. Return NULL.
1031  if ((*(--pointlist_.end()))->get_influence() == 0)
1032  return NULL;
1033  // Otherwise, return the point with highest influence measure located at the end of the list.
1034  else
1035  return *(--pointlist_.end());
1036  }
1037  // Add "add" to influence measure for point *point in the sorted list.
1038  void add_influence(amg_point* point, unsigned int add)
1039  {
1040  ListType::iterator iter = pointlist_.find(point);
1041  // If point is not in the list then stop.
1042  if (iter == pointlist_.end()) return;
1043 
1044  // Point has to be erased first as changing the value does not re-order the std::set
1045  pointlist_.erase(iter);
1046  point->add_influence(add);
1047 
1048  // Insert point back into the list. Using the iterator improves performance. The new position has to be at the same position or to the right of the old.
1049  pointlist_.insert(point);
1050  }
1051  // Make *point a C point and remove it from the sorted list
1052  void make_cpoint(amg_point* point)
1053  {
1054  pointlist_.erase(point);
1055  point->make_cpoint();
1056  c_points_++;
1057  }
1058  // Make *point an F point and remove it from the sorted list
1059  void make_fpoint(amg_point* point)
1060  {
1061  pointlist_.erase(point);
1062  point->make_fpoint();
1063  f_points_++;
1064  }
1065  // Switch *point from F to C point
1066  void switch_ftoc(amg_point* point)
1067  {
1068  point->switch_ftoc();
1069  c_points_++;
1070  f_points_--;
1071  }
1072 
1073  // Build the vector of indices for the C points on the coarse level.
1075  {
1076  unsigned int count = 0;
1077  // Use simple counter for index creation.
1078  for (iterator iter = pointvector_.begin(); iter != pointvector_.end(); ++iter)
1079  {
1080  // Set index on coarse level using counter variable
1081  if ((*iter)->is_cpoint())
1082  {
1083  (*iter)->set_coarse_index(count);
1084  count++;
1085  }
1086  }
1087  }
1088 
1089  // Return information for debugging purposes
1090  template<typename MatrixT>
1091  void get_influence_matrix(MatrixT & mat) const
1092  {
1093  mat = MatrixT(size(),size());
1094  mat.clear();
1095 
1096  for (const_iterator row_iter = begin(); row_iter != end(); ++row_iter)
1097  for (amg_point::iterator col_iter = (*row_iter)->begin_influencing(); col_iter != (*row_iter)->end_influencing(); ++col_iter)
1098  mat((*row_iter)->get_index(),(*col_iter)->get_index()) = true;
1099  }
1100  template<typename VectorT>
1101  void get_influence(VectorT & vec) const
1102  {
1103  vec = VectorT(size_);
1104  vec.clear();
1105 
1106  for (const_iterator iter = begin(); iter != end(); ++iter)
1107  vec[(*iter)->get_index()] = (*iter)->get_influence();
1108  }
1109  template<typename VectorT>
1110  void get_sorting(VectorT & vec) const
1111  {
1112  vec = VectorT(pointlist_.size());
1113  vec.clear();
1114  unsigned int i=0;
1115 
1116  for (ListType::const_iterator iter = pointlist_.begin(); iter != pointlist_.end(); ++iter)
1117  {
1118  vec[i] = (*iter)->get_index();
1119  i++;
1120  }
1121  }
1122  template<typename VectorT>
1123  void get_C(VectorT & vec) const
1124  {
1125  vec = VectorT(size_);
1126  vec.clear();
1127 
1128  for (const_iterator iter = begin(); iter != end(); ++iter)
1129  {
1130  if ((*iter)->is_cpoint())
1131  vec[(*iter)->get_index()] = true;
1132  }
1133  }
1134  template<typename VectorT>
1135  void get_F(VectorT & vec) const
1136  {
1137  vec = VectorT(size_);
1138  vec.clear();
1139 
1140  for (const_iterator iter = begin(); iter != end(); ++iter)
1141  {
1142  if ((*iter)->is_fpoint())
1143  vec[(*iter)->get_index()] = true;
1144  }
1145  }
1146  template<typename MatrixT>
1147  void get_Aggregates(MatrixT & mat) const
1148  {
1149  mat = MatrixT(size_,size_);
1150  mat.clear();
1151 
1152  for (const_iterator iter = begin(); iter != end(); ++iter)
1153  {
1154  if (!(*iter)->is_undecided())
1155  mat((*iter)->get_aggregate(),(*iter)->get_index()) = true;
1156  }
1157  }
1158 };
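// Usage sketch (illustrative addition, not part of the original header): building the point
// container, recording an influence relation and classifying points as C and F points.
// The wrapper function name is hypothetical.
inline void example_amg_pointvector_usage()
{
  amg_pointvector points(4);
  points.init_points();                          // allocates the amg_point objects

  points[0]->add_influencing_point(points[1]);   // point 1 influences point 0
  points[1]->add_influenced_point(points[0]);    // mirror entry on point 1
  points[1]->calc_influence();                   // influence measure = number of influenced points

  points.sort();                                 // build the influence-sorted set
  points.make_cpoint(points[1]);                 // the strongest influencer becomes a C point
  points.make_fpoint(points[0]);

  unsigned int num_c = points.get_cpoints();     // == 1
  (void)num_c;
}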
1159 
1163 template<typename InternalT1, typename InternalT2>
1164 class amg_slicing
1165 {
1166  typedef typename InternalT1::value_type SparseMatrixType;
1167  typedef typename InternalT2::value_type PointVectorType;
1168 
1169 public:
1170  // Data structures on a per-processor basis.
1171  boost::numeric::ublas::vector<InternalT1> A_slice_;
1172  boost::numeric::ublas::vector<InternalT2> pointvector_slice_;
1173  // Holds the offsets showing the indices for which a new slice begins.
1174  boost::numeric::ublas::vector<boost::numeric::ublas::vector<unsigned int> > offset_;
1175 
1176  unsigned int threads_;
1177  unsigned int levels_;
1178 
1179  void init(unsigned int levels, unsigned int threads = 0)
1180  {
1181  // Either use the number of threads chosen by the user or the maximum number of threads available on the processor.
1182  if (threads == 0)
1183  #ifdef VIENNACL_WITH_OPENMP
1184  threads_ = omp_get_num_procs();
1185  #else
1186  threads_ = 1;
1187  #endif
1188  else
1189  threads_ = threads;
1190 
1191  levels_ = levels;
1192 
1193  A_slice_.resize(threads_);
1194  pointvector_slice_.resize(threads_);
1195  // Offset has threads_+1 entries to also hold the total size
1196  offset_.resize(threads_+1);
1197 
1198  for (unsigned int i=0; i<threads_; ++i)
1199  {
1200  A_slice_[i].resize(levels_);
1201  pointvector_slice_[i].resize(levels_);
1202  // Offset needs one more level for the build-up of the next offset
1203  offset_[i].resize(levels_+1);
1204  }
1205  offset_[threads_].resize(levels_+1);
1206  } //init()
1207 
1208  // Slice matrix A into as many parts as there are threads.
1209  void slice(unsigned int level, InternalT1 const & A, InternalT2 const & pointvector)
1210  {
1211  // On the finest level, build a new slicing first.
1212  if (level == 0)
1213  slice_new(level, A);
1214 
1215  // On coarser levels use the same slicing as on the finest level (Points stay together on the same thread on all levels).
1216  // This is necessary because interpolation and the Galerkin product only create connections between points handled by the same thread on coarser levels.
1217  // Note: The offset is determined in amg_coarse_rs0() after the fine level has been built.
1218  slice_build(level, A, pointvector);
1219  }
1220 
1221  // Join point data structure into Pointvector
1222  void join(unsigned int level, InternalT2 & pointvector) const
1223  {
1224  typedef typename InternalT2::value_type PointVectorType;
1225 
1226  // Reset index offset of all points and update overall C and F point count
1227  pointvector[level].clear_cf();
1228  for (typename PointVectorType::iterator iter = pointvector[level].begin(); iter != pointvector[level].end(); ++iter)
1229  {
1230  (*iter)->set_offset(0);
1231  pointvector[level].update_cf(*iter);
1232  }
1233  }
1234 
1235 private:
1240  void slice_new(unsigned int level, InternalT1 const & A)
1241  {
1242  // Determine index offset of all the slices (index of A[level] when the respective slice starts).
1243  #ifdef VIENNACL_WITH_OPENMP
1244  #pragma omp parallel for
1245  #endif
1246  for (long i2=0; i2<=static_cast<long>(threads_); ++i2)
1247  {
1248  std::size_t i = static_cast<std::size_t>(i2);
1249 
1250  // Offset of first piece is zero. Pieces 1,...,threads-1 have equal size while the last one might be greater.
1251  if (i == 0) offset_[i][level] = 0;
1252  else if (i == threads_) offset_[i][level] = static_cast<unsigned int>(A[level].size1());
1253  else offset_[i][level] = static_cast<unsigned int>(i*(A[level].size1()/threads_));
1254  }
1255  }
1256 
1262  void slice_build(unsigned int level, InternalT1 const & A, InternalT2 const & pointvector)
1263  {
1264  typedef typename SparseMatrixType::const_iterator1 ConstRowIterator;
1265  typedef typename SparseMatrixType::const_iterator2 ConstColIterator;
1266 
1267  unsigned int x, y;
1268  amg_point *point;
1269 
1270  #ifdef VIENNACL_WITH_OPENMP
1271  #pragma omp parallel for private (x,y,point)
1272  #endif
1273  for (long i2=0; i2<static_cast<long>(threads_); ++i2)
1274  {
1275  std::size_t i = static_cast<std::size_t>(i2);
1276 
1277  // Allocate space for the matrix slice and the pointvector.
1278  A_slice_[i][level] = SparseMatrixType(offset_[i+1][level]-offset_[i][level], offset_[i+1][level]-offset_[i][level]);
1279  pointvector_slice_[i][level] = PointVectorType(offset_[i+1][level]-offset_[i][level]);
1280 
1281  // Iterate over the part that belongs to thread i (from Offset[i][level] to Offset[i+1][level]).
1282  ConstRowIterator row_iter = A[level].begin1();
1283  row_iter += offset_[i][level];
1284  x = static_cast<unsigned int>(row_iter.index1());
1285 
1286  while (x < offset_[i+1][level] && row_iter != A[level].end1())
1287  {
1288  // Set offset for point index and save point for the respective thread
1289  point = pointvector[level][x];
1290  point->set_offset(offset_[i][level]);
1291  pointvector_slice_[i][level].add_point(point);
1292 
1293  ConstColIterator col_iter = row_iter.begin();
1294  y = static_cast<unsigned int>(col_iter.index2());
1295 
1296  // Save all coefficients from the matrix slice
1297  while (y < offset_[i+1][level] && col_iter != row_iter.end())
1298  {
1299  if (y >= offset_[i][level])
1300  A_slice_[i][level](x-offset_[i][level], y-offset_[i][level]) = *col_iter;
1301 
1302  ++col_iter;
1303  y = static_cast<unsigned int>(col_iter.index2());
1304  }
1305 
1306  ++row_iter;
1307  x = static_cast<unsigned int>(row_iter.index1());
1308  }
1309  }
1310  }
1311 };
1312 
1318 template<typename SparseMatrixT>
1319 void amg_mat_prod (SparseMatrixT & A, SparseMatrixT & B, SparseMatrixT & RES)
1320 {
1321  typedef typename SparseMatrixT::value_type ScalarType;
1322  typedef typename SparseMatrixT::iterator1 InternalRowIterator;
1323  typedef typename SparseMatrixT::iterator2 InternalColIterator;
1324 
1325  long x,y,z;
1326  ScalarType prod;
1327  RES = SparseMatrixT(static_cast<unsigned int>(A.size1()), static_cast<unsigned int>(B.size2()));
1328  RES.clear();
1329 
1330 #ifdef VIENNACL_WITH_OPENMP
1331  #pragma omp parallel for private (x,y,z,prod)
1332 #endif
1333  for (x=0; x<static_cast<long>(A.size1()); ++x)
1334  {
1335  InternalRowIterator row_iter = A.begin1();
1336  row_iter += vcl_size_t(x);
1337  for (InternalColIterator col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
1338  {
1339  y = static_cast<unsigned int>(col_iter.index2());
1340  InternalRowIterator row_iter2 = B.begin1();
1341  row_iter2 += vcl_size_t(y);
1342 
1343  for (InternalColIterator col_iter2 = row_iter2.begin(); col_iter2 != row_iter2.end(); ++col_iter2)
1344  {
1345  z = static_cast<unsigned int>(col_iter2.index2());
1346  prod = *col_iter * *col_iter2;
1347  RES.add(static_cast<unsigned int>(x),static_cast<unsigned int>(z),prod);
1348  }
1349  }
1350  }
1351 }
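// Usage sketch (illustrative addition, not part of the original header): multiplying two small
// sparse matrices; the result matrix is reallocated and filled inside amg_mat_prod(). The wrapper
// function name is hypothetical.
inline void example_amg_mat_prod_usage()
{
  amg_sparsematrix<double> a(2, 3), b(3, 2), c(2, 2);
  a(0, 0) = 1.0; a(0, 2) = 2.0; a(1, 1) = 3.0;
  b(0, 0) = 1.0; b(1, 1) = 1.0; b(2, 0) = 4.0;

  amg_mat_prod(a, b, c);   // c = a * b is 2x2 with c(0,0) == 1*1 + 2*4 == 9 and c(1,1) == 3
}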
1352 
1358 template<typename SparseMatrixT>
1359 void amg_galerkin_prod (SparseMatrixT & A, SparseMatrixT & P, SparseMatrixT & RES)
1360 {
1361  typedef typename SparseMatrixT::value_type ScalarType;
1362  typedef typename SparseMatrixT::iterator1 InternalRowIterator;
1363  typedef typename SparseMatrixT::iterator2 InternalColIterator;
1364 
1365  long x,y1,y2,z;
1366  amg_sparsevector<ScalarType> row;
1367  RES = SparseMatrixT(static_cast<unsigned int>(P.size2()), static_cast<unsigned int>(P.size2()));
1368  RES.clear();
1369 
1370 #ifdef VIENNACL_WITH_OPENMP
1371  #pragma omp parallel for private (x,y1,y2,z,row)
1372 #endif
1373  for (x=0; x<static_cast<long>(P.size2()); ++x)
1374  {
1375  row = amg_sparsevector<ScalarType>(static_cast<unsigned int>(A.size2()));
1376  InternalRowIterator row_iter = P.begin1(true);
1377  row_iter += vcl_size_t(x);
1378 
1379  for (InternalColIterator col_iter = row_iter.begin(); col_iter != row_iter.end(); ++col_iter)
1380  {
1381  y1 = static_cast<long>(col_iter.index2());
1382  InternalRowIterator row_iter2 = A.begin1();
1383  row_iter2 += vcl_size_t(y1);
1384 
1385  for (InternalColIterator col_iter2 = row_iter2.begin(); col_iter2 != row_iter2.end(); ++col_iter2)
1386  {
1387  y2 = static_cast<long>(col_iter2.index2());
1388  row.add (static_cast<unsigned int>(y2), *col_iter * *col_iter2);
1389  }
1390  }
1391  for (typename amg_sparsevector<ScalarType>::iterator iter = row.begin(); iter != row.end(); ++iter)
1392  {
1393  y2 = iter.index();
1394  InternalRowIterator row_iter3 = P.begin1();
1395  row_iter3 += vcl_size_t(y2);
1396 
1397  for (InternalColIterator col_iter3 = row_iter3.begin(); col_iter3 != row_iter3.end(); ++col_iter3)
1398  {
1399  z = static_cast<long>(col_iter3.index2());
1400  RES.add (static_cast<unsigned int>(x), static_cast<unsigned int>(z), *col_iter3 * *iter);
1401  }
1402  }
1403  }
1404 
1405  #ifdef VIENNACL_AMG_DEBUG
1406  std::cout << "Galerkin Operator: " << std::endl;
1407  printmatrix (RES);
1408  #endif
1409 }
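// Usage sketch (illustrative addition, not part of the original header): forming the coarse-grid
// operator RES = trans(P)*A*P for a 4-point fine grid aggregated into two coarse points. The
// wrapper function name is hypothetical.
inline void example_amg_galerkin_prod_usage()
{
  amg_sparsematrix<double> a(4, 4), p(4, 2), a_coarse(2, 2);

  a(0, 0) = 2.0; a(1, 1) = 2.0; a(2, 2) = 2.0; a(3, 3) = 2.0;   // fine-level operator (2*I)
  p(0, 0) = 1.0; p(1, 0) = 0.5;                                 // fine points 0,1 -> coarse point 0
  p(2, 1) = 1.0; p(3, 1) = 0.5;                                 // fine points 2,3 -> coarse point 1

  amg_galerkin_prod(a, p, a_coarse);   // a_coarse is 2x2, e.g. a_coarse(0,0) == 1*2*1 + 0.5*2*0.5 == 2.5
}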
1410 
1416 template<typename SparseMatrixT>
1417 void test_triplematprod(SparseMatrixT & A, SparseMatrixT & P, SparseMatrixT & A_i1)
1418 {
1419  typedef typename SparseMatrixT::value_type ScalarType;
1420 
1421  boost::numeric::ublas::compressed_matrix<ScalarType> A_temp (A.size1(), A.size2());
1422  A_temp = A;
1423  boost::numeric::ublas::compressed_matrix<ScalarType> P_temp (P.size1(), P.size2());
1424  P_temp = P;
1425  P.set_trans(true);
1426  boost::numeric::ublas::compressed_matrix<ScalarType> R_temp (P.size1(), P.size2());
1427  R_temp = P;
1428  P.set_trans(false);
1429 
1430  boost::numeric::ublas::compressed_matrix<ScalarType> RA (R_temp.size1(),A_temp.size2());
1431  RA = boost::numeric::ublas::prod(R_temp,A_temp);
1432  boost::numeric::ublas::compressed_matrix<ScalarType> RAP (RA.size1(),P_temp.size2());
1433  RAP = boost::numeric::ublas::prod(RA,P_temp);
1434 
1435  for (unsigned int x=0; x<RAP.size1(); ++x)
1436  {
1437  for (unsigned int y=0; y<RAP.size2(); ++y)
1438  {
1439  if (std::fabs(static_cast<ScalarType>(RAP(x,y)) - static_cast<ScalarType>(A_i1(x,y))) > 0.0001)
1440  std::cout << x << " " << y << " " << RAP(x,y) << " " << A_i1(x,y) << std::endl;
1441  }
1442  }
1443 }
1444 
1450 template<typename SparseMatrixT, typename PointVectorT>
1451 void test_interpolation(SparseMatrixT & A, SparseMatrixT & P, PointVectorT & Pointvector)
1452 {
1453  for (unsigned int i=0; i<P.size1(); ++i)
1454  {
1455  if (Pointvector.is_cpoint(i))
1456  {
1457  bool set = false;
1458  for (unsigned int j=0; j<P.size2(); ++j)
1459  {
1460  if (P.isnonzero(i,j))
1461  {
1462  if (P(i,j) != 1)
1463  std::cout << "Error 1 in row " << i << std::endl;
1464  if (P(i,j) == 1 && set)
1465  std::cout << "Error 2 in row " << i << std::endl;
1466  if (P(i,j) == 1 && !set)
1467  set = true;
1468  }
1469  }
1470  }
1471 
1472  if (Pointvector.is_fpoint(i))
1473  for (unsigned int j=0; j<P.size2(); ++j)
1474  {
1475  if (P.isnonzero(i,j) && j> Pointvector.get_cpoints()-1)
1476  std::cout << "Error 3 in row " << i << std::endl;
1477  if (P.isnonzero(i,j))
1478  {
1479  bool set = false;
1480  for (unsigned int k=0; k<P.size1(); ++k)
1481  {
1482  if (P.isnonzero(k,j))
1483  {
1484  if (Pointvector.is_cpoint(k) && P(k,j) == 1 && A.isnonzero(i,k))
1485  set = true;
1486  }
1487  }
1488  if (!set)
1489  std::cout << "Error 4 in row " << i << std::endl;
1490  }
1491  }
1492  }
1493 }
1494 
1495 
1496 } //namespace amg
1497 }
1498 }
1499 }
1500 
1501 #endif