[ VIGRA Homepage | Function Index | Class Index | Namespaces | File List | Main Page ]

multi_handle.hxx VIGRA

1 /************************************************************************/
2 /* */
3 /* Copyright 2011-2014 by Ullrich Koethe */
4 /* */
5 /* This file is part of the VIGRA computer vision library. */
6 /* The VIGRA Website is */
7 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
8 /* Please direct questions, bug reports, and contributions to */
9 /* ullrich.koethe@iwr.uni-heidelberg.de or */
10 /* vigra@informatik.uni-hamburg.de */
11 /* */
12 /* Permission is hereby granted, free of charge, to any person */
13 /* obtaining a copy of this software and associated documentation */
14 /* files (the "Software"), to deal in the Software without */
15 /* restriction, including without limitation the rights to use, */
16 /* copy, modify, merge, publish, distribute, sublicense, and/or */
17 /* sell copies of the Software, and to permit persons to whom the */
18 /* Software is furnished to do so, subject to the following */
19 /* conditions: */
20 /* */
21 /* The above copyright notice and this permission notice shall be */
22 /* included in all copies or substantial portions of the */
23 /* Software. */
24 /* */
25 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
26 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
27 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
28 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
29 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
30 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
31 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
32 /* OTHER DEALINGS IN THE SOFTWARE. */
33 /* */
34 /************************************************************************/
35 
36 #ifndef MULTI_HANDLE_HXX
37 #define MULTI_HANDLE_HXX
38 
39 #include "multi_fwd.hxx"
40 #include "metaprogramming.hxx"
41 #include "multi_shape.hxx"
42 
43 namespace vigra {
44 
45 template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX=Handle::index>
46 struct CoupledHandleCast;
47 
48 #ifndef _MSC_VER // Visual Studio doesn't like these forward declarations
49 template <unsigned int TARGET_INDEX, class Handle>
50 typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
51 get(Handle & handle);
52 
53 template <unsigned int TARGET_INDEX, class Handle>
54 typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
55 get(Handle const & handle);
56 #endif
57 
58 /** \addtogroup MultiIteratorGroup
59 */
60 //@{
61 
62  /**
 63  Handle class, used by CoupledScanOrderIterator as the value type to simultaneously iterate over multiple images.
64  */
65 template <class T, class NEXT>
66 class CoupledHandle
67 : public NEXT
68 {
69 public:
70  typedef NEXT base_type;
71  typedef CoupledHandle<T, NEXT> self_type;
72 
73  static const int index = NEXT::index + 1; // index of this member of the chain
74  static const unsigned int dimensions = NEXT::dimensions;
75 
76  typedef T value_type;
77  typedef T * pointer;
78  typedef T const * const_pointer;
79  typedef T & reference;
80  typedef T const & const_reference;
81  typedef typename base_type::shape_type shape_type;
82 
83  CoupledHandle()
84  : base_type(),
85  pointer_(),
86  strides_()
87  {}
88 
89  template <class NEXT1>
90  CoupledHandle(CoupledHandle<T, NEXT1> const & h, NEXT const & next)
91  : base_type(next),
92  pointer_(h.pointer_),
93  strides_(h.strides_)
94  {}
95 
96  CoupledHandle(const_pointer p, shape_type const & strides, NEXT const & next)
97  : base_type(next),
98  pointer_(const_cast<pointer>(p)),
99  strides_(strides)
100  {}
101 
102  template <class Stride>
103  CoupledHandle(MultiArrayView<dimensions, T, Stride> const & v, NEXT const & next)
104  : base_type(next),
105  pointer_(const_cast<pointer>(v.data())),
106  strides_(v.stride())
107  {
108  vigra_precondition(v.shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
109  }
110 
111  inline void incDim(int dim)
112  {
113  pointer_ += strides_[dim];
114  base_type::incDim(dim);
115  }
116 
117  inline void decDim(int dim)
118  {
119  pointer_ -= strides_[dim];
120  base_type::decDim(dim);
121  }
122 
123  inline void addDim(int dim, MultiArrayIndex d)
124  {
125  pointer_ += d*strides_[dim];
126  base_type::addDim(dim, d);
127  }
128 
129  inline void add(shape_type const & d)
130  {
131  pointer_ += dot(d, strides_);
132  base_type::add(d);
133  }
134 
135  template<int DIMENSION>
136  inline void increment()
137  {
138  pointer_ += strides_[DIMENSION];
139  base_type::template increment<DIMENSION>();
140  }
141 
142  template<int DIMENSION>
143  inline void decrement()
144  {
145  pointer_ -= strides_[DIMENSION];
146  base_type::template decrement<DIMENSION>();
147  }
148 
149  // TODO: test if making the above a default case of the this hurts performance
150  template<int DIMENSION>
151  inline void increment(MultiArrayIndex offset)
152  {
153  pointer_ += offset*strides_[DIMENSION];
154  base_type::template increment<DIMENSION>(offset);
155  }
156 
157  template<int DIMENSION>
158  inline void decrement(MultiArrayIndex offset)
159  {
160  pointer_ -= offset*strides_[DIMENSION];
161  base_type::template decrement<DIMENSION>(offset);
162  }
163 
164  void restrictToSubarray(shape_type const & start, shape_type const & end)
165  {
166  pointer_ += dot(start, strides_);
167  base_type::restrictToSubarray(start, end);
168  }
169 
170  // ptr access
171  reference operator*()
172  {
173  return *pointer_;
174  }
175 
176  const_reference operator*() const
177  {
178  return *pointer_;
179  }
180 
181  pointer operator->()
182  {
183  return pointer_;
184  }
185 
186  const_pointer operator->() const
187  {
188  return pointer_;
189  }
190 
191  pointer ptr()
192  {
193  return pointer_;
194  }
195 
196  const_pointer ptr() const
197  {
198  return pointer_;
199  }
200 
201  shape_type const & strides() const
202  {
203  return strides_;
204  }
205 
206  MultiArrayView<dimensions, T>
207  arrayView() const
208  {
209  return MultiArrayView<dimensions, T>(this->shape(), strides(), ptr() - dot(this->point(), strides()));
210  }
211 
212  template <unsigned int TARGET_INDEX>
213  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
214  get()
215  {
216  return vigra::get<TARGET_INDEX>(*this);
217  }
218 
219  template <unsigned int TARGET_INDEX>
220  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
221  get() const
222  {
223  return vigra::get<TARGET_INDEX>(*this);
224  }
225 
226  pointer pointer_;
227  shape_type strides_;
228 };
229 
230  // CoupledHandle holding the current coordinate
231  // (always the end of a CoupledHandle chain)
232 template <int N>
233 class CoupledHandle<TinyVector<MultiArrayIndex, N>, void>
234 {
235 public:
236  static const unsigned int index = 0; // index of this member of the chain
237  static const unsigned int dimensions = N;
238 
239  typedef typename MultiArrayShape<N>::type value_type;
240  typedef value_type const * pointer;
241  typedef value_type const * const_pointer;
242  typedef value_type const & reference;
243  typedef value_type const & const_reference;
244  typedef value_type shape_type;
245  typedef CoupledHandle<value_type, void> self_type;
246 
247  CoupledHandle()
248  : point_(),
249  shape_(),
250  scanOrderIndex_()
251  {}
252 
253  CoupledHandle(value_type const & shape)
254  : point_(),
255  shape_(shape),
256  scanOrderIndex_()
257  {}
258 
259  CoupledHandle(typename MultiArrayShape<N+1>::type const & shape)
260  : point_(),
261  shape_(shape.begin()),
262  scanOrderIndex_()
263  {}
264 
265  inline void incDim(int dim)
266  {
267  ++point_[dim];
268  }
269 
270  inline void decDim(int dim)
271  {
272  --point_[dim];
273  }
274 
275  inline void addDim(int dim, MultiArrayIndex d)
276  {
277  point_[dim] += d;
278  }
279 
280  inline void add(shape_type const & d)
281  {
282  point_ += d;
283  }
284 
285  template<int DIMENSION>
286  inline void increment()
287  {
288  ++point_[DIMENSION];
289  }
290 
291  template<int DIMENSION>
292  inline void decrement()
293  {
294  --point_[DIMENSION];
295  }
296 
297  // TODO: test if making the above a default case of the this hurts performance
298  template<int DIMENSION>
299  inline void increment(MultiArrayIndex offset)
300  {
301  point_[DIMENSION] += offset;
302  }
303 
304  template<int DIMENSION>
305  inline void decrement(MultiArrayIndex offset)
306  {
307  point_[DIMENSION] -= offset;
308  }
309 
310  void restrictToSubarray(shape_type const & start, shape_type const & end)
311  {
312  point_ = shape_type();
313  shape_ = end - start;
314  scanOrderIndex_ = 0;
315  }
316 
317  inline void incrementIndex()
318  {
319  ++scanOrderIndex_;
320  }
321 
322  inline void decrementIndex()
323  {
324  --scanOrderIndex_;
325  }
326 
327  inline void incrementIndex(MultiArrayIndex offset)
328  {
329  scanOrderIndex_ += offset;
330  }
331 
332  inline void decrementIndex(MultiArrayIndex offset)
333  {
334  scanOrderIndex_ -= offset;
335  }
336 
337  // access
338  MultiArrayIndex scanOrderIndex() const
339  {
340  return scanOrderIndex_;
341  }
342 
343  // access
344  const_reference point() const
345  {
346  return point_;
347  }
348 
349  // access
350  const_reference shape() const
351  {
352  return shape_;
353  }
354 
355  const_reference operator*() const
356  {
357  return point_;
358  }
359 
360  const_pointer operator->() const
361  {
362  return &point_;
363  }
364 
365  const_pointer ptr() const
366  {
367  return &point_;
368  }
369 
370  unsigned int borderType() const
371  {
372  return detail::BorderTypeImpl<N>::exec(point_, shape_);
373  }
374 
375  template <unsigned int TARGET_INDEX>
376  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
377  get()
378  {
379  return vigra::get<TARGET_INDEX>(*this);
380  }
381 
382  template <unsigned int TARGET_INDEX>
383  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
384  get() const
385  {
386  return vigra::get<TARGET_INDEX>(*this);
387  }
388 
389  value_type point_, shape_;
390  MultiArrayIndex scanOrderIndex_;
391 };
392 
393  // CoupledHandle for multi-band data
394 template <class T, class NEXT>
395 class CoupledHandle<Multiband<T>, NEXT>
396 : public NEXT
397 {
398 public:
399  typedef NEXT base_type;
400  typedef CoupledHandle<Multiband<T>, NEXT> self_type;
401 
402  static const unsigned int index = NEXT::index + 1; // index of this member of the chain
403  static const unsigned int dimensions = NEXT::dimensions;
404 
405  typedef MultiArrayView<1, T, StridedArrayTag> value_type;
406  typedef value_type * pointer;
407  typedef value_type const * const_pointer;
408  typedef value_type & reference;
409  typedef value_type const & const_reference;
410  typedef typename base_type::shape_type shape_type;
411 
412  CoupledHandle()
413  : base_type(),
414  view_(),
415  strides_()
416  {}
417 
418  template <class NEXT1>
419  CoupledHandle(CoupledHandle<Multiband<T>, NEXT1> const & h, NEXT const & next)
420  : base_type(next),
421  view_(h.view_),
422  strides_(h.strides_)
423  {}
424 
425  CoupledHandle(const_reference p, shape_type const & strides, NEXT const & next)
426  : base_type(next),
427  view_(p),
428  strides_(strides)
429  {}
430 
431  template <class Stride>
432  CoupledHandle(MultiArrayView<dimensions+1, Multiband<T>, Stride> const & v, NEXT const & next)
433  : base_type(next),
434  view_(v.bindInner(shape_type())),
435  strides_(v.bindOuter(0).stride())
436  {
437  vigra_precondition(v.bindOuter(0).shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
438  }
439 
440  inline void incDim(int dim)
441  {
442  view_.unsafePtr() += strides_[dim];
443  base_type::incDim(dim);
444  }
445 
446  inline void decDim(int dim)
447  {
448  view_.unsafePtr() -= strides_[dim];
449  base_type::decDim(dim);
450  }
451 
452  inline void addDim(int dim, MultiArrayIndex d)
453  {
454  view_.unsafePtr() += d*strides_[dim];
455  base_type::addDim(dim, d);
456  }
457 
458  inline void add(shape_type const & d)
459  {
460  view_.unsafePtr() += dot(d, strides_);
461  base_type::add(d);
462  }
463 
464  template<int DIMENSION>
465  inline void increment()
466  {
467  view_.unsafePtr() += strides_[DIMENSION];
468  base_type::template increment<DIMENSION>();
469  }
470 
471  template<int DIMENSION>
472  inline void decrement()
473  {
474  view_.unsafePtr() -= strides_[DIMENSION];
475  base_type::template decrement<DIMENSION>();
476  }
477 
478  // TODO: test if making the above a default case of the this hurts performance
479  template<int DIMENSION>
480  inline void increment(MultiArrayIndex offset)
481  {
482  view_.unsafePtr() += offset*strides_[DIMENSION];
483  base_type::template increment<DIMENSION>(offset);
484  }
485 
486  template<int DIMENSION>
487  inline void decrement(MultiArrayIndex offset)
488  {
489  view_.unsafePtr() -= offset*strides_[DIMENSION];
490  base_type::template decrement<DIMENSION>(offset);
491  }
492 
493  void restrictToSubarray(shape_type const & start, shape_type const & end)
494  {
495  view_.unsafePtr() += dot(start, strides_);
496  base_type::restrictToSubarray(start, end);
497  }
498 
499  // ptr access
500  reference operator*()
501  {
502  return view_;
503  }
504 
505  const_reference operator*() const
506  {
507  return view_;
508  }
509 
510  pointer operator->()
511  {
512  return &view_;
513  }
514 
515  const_pointer operator->() const
516  {
517  return &view_;
518  }
519 
520  pointer ptr()
521  {
522  return &view_;
523  }
524 
525  const_pointer ptr() const
526  {
527  return &view_;
528  }
529 
530  shape_type const & strides() const
531  {
532  return strides_;
533  }
534 
535  MultiArrayView<dimensions+1, Multiband<T> >
536  arrayView() const
537  {
538  typedef MultiArrayView<dimensions+1, T> View;
539  typename View::difference_type vshape(SkipInitialization), vstride(SkipInitialization);
540  vshape.template subarray<0, dimensions>() = this->shape();
541  vstride.template subarray<0, dimensions>() = strides();
542  vshape[dimensions] = view_.shape(0);
543  vstride[dimensions] = view_.stride(0);
544  return View(vshape, vstride, view_.data() - dot(this->point(), strides())).multiband();
545  }
546 
547  template <unsigned int TARGET_INDEX>
548  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
549  get()
550  {
551  return vigra::get<TARGET_INDEX>(*this);
552  }
553 
554  template <unsigned int TARGET_INDEX>
555  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
556  get() const
557  {
558  return vigra::get<TARGET_INDEX>(*this);
559  }
560 
561  value_type view_;
562  shape_type strides_;
563 };
564 
 565  // helper class for CoupledHandle for ChunkedArray
566 template <unsigned int N, class T>
567 class IteratorChunkHandle
568 {
569  public:
570  typedef ChunkedArray<N, T> array_type;
571  typedef typename MultiArrayShape<N>::type shape_type;
572 
573  IteratorChunkHandle()
574  : offset_(),
575  chunk_(0)
576  {}
577 
578  IteratorChunkHandle(shape_type const & offset)
579  : offset_(offset),
580  chunk_(0)
581  {}
582 
583  IteratorChunkHandle(IteratorChunkHandle const & other)
584  : offset_(other.offset_),
585  chunk_(0)
586  {}
587 
588  IteratorChunkHandle & operator=(IteratorChunkHandle const & other)
589  {
590  offset_ = other.offset_;
591  chunk_ = 0;
592  return *this;
593  }
594 
595  shape_type offset_;
596  SharedChunkHandle<N, T> * chunk_;
597 };
598 
 599  /* CoupledHandle for ChunkedArray
 600 
 601  The handle must store a pointer to a chunk because the chunk knows
 602  about memory management, and to an array view because it knows about
 603  subarrays and slices.
 604 
 605  Perhaps we can reduce this to a single pointer or otherwise reduce
 606  the handle memory to make it faster?
 607  */
608 template <class U, class NEXT>
609 class CoupledHandle<ChunkedMemory<U>, NEXT>
610 : public NEXT,
611  public IteratorChunkHandle<NEXT::dimensions, typename UnqualifiedType<U>::type>
612 {
613 public:
614  typedef typename UnqualifiedType<U>::type T;
615  typedef NEXT base_type;
616  typedef IteratorChunkHandle<NEXT::dimensions, T> base_type2;
617  typedef CoupledHandle<ChunkedMemory<U>, NEXT> self_type;
618 
619  static const unsigned int index = NEXT::index + 1; // index of this member of the chain
620  static const unsigned int dimensions = NEXT::dimensions;
621 
622  typedef typename IfBool<UnqualifiedType<U>::isConst,
623  ChunkedArrayBase<dimensions, T> const,
624  ChunkedArrayBase<dimensions, T> >::type array_type;
625  typedef detail::ChunkShape<dimensions, T> chunk_shape;
626  typedef T value_type;
627  typedef U * pointer;
628  typedef value_type const * const_pointer;
629  typedef U & reference;
630  typedef value_type const & const_reference;
631  typedef typename base_type::shape_type shape_type;
632 
633  CoupledHandle()
634  : base_type(),
635  base_type2(),
636  pointer_(),
637  strides_(),
638  upper_bound_(),
639  array_()
640  {}
641 
642  CoupledHandle(CoupledHandle const & other)
643  : base_type(other),
644  base_type2(other),
645  pointer_(other.pointer_),
646  strides_(other.strides_),
647  upper_bound_(other.upper_bound_),
648  array_(other.array_)
649  {
650  if(array_)
651  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
652  }
653 
654  CoupledHandle(array_type const & array, NEXT const & next)
655  : base_type(next),
656  base_type2(),
657  pointer_(),
658  array_(const_cast<array_type*>(&array))
659  {
660  if(array_)
661  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
662  }
663 
664  ~CoupledHandle()
665  {
666  // deref the present chunk
667  if(array_)
668  array_->unrefChunk(this);
669  }
670 
671  CoupledHandle & operator=(CoupledHandle const & other)
672  {
673  if(this != &other)
674  {
675  // deref the present chunk
676  if(array_)
677  array_->unrefChunk(this);
678  base_type::operator=(other);
679  base_type2::operator=(other);
680  array_ = other.array_;
681  if(array_)
682  {
683  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
684  }
685  else
686  {
687  pointer_ = other.pointer_;
688  strides_ = other.strides_;
689  upper_bound_ = other.upper_bound_;
690  }
691  }
692  return *this;
693  }
694 
695  using base_type::point;
696  using base_type::shape;
697 
698  inline void incDim(int dim)
699  {
700  base_type::incDim(dim);
701  pointer_ += strides_[dim];
702  if(point()[dim] == upper_bound_[dim])
703  {
704  // if(point()[dim] < shape()[dim])
705  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
706  }
707  }
708 
709  inline void decDim(int dim)
710  {
711  base_type::decDim(dim);
712  pointer_ -= strides_[dim];
713  if(point()[dim] < upper_bound_[dim] - array_->chunk_shape_[dim])
714  {
715  // if(point()[dim] >= 0)
716  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
717  }
718  }
719 
720  inline void addDim(int dim, MultiArrayIndex d)
721  {
722  base_type::addDim(dim, d);
723  if(point()[dim] < shape()[dim] && point()[dim] >= 0)
724  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
725  }
726 
727  inline void add(shape_type const & d)
728  {
729  base_type::add(d);
730  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
731  }
732 
733  template<int DIMENSION>
734  inline void increment()
735  {
736  // incDim(DIMENSION);
737  base_type::template increment<DIMENSION>();
738  pointer_ += strides_[DIMENSION];
739  if(point()[DIMENSION] == upper_bound_[DIMENSION])
740  {
741  if(point()[DIMENSION] > shape()[DIMENSION])
742  // this invariant check prevents the compiler from optimizing stupidly
743  // (it makes a difference of a factor of 2!)
744  vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
745  else
746  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
747  }
748  }
749 
750  template<int DIMENSION>
751  inline void decrement()
752  {
753  // decDim(DIMENSION);
754  base_type::template decrement<DIMENSION>();
755  pointer_ -= strides_[DIMENSION];
756  if(point()[DIMENSION] < upper_bound_[DIMENSION] - array_->chunk_shape_[DIMENSION])
757  {
758  if(point()[DIMENSION] < -1)
759  // this invariant check prevents the compiler from optimizing stupidly
760  // (it makes a difference of a factor of 2!)
761  vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
762  else
763  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
764  }
765  }
766 
767  template<int DIMENSION>
768  inline void increment(MultiArrayIndex d)
769  {
770  addDim(DIMENSION, d);
771  }
772 
773  template<int DIMENSION>
774  inline void decrement(MultiArrayIndex d)
775  {
776  addDim(DIMENSION, -d);
777  }
778 
779  void restrictToSubarray(shape_type const & start, shape_type const & end)
780  {
781  base_type::restrictToSubarray(start, end);
782  this->offset_ += start;
783  pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
784  }
785 
786  // ptr access
787  reference operator*()
788  {
789  return *pointer_;
790  }
791 
792  const_reference operator*() const
793  {
794  return *pointer_;
795  }
796 
797  pointer operator->()
798  {
799  return pointer_;
800  }
801 
802  const_pointer operator->() const
803  {
804  return pointer_;
805  }
806 
807  pointer ptr()
808  {
809  return pointer_;
810  }
811 
812  const_pointer ptr() const
813  {
814  return pointer_;
815  }
816 
817  array_type const &
818  arrayView() const
819  {
820  return *array_;
821  }
822 
823  template <unsigned int TARGET_INDEX>
824  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
825  get()
826  {
827  return vigra::get<TARGET_INDEX>(*this);
828  }
829 
830  template <unsigned int TARGET_INDEX>
831  typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
832  get() const
833  {
834  return vigra::get<TARGET_INDEX>(*this);
835  }
836 
837  pointer pointer_;
838  shape_type strides_, upper_bound_;
839  array_type * array_;
840 };
841 
842  // meta-programming helper classes to implement 'get<INDEX>(CoupledHandle)'
843 template <unsigned TARGET_INDEX>
844 struct Error__CoupledHandle_index_out_of_range;
845 
846 namespace detail {
847 
848 template <unsigned TARGET_INDEX, class Handle, bool isValid, unsigned int INDEX=Handle::index>
849 struct CoupledHandleCastImpl
850 {
851  typedef typename CoupledHandleCastImpl<TARGET_INDEX, typename Handle::base_type, isValid>::type type;
852  typedef typename type::value_type value_type;
853  typedef typename type::reference reference;
854  typedef typename type::const_reference const_reference;
855 };
856 
857 template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
858 struct CoupledHandleCastImpl<TARGET_INDEX, Handle, false, INDEX>
859 {
860  typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> type;
861  typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> value_type;
862  typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> reference;
863  typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> const_reference;
864 };
865 
866 template <unsigned TARGET_INDEX, class Handle>
867 struct CoupledHandleCastImpl<TARGET_INDEX, Handle, true, TARGET_INDEX>
868 {
869  typedef Handle type;
870  typedef typename type::value_type value_type;
871  typedef typename type::reference reference;
872  typedef typename type::const_reference const_reference;
873 };
874 
875 } // namespace detail
876 
877 template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
878 struct CoupledHandleCast
879 : public detail::CoupledHandleCastImpl<TARGET_INDEX, Handle, (TARGET_INDEX <= INDEX), INDEX>
880 {};
881 
882 template <unsigned int TARGET_INDEX, class Handle>
883 inline
884 typename CoupledHandleCast<TARGET_INDEX, Handle>::type &
885 cast(Handle & handle)
886 {
887  return handle;
888 }
889 
890 template <unsigned int TARGET_INDEX, class Handle>
891 inline
892 typename CoupledHandleCast<TARGET_INDEX, Handle>::type const &
893 cast(Handle const & handle)
894 {
895  return handle;
896 }
897 
898  /** Returns reference to the element in the band of the handle with index TARGET_INDEX.
899  */
900 template <unsigned int TARGET_INDEX, class Handle>
901 inline
902 typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
903 get(Handle & handle)
904 {
905  return *cast<TARGET_INDEX>(handle);
906 }
907 
908  /** Returns a constant reference to the element in the band of the handle with index TARGET_INDEX.
909  */
910 template <unsigned int TARGET_INDEX, class Handle>
911 inline
912 typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
913 get(Handle const & handle)
914 {
915  return *cast<TARGET_INDEX>(handle);
916 }
917 
918  // meta-programming helper classes to infer the type of
919  // a CoupledHandle for a set of arrays
920 template <unsigned int N, class List>
921 struct ComposeCoupledHandle;
922 
923 template <unsigned int N, class T, class TAIL>
924 struct ComposeCoupledHandle<N, TypeList<T, TAIL> >
925 {
926  typedef typename ComposeCoupledHandle<N, TAIL>::type BaseType;
927  typedef typename MultiArrayShape<N>::type shape_type;
928  typedef CoupledHandle<T, BaseType> type;
929 
930  template <class S>
931  type exec(MultiArrayView<N, T, S> const & m,
932  shape_type const & start, shape_type const & end,
933  BaseType const & base)
934  {
935  return type(m.subarray(start, end).data(), m.stride(), base);
936  }
937 
938  template <class S>
939  type exec(MultiArrayView<N, T, S> const & m, BaseType const & base)
940  {
941  return type(m.data(), m.stride(), base);
942  }
943 };
944 
945 template <unsigned int N>
946 struct ComposeCoupledHandle<N, void>
947 {
948  typedef typename MultiArrayShape<N>::type shape_type;
949  typedef CoupledHandle<shape_type, void> type;
950 
951  type exec(shape_type const & shape)
952  {
953  return type(shape);
954  }
955 
956  type exec(shape_type const & start, shape_type const & end)
957  {
958  return type(end-start);
959  }
960 };
961 
962 
963 template <unsigned int N, class T1=void, class T2=void, class T3=void, class T4=void, class T5=void>
964 struct CoupledHandleType
965 {
966  // reverse the order to get the desired index order
967  typedef typename MakeTypeList<T5, T4, T3, T2, T1>::type TypeList;
968  typedef typename ComposeCoupledHandle<N, TypeList>::type type;
969 };
970 
971 template <unsigned int N, class T1, class T2, class T3, class T4, class T5>
972 struct CoupledHandleType<N, Multiband<T1>, T2, T3, T4, T5>
973 {
974  // reverse the order to get the desired index order
975  typedef typename MakeTypeList<T5, T4, T3, T2, Multiband<T1> >::type TypeList;
976  typedef typename ComposeCoupledHandle<N-1, TypeList>::type type;
977 };
978 
979  // meta-programming helper classes to implement 'zip(iterator1, iterator2)'
980 template <class A, class B>
981 struct ZipCoupledHandles;
982 
983 template <class A, class Head, class Tail>
984 struct ZipCoupledHandles<A, CoupledHandle<Head, Tail> >
985 {
986  typedef typename ZipCoupledHandles<A, Tail>::type Next;
987  typedef CoupledHandle<Head, Next> type;
988 
989  static type construct(A const & a, CoupledHandle<Head, Tail> const & h)
990  {
991  return type(h, ZipCoupledHandles<A, Tail>::construct(a, (Tail const &)h));
992  }
993 };
994 
995 template <class A, class Shape>
996 struct ZipCoupledHandles<A, CoupledHandle<Shape, void> >
997 {
998  typedef A type;
999 
1000  static type construct(A const & a, CoupledHandle<Shape, void> const &)
1001  {
1002  return a;
1003  }
1004 };
1005 
 1006  // allow an iterator that uses CoupledHandle to specialize its
 1007  // dereferencing functions, such that
 1008  // '*iter' returns a reference to the current point if
 1009  // the handle is just a coordinate handle
 1010  // '*iter' returns a reference to the current data element
 1011  // if the handle refers to just one array
 1012  // '*iter' returns a reference to the handle itself if it refers to
 1013  // several arrays simultaneously (i.e. is actually a coupled handle)
1014 template <class Handle, unsigned int INDEX=Handle::index>
1015 struct CoupledHandleTraits
1016 {
1017  typedef Handle value_type;
1018  typedef Handle & reference;
1019  typedef Handle const & const_reference;
1020  typedef Handle * pointer;
1021  typedef Handle const * const_pointer;
1022 
1023  static reference dereference(Handle & h)
1024  {
1025  return h;
1026  }
1027 
1028  static const_reference dereference(Handle const & h)
1029  {
1030  return h;
1031  }
1032 };
1033 
1034 template <class Handle>
1035 struct CoupledHandleTraits<Handle, 0>
1036 {
1037  typedef typename Handle::value_type value_type;
1038  typedef typename Handle::reference reference;
1039  typedef typename Handle::const_reference const_reference;
1040  typedef typename Handle::pointer pointer;
1041  typedef typename Handle::const_pointer const_pointer;
1042 
1043  static reference dereference(Handle & h)
1044  {
1045  return *h;
1046  }
1047 
1048  static const_reference dereference(Handle const & h)
1049  {
1050  return *h;
1051  }
1052 };
1053 
1054 template <class Handle>
1055 struct CoupledHandleTraits<Handle, 1>
1056 {
1057  typedef typename Handle::value_type value_type;
1058  typedef typename Handle::reference reference;
1059  typedef typename Handle::const_reference const_reference;
1060  typedef typename Handle::pointer pointer;
1061  typedef typename Handle::const_pointer const_pointer;
1062 
1063  static reference dereference(Handle & h)
1064  {
1065  return *h;
1066  }
1067 
1068  static const_reference dereference(Handle const & h)
1069  {
1070  return *h;
1071  }
1072 };
1073 
1074 
1075 //@}
1076 
1077 } // namespace vigra
1078 
1079 #endif /* MULTI_HANDLE_HXX */

© Ullrich Köthe (ullrich.koethe@iwr.uni-heidelberg.de)
Heidelberg Collaboratory for Image Processing, University of Heidelberg, Germany

html generated using doxygen and Python
vigra 1.10.0 (Thu Jan 8 2015)