[ VIGRA Homepage | Function Index | Class Index | Namespaces | File List | Main Page ]

multi_handle.hxx
1/************************************************************************/
2/* */
3/* Copyright 2011-2014 by Ullrich Koethe */
4/* */
5/* This file is part of the VIGRA computer vision library. */
6/* The VIGRA Website is */
7/* http://hci.iwr.uni-heidelberg.de/vigra/ */
8/* Please direct questions, bug reports, and contributions to */
9/* ullrich.koethe@iwr.uni-heidelberg.de or */
10/* vigra@informatik.uni-hamburg.de */
11/* */
12/* Permission is hereby granted, free of charge, to any person */
13/* obtaining a copy of this software and associated documentation */
14/* files (the "Software"), to deal in the Software without */
15/* restriction, including without limitation the rights to use, */
16/* copy, modify, merge, publish, distribute, sublicense, and/or */
17/* sell copies of the Software, and to permit persons to whom the */
18/* Software is furnished to do so, subject to the following */
19/* conditions: */
20/* */
21/* The above copyright notice and this permission notice shall be */
22/* included in all copies or substantial portions of the */
23/* Software. */
24/* */
25/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
26/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
27/* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
28/* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
29/* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
30/* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
31/* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
32/* OTHER DEALINGS IN THE SOFTWARE. */
33/* */
34/************************************************************************/
35
#ifndef MULTI_HANDLE_HXX
#define MULTI_HANDLE_HXX

#include "multi_fwd.hxx"
#include "metaprogramming.hxx"
#include "multi_shape.hxx"
42
43namespace vigra {
44
45template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX=Handle::index>
46struct CoupledHandleCast;
47
48template <unsigned int TARGET_INDEX, class Handle>
49typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
50get(Handle & handle);
51
52template <unsigned int TARGET_INDEX, class Handle>
53typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
54get(Handle const & handle);
55
56/** \addtogroup MultiIteratorGroup
57*/
58//@{
59
60 /**
61 Handle class, used by CoupledScanOrderIterator as the value type to simultaneously itearate over multiple images.
62 */
63template <class T, class NEXT>
65: public NEXT
66{
67public:
68 typedef NEXT base_type;
70
71 static const int index = NEXT::index + 1; // index of this member of the chain
72 static const unsigned int dimensions = NEXT::dimensions;
73
74 typedef T value_type;
75 typedef T * pointer;
76 typedef T const * const_pointer;
77 typedef T & reference;
78 typedef T const & const_reference;
79 typedef typename base_type::shape_type shape_type;
80
82 : base_type(),
83 pointer_(),
84 strides_()
85 {}
86
87 template <class NEXT1>
88 CoupledHandle(CoupledHandle<T, NEXT1> const & h, NEXT const & next)
89 : base_type(next),
90 pointer_(h.pointer_),
91 strides_(h.strides_)
92 {}
93
94 CoupledHandle(const_pointer p, shape_type const & strides, NEXT const & next)
95 : base_type(next),
96 pointer_(const_cast<pointer>(p)),
97 strides_(strides)
98 {}
99
100 template <class Stride>
101 CoupledHandle(MultiArrayView<dimensions, T, Stride> const & v, NEXT const & next)
102 : base_type(next),
103 pointer_(const_cast<pointer>(v.data())),
104 strides_(v.stride())
105 {
106 vigra_precondition(v.shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
107 }
108
109 inline void incDim(int dim)
110 {
111 pointer_ += strides_[dim];
112 base_type::incDim(dim);
113 }
114
115 inline void decDim(int dim)
116 {
117 pointer_ -= strides_[dim];
118 base_type::decDim(dim);
119 }
120
121 inline void addDim(int dim, MultiArrayIndex d)
122 {
123 pointer_ += d*strides_[dim];
124 base_type::addDim(dim, d);
125 }
126
127 inline void add(shape_type const & d)
128 {
129 pointer_ += dot(d, strides_);
130 base_type::add(d);
131 }
132
133 template<int DIMENSION>
134 inline void increment()
135 {
136 pointer_ += strides_[DIMENSION];
137 base_type::template increment<DIMENSION>();
138 }
139
140 template<int DIMENSION>
141 inline void decrement()
142 {
143 pointer_ -= strides_[DIMENSION];
144 base_type::template decrement<DIMENSION>();
145 }
146
147 // TODO: test if making the above a default case of the this hurts performance
148 template<int DIMENSION>
149 inline void increment(MultiArrayIndex offset)
150 {
151 pointer_ += offset*strides_[DIMENSION];
152 base_type::template increment<DIMENSION>(offset);
153 }
154
155 template<int DIMENSION>
156 inline void decrement(MultiArrayIndex offset)
157 {
158 pointer_ -= offset*strides_[DIMENSION];
159 base_type::template decrement<DIMENSION>(offset);
160 }
161
162 void restrictToSubarray(shape_type const & start, shape_type const & end)
163 {
164 pointer_ += dot(start, strides_);
165 base_type::restrictToSubarray(start, end);
166 }
167
168 // ptr access
169 reference operator*()
170 {
171 return *pointer_;
172 }
173
174 const_reference operator*() const
175 {
176 return *pointer_;
177 }
178
179 pointer operator->()
180 {
181 return pointer_;
182 }
183
184 const_pointer operator->() const
185 {
186 return pointer_;
187 }
188
189 pointer ptr()
190 {
191 return pointer_;
192 }
193
194 const_pointer ptr() const
195 {
196 return pointer_;
197 }
198
199 shape_type const & strides() const
200 {
201 return strides_;
202 }
203
205 arrayView() const
206 {
207 return MultiArrayView<dimensions, T>(this->shape(), strides(), ptr() - dot(this->point(), strides()));
208 }
209
210 template <unsigned int TARGET_INDEX>
211 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
212 get()
213 {
214 return vigra::get<TARGET_INDEX>(*this);
215 }
216
217 template <unsigned int TARGET_INDEX>
218 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
219 get() const
220 {
221 return vigra::get<TARGET_INDEX>(*this);
222 }
223
224 // NOTE: dangerous function - only use it when you know what you are doing
225 void internal_reset(const_pointer p)
226 {
227 pointer_ = const_cast<pointer>(p);
228 }
229
230 pointer pointer_;
231 shape_type strides_;
232};
233
234 // CoupledHandle holding the current coordinate
235 // (always the end of a CoupledHandle chain)
236template <int N>
238{
239public:
240 static const unsigned int index = 0; // index of this member of the chain
241 static const unsigned int dimensions = N;
242
243 typedef typename MultiArrayShape<N>::type value_type;
244 typedef value_type const * pointer;
245 typedef value_type const * const_pointer;
246 typedef value_type const & reference;
247 typedef value_type const & const_reference;
248 typedef value_type shape_type;
249 typedef CoupledHandle<value_type, void> self_type;
250
252 : point_(),
253 shape_(),
254 scanOrderIndex_()
255 {}
256
257 CoupledHandle(value_type const & shape)
258 : point_(),
259 shape_(shape),
260 scanOrderIndex_()
261 {}
262
263 CoupledHandle(typename MultiArrayShape<N+1>::type const & shape)
264 : point_(),
265 shape_(shape.begin()),
266 scanOrderIndex_()
267 {}
268
269 inline void incDim(int dim)
270 {
271 ++point_[dim];
272 }
273
274 inline void decDim(int dim)
275 {
276 --point_[dim];
277 }
278
279 inline void addDim(int dim, MultiArrayIndex d)
280 {
281 point_[dim] += d;
282 }
283
284 inline void add(shape_type const & d)
285 {
286 point_ += d;
287 }
288
289 template<int DIMENSION>
290 inline void increment()
291 {
292 ++point_[DIMENSION];
293 }
294
295 template<int DIMENSION>
296 inline void decrement()
297 {
298 --point_[DIMENSION];
299 }
300
301 // TODO: test if making the above a default case of the this hurts performance
302 template<int DIMENSION>
303 inline void increment(MultiArrayIndex offset)
304 {
305 point_[DIMENSION] += offset;
306 }
307
308 template<int DIMENSION>
309 inline void decrement(MultiArrayIndex offset)
310 {
311 point_[DIMENSION] -= offset;
312 }
313
314 void restrictToSubarray(shape_type const & start, shape_type const & end)
315 {
316 point_ = shape_type();
317 shape_ = end - start;
318 scanOrderIndex_ = 0;
319 }
320
321 inline void incrementIndex()
322 {
323 ++scanOrderIndex_;
324 }
325
326 inline void decrementIndex()
327 {
328 --scanOrderIndex_;
329 }
330
331 inline void incrementIndex(MultiArrayIndex offset)
332 {
333 scanOrderIndex_ += offset;
334 }
335
336 inline void decrementIndex(MultiArrayIndex offset)
337 {
338 scanOrderIndex_ -= offset;
339 }
340
341 // access
342 MultiArrayIndex scanOrderIndex() const
343 {
344 return scanOrderIndex_;
345 }
346
347 // access
348 const_reference point() const
349 {
350 return point_;
351 }
352
353 // access
354 const_reference shape() const
355 {
356 return shape_;
357 }
358
359 const_reference operator*() const
360 {
361 return point_;
362 }
363
364 const_pointer operator->() const
365 {
366 return &point_;
367 }
368
369 const_pointer ptr() const
370 {
371 return &point_;
372 }
373
374 unsigned int borderType() const
375 {
376 return detail::BorderTypeImpl<N>::exec(point_, shape_);
377 }
378
379 template <unsigned int TARGET_INDEX>
380 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
381 get()
382 {
383 return vigra::get<TARGET_INDEX>(*this);
384 }
385
386 template <unsigned int TARGET_INDEX>
387 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
388 get() const
389 {
390 return vigra::get<TARGET_INDEX>(*this);
391 }
392
393 // NOTE: dangerous function - only use it when you know what you are doing
394 void internal_reset(value_type const & point)
395 {
396 point_ = point;
397 }
398
399 value_type point_, shape_;
400 MultiArrayIndex scanOrderIndex_;
401};
402
403 // CoupledHandle for multi-band data
404template <class T, class NEXT>
405class CoupledHandle<Multiband<T>, NEXT>
406: public NEXT
407{
408public:
409 typedef NEXT base_type;
410 typedef CoupledHandle<Multiband<T>, NEXT> self_type;
411
412 static const unsigned int index = NEXT::index + 1; // index of this member of the chain
413 static const unsigned int dimensions = NEXT::dimensions;
414
415 typedef MultiArrayView<1, T, StridedArrayTag> value_type;
416 typedef value_type * pointer;
417 typedef value_type const * const_pointer;
418 typedef value_type & reference;
419 typedef value_type const & const_reference;
420 typedef typename base_type::shape_type shape_type;
421
422 CoupledHandle()
423 : base_type(),
424 view_(),
425 strides_()
426 {}
427
428 template <class NEXT1>
429 CoupledHandle(CoupledHandle<Multiband<T>, NEXT1> const & h, NEXT const & next)
430 : base_type(next),
431 view_(h.view_),
432 strides_(h.strides_)
433 {}
434
435 CoupledHandle(const_reference p, shape_type const & strides, NEXT const & next)
436 : base_type(next),
437 view_(p),
438 strides_(strides)
439 {}
440
441 template <class Stride>
442 CoupledHandle(MultiArrayView<dimensions+1, Multiband<T>, Stride> const & v, NEXT const & next)
443 : base_type(next),
444 view_(v.bindInner(shape_type())),
445 strides_(v.bindOuter(0).stride())
446 {
447 vigra_precondition(v.bindOuter(0).shape() == this->shape(), "createCoupledIterator(): shape mismatch.");
448 }
449
450 inline void incDim(int dim)
451 {
452 view_.unsafePtr() += strides_[dim];
453 base_type::incDim(dim);
454 }
455
456 inline void decDim(int dim)
457 {
458 view_.unsafePtr() -= strides_[dim];
459 base_type::decDim(dim);
460 }
461
462 inline void addDim(int dim, MultiArrayIndex d)
463 {
464 view_.unsafePtr() += d*strides_[dim];
465 base_type::addDim(dim, d);
466 }
467
468 inline void add(shape_type const & d)
469 {
470 view_.unsafePtr() += dot(d, strides_);
471 base_type::add(d);
472 }
473
474 template<int DIMENSION>
475 inline void increment()
476 {
477 view_.unsafePtr() += strides_[DIMENSION];
478 base_type::template increment<DIMENSION>();
479 }
480
481 template<int DIMENSION>
482 inline void decrement()
483 {
484 view_.unsafePtr() -= strides_[DIMENSION];
485 base_type::template decrement<DIMENSION>();
486 }
487
488 // TODO: test if making the above a default case of the this hurts performance
489 template<int DIMENSION>
490 inline void increment(MultiArrayIndex offset)
491 {
492 view_.unsafePtr() += offset*strides_[DIMENSION];
493 base_type::template increment<DIMENSION>(offset);
494 }
495
496 template<int DIMENSION>
497 inline void decrement(MultiArrayIndex offset)
498 {
499 view_.unsafePtr() -= offset*strides_[DIMENSION];
500 base_type::template decrement<DIMENSION>(offset);
501 }
502
503 void restrictToSubarray(shape_type const & start, shape_type const & end)
504 {
505 view_.unsafePtr() += dot(start, strides_);
506 base_type::restrictToSubarray(start, end);
507 }
508
509 // ptr access
510 reference operator*()
511 {
512 return view_;
513 }
514
515 const_reference operator*() const
516 {
517 return view_;
518 }
519
520 pointer operator->()
521 {
522 return &view_;
523 }
524
525 const_pointer operator->() const
526 {
527 return &view_;
528 }
529
530 pointer ptr()
531 {
532 return &view_;
533 }
534
535 const_pointer ptr() const
536 {
537 return &view_;
538 }
539
540 shape_type const & strides() const
541 {
542 return strides_;
543 }
544
545 MultiArrayView<dimensions+1, Multiband<T> >
546 arrayView() const
547 {
548 typedef MultiArrayView<dimensions+1, T> View;
549 typename View::difference_type vshape(SkipInitialization), vstride(SkipInitialization);
550 vshape.template subarray<0, dimensions>() = this->shape();
551 vstride.template subarray<0, dimensions>() = strides();
552 vshape[dimensions] = view_.shape(0);
553 vstride[dimensions] = view_.stride(0);
554 return View(vshape, vstride, view_.data() - dot(this->point(), strides())).multiband();
555 }
556
557 template <unsigned int TARGET_INDEX>
558 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
559 get()
560 {
561 return vigra::get<TARGET_INDEX>(*this);
562 }
563
564 template <unsigned int TARGET_INDEX>
565 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
566 get() const
567 {
568 return vigra::get<TARGET_INDEX>(*this);
569 }
570
571 template <class U>
572 void internal_reset(U const &)
573 {
574 vigra_fail("CoupledHandle<Multiband<T>>::internal_reset(): not implemented.");
575 }
576
577 value_type view_;
578 shape_type strides_;
579};
580
581 // helper class for CoupledHandle for CunkedArray
582template <unsigned int N, class T>
583class IteratorChunkHandle
584{
585 public:
586 typedef ChunkedArray<N, T> array_type;
587 typedef typename MultiArrayShape<N>::type shape_type;
588
589 IteratorChunkHandle()
590 : offset_(),
591 chunk_(0)
592 {}
593
594 IteratorChunkHandle(shape_type const & offset)
595 : offset_(offset),
596 chunk_(0)
597 {}
598
599 IteratorChunkHandle(IteratorChunkHandle const & other)
600 : offset_(other.offset_),
601 chunk_(0)
602 {}
603
604 IteratorChunkHandle & operator=(IteratorChunkHandle const & other)
605 {
606 offset_ = other.offset_;
607 chunk_ = 0;
608 return *this;
609 }
610
611 shape_type offset_;
612 SharedChunkHandle<N, T> * chunk_;
613};
614
    /* CoupledHandle for ChunkedArray

       The handle must store a pointer to a chunk because the chunk knows
       about memory management, and to an array view because it knows about
       subarrays and slices.

       Perhaps we can reduce this to a single pointer or otherwise reduce
       the handle memory to make it faster?
    */
624template <class U, class NEXT>
625class CoupledHandle<ChunkedMemory<U>, NEXT>
626: public NEXT,
627 public IteratorChunkHandle<NEXT::dimensions, typename UnqualifiedType<U>::type>
628{
629public:
630 typedef typename UnqualifiedType<U>::type T;
631 typedef NEXT base_type;
632 typedef IteratorChunkHandle<NEXT::dimensions, T> base_type2;
633 typedef CoupledHandle<ChunkedMemory<U>, NEXT> self_type;
634
635 static const unsigned int index = NEXT::index + 1; // index of this member of the chain
636 static const unsigned int dimensions = NEXT::dimensions;
637
638 typedef typename IfBool<UnqualifiedType<U>::isConst,
639 ChunkedArrayBase<dimensions, T> const,
640 ChunkedArrayBase<dimensions, T> >::type array_type;
641 typedef detail::ChunkShape<dimensions, T> chunk_shape;
642 typedef T value_type;
643 typedef U * pointer;
644 typedef value_type const * const_pointer;
645 typedef U & reference;
646 typedef value_type const & const_reference;
647 typedef typename base_type::shape_type shape_type;
648
649 CoupledHandle()
650 : base_type(),
651 base_type2(),
652 pointer_(),
653 strides_(),
654 upper_bound_(),
655 array_()
656 {}
657
658 CoupledHandle(CoupledHandle const & other)
659 : base_type(other),
660 base_type2(other),
661 pointer_(other.pointer_),
662 strides_(other.strides_),
663 upper_bound_(other.upper_bound_),
664 array_(other.array_)
665 {
666 if(array_)
667 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
668 }
669
670 CoupledHandle(array_type const & array, NEXT const & next)
671 : base_type(next),
672 base_type2(),
673 pointer_(),
674 array_(const_cast<array_type*>(&array))
675 {
676 if(array_)
677 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
678 }
679
680 ~CoupledHandle()
681 {
682 // deref the present chunk
683 if(array_)
684 array_->unrefChunk(this);
685 }
686
687 CoupledHandle & operator=(CoupledHandle const & other)
688 {
689 if(this != &other)
690 {
691 // deref the present chunk
692 if(array_)
693 array_->unrefChunk(this);
694 base_type::operator=(other);
695 base_type2::operator=(other);
696 array_ = other.array_;
697 if(array_)
698 {
699 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
700 }
701 else
702 {
703 pointer_ = other.pointer_;
704 strides_ = other.strides_;
705 upper_bound_ = other.upper_bound_;
706 }
707 }
708 return *this;
709 }
710
711 using base_type::point;
712 using base_type::shape;
713
714 inline void incDim(int dim)
715 {
716 base_type::incDim(dim);
717 pointer_ += strides_[dim];
718 if(point()[dim] == upper_bound_[dim])
719 {
720 // if(point()[dim] < shape()[dim])
721 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
722 }
723 }
724
725 inline void decDim(int dim)
726 {
727 base_type::decDim(dim);
728 pointer_ -= strides_[dim];
729 if(point()[dim] < upper_bound_[dim] - array_->chunk_shape_[dim])
730 {
731 // if(point()[dim] >= 0)
732 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
733 }
734 }
735
736 inline void addDim(int dim, MultiArrayIndex d)
737 {
738 base_type::addDim(dim, d);
739 if(point()[dim] < shape()[dim] && point()[dim] >= 0)
740 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
741 }
742
743 inline void add(shape_type const & d)
744 {
745 base_type::add(d);
746 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
747 }
748
749 template<int DIMENSION>
750 inline void increment()
751 {
752 // incDim(DIMENSION);
753 base_type::template increment<DIMENSION>();
754 pointer_ += strides_[DIMENSION];
755 if(point()[DIMENSION] == upper_bound_[DIMENSION])
756 {
757 if(point()[DIMENSION] > shape()[DIMENSION])
758 // this invariant check prevents the compiler from optimizing stupidly
759 // (it makes a difference of a factor of 2!)
760 vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
761 else
762 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
763 }
764 }
765
766 template<int DIMENSION>
767 inline void decrement()
768 {
769 // decDim(DIMENSION);
770 base_type::template decrement<DIMENSION>();
771 pointer_ -= strides_[DIMENSION];
772 if(point()[DIMENSION] < upper_bound_[DIMENSION] - array_->chunk_shape_[DIMENSION])
773 {
774 if(point()[DIMENSION] < -1)
775 // this invariant check prevents the compiler from optimizing stupidly
776 // (it makes a difference of a factor of 2!)
777 vigra_invariant(false, "CoupledHandle<ChunkedMemory<T>>: internal error.");
778 else
779 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
780 }
781 }
782
783 template<int DIMENSION>
784 inline void increment(MultiArrayIndex d)
785 {
786 addDim(DIMENSION, d);
787 }
788
789 template<int DIMENSION>
790 inline void decrement(MultiArrayIndex d)
791 {
792 addDim(DIMENSION, -d);
793 }
794
795 void restrictToSubarray(shape_type const & start, shape_type const & end)
796 {
797 base_type::restrictToSubarray(start, end);
798 this->offset_ += start;
799 pointer_ = array_->chunkForIterator(point(), strides_, upper_bound_, this);
800 }
801
802 // ptr access
803 reference operator*()
804 {
805 return *pointer_;
806 }
807
808 const_reference operator*() const
809 {
810 return *pointer_;
811 }
812
813 pointer operator->()
814 {
815 return pointer_;
816 }
817
818 const_pointer operator->() const
819 {
820 return pointer_;
821 }
822
823 pointer ptr()
824 {
825 return pointer_;
826 }
827
828 const_pointer ptr() const
829 {
830 return pointer_;
831 }
832
833 array_type const &
834 arrayView() const
835 {
836 return *array_;
837 }
838
839 template <unsigned int TARGET_INDEX>
840 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::reference
841 get()
842 {
843 return vigra::get<TARGET_INDEX>(*this);
844 }
845
846 template <unsigned int TARGET_INDEX>
847 typename CoupledHandleCast<TARGET_INDEX, CoupledHandle, index>::const_reference
848 get() const
849 {
850 return vigra::get<TARGET_INDEX>(*this);
851 }
852
853 template <class V>
854 void internal_reset(V const &)
855 {
856 vigra_fail("CoupledHandle<ChunkedMemory<T>>::internal_reset(): not implemented.");
857 }
858
859 pointer pointer_;
860 shape_type strides_, upper_bound_;
861 array_type * array_;
862};
863
864 // meta-programming helper classes to implement 'get<INDEX>(CoupledHandle)'
865template <unsigned TARGET_INDEX>
866struct Error__CoupledHandle_index_out_of_range;
867
868namespace detail {
869
870template <unsigned TARGET_INDEX, class Handle, bool isValid, unsigned int INDEX=Handle::index>
871struct CoupledHandleCastImpl
872{
873 typedef typename CoupledHandleCastImpl<TARGET_INDEX, typename Handle::base_type, isValid>::type type;
874 typedef typename type::value_type value_type;
875 typedef typename type::reference reference;
876 typedef typename type::const_reference const_reference;
877};
878
879template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
880struct CoupledHandleCastImpl<TARGET_INDEX, Handle, false, INDEX>
881{
882 typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> type;
883 typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> value_type;
884 typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> reference;
885 typedef Error__CoupledHandle_index_out_of_range<TARGET_INDEX> const_reference;
886};
887
888template <unsigned TARGET_INDEX, class Handle>
889struct CoupledHandleCastImpl<TARGET_INDEX, Handle, true, TARGET_INDEX>
890{
891 typedef Handle type;
892 typedef typename type::value_type value_type;
893 typedef typename type::reference reference;
894 typedef typename type::const_reference const_reference;
895};
896
897} // namespace detail
898
899template <unsigned TARGET_INDEX, class Handle, unsigned int INDEX>
900struct CoupledHandleCast
901: public detail::CoupledHandleCastImpl<TARGET_INDEX, Handle, (TARGET_INDEX <= INDEX), INDEX>
902{};
903
904template <unsigned int TARGET_INDEX, class Handle>
905inline
906typename CoupledHandleCast<TARGET_INDEX, Handle>::type &
907cast(Handle & handle)
908{
909 return handle;
910}
911
912template <unsigned int TARGET_INDEX, class Handle>
913inline
914typename CoupledHandleCast<TARGET_INDEX, Handle>::type const &
915cast(Handle const & handle)
916{
917 return handle;
918}
919
920 /** Returns reference to the element in the band of the handle with index TARGET_INDEX.
921 */
922template <unsigned int TARGET_INDEX, class Handle>
923inline
924typename CoupledHandleCast<TARGET_INDEX, Handle>::reference
925get(Handle & handle)
926{
927 return *cast<TARGET_INDEX>(handle);
928}
929
930 /** Returns a constant reference to the element in the band of the handle with index TARGET_INDEX.
931 */
932template <unsigned int TARGET_INDEX, class Handle>
933inline
934typename CoupledHandleCast<TARGET_INDEX, Handle>::const_reference
935get(Handle const & handle)
936{
937 return *cast<TARGET_INDEX>(handle);
938}
939
940 // meta-programming helper classes to infer the type of
941 // a CoupledHandle for a set of arrays
942template <unsigned int N, class List>
943struct ComposeCoupledHandle;
944
945template <unsigned int N, class T, class TAIL>
946struct ComposeCoupledHandle<N, TypeList<T, TAIL> >
947{
948 typedef typename ComposeCoupledHandle<N, TAIL>::type BaseType;
949 typedef typename MultiArrayShape<N>::type shape_type;
950 typedef CoupledHandle<T, BaseType> type;
951
952 template <class S>
953 type exec(MultiArrayView<N, T, S> const & m,
954 shape_type const & start, shape_type const & end,
955 BaseType const & base)
956 {
957 return type(m.subarray(start, end).data(), m.stride(), base);
958 }
959
960 template <class S>
961 type exec(MultiArrayView<N, T, S> const & m, BaseType const & base)
962 {
963 return type(m.data(), m.stride(), base);
964 }
965};
966
967template <unsigned int N>
968struct ComposeCoupledHandle<N, void>
969{
970 typedef typename MultiArrayShape<N>::type shape_type;
971 typedef CoupledHandle<shape_type, void> type;
972
973 type exec(shape_type const & shape)
974 {
975 return type(shape);
976 }
977
978 type exec(shape_type const & start, shape_type const & end)
979 {
980 return type(end-start);
981 }
982};
983
984
985template <unsigned int N, class T1=void, class T2=void, class T3=void, class T4=void, class T5=void>
986struct CoupledHandleType
987{
988 // reverse the order to get the desired index order
989 typedef typename MakeTypeList<T5, T4, T3, T2, T1>::type TypeList;
990 typedef typename ComposeCoupledHandle<N, TypeList>::type type;
991};
992
993template <unsigned int N, class T1, class T2, class T3, class T4, class T5>
994struct CoupledHandleType<N, Multiband<T1>, T2, T3, T4, T5>
995{
996 // reverse the order to get the desired index order
997 typedef typename MakeTypeList<T5, T4, T3, T2, Multiband<T1> >::type TypeList;
998 typedef typename ComposeCoupledHandle<N-1, TypeList>::type type;
999};
1000
1001 // meta-programming helper classes to implement 'zip(iterator1, iterator2)'
1002template <class A, class B>
1003struct ZipCoupledHandles;
1004
1005template <class A, class Head, class Tail>
1006struct ZipCoupledHandles<A, CoupledHandle<Head, Tail> >
1007{
1008 typedef typename ZipCoupledHandles<A, Tail>::type Next;
1009 typedef CoupledHandle<Head, Next> type;
1010
1011 static type construct(A const & a, CoupledHandle<Head, Tail> const & h)
1012 {
1013 return type(h, ZipCoupledHandles<A, Tail>::construct(a, (Tail const &)h));
1014 }
1015};
1016
1017template <class A, class Shape>
1018struct ZipCoupledHandles<A, CoupledHandle<Shape, void> >
1019{
1020 typedef A type;
1021
1022 static type construct(A const & a, CoupledHandle<Shape, void> const &)
1023 {
1024 return a;
1025 }
1026};
1027
1028 // allow an iterator that uses CoupledHandle to specialize its
1029 // dereferencing functions, such that
1030 // '*iter' returns a referenc to the current point if
1031 // the handle is just a coordinate handle
1032 // '*iter' returns a reference to the current data element
1033 // if the handle referes to just one array
1034 // '*iter' returns a reference to the handle itself if it refers to
1035 // several arrays simultaneously (i.e. is actualy a coupled handle)
1036template <class Handle, unsigned int INDEX=Handle::index>
1037struct CoupledHandleTraits
1038{
1039 typedef Handle value_type;
1040 typedef Handle & reference;
1041 typedef Handle const & const_reference;
1042 typedef Handle * pointer;
1043 typedef Handle const * const_pointer;
1044
1045 static reference dereference(Handle & h)
1046 {
1047 return h;
1048 }
1049
1050 static const_reference dereference(Handle const & h)
1051 {
1052 return h;
1053 }
1054};
1055
1056template <class Handle>
1057struct CoupledHandleTraits<Handle, 0>
1058{
1059 typedef typename Handle::value_type value_type;
1060 typedef typename Handle::reference reference;
1061 typedef typename Handle::const_reference const_reference;
1062 typedef typename Handle::pointer pointer;
1063 typedef typename Handle::const_pointer const_pointer;
1064
1065 static reference dereference(Handle & h)
1066 {
1067 return *h;
1068 }
1069
1070 static const_reference dereference(Handle const & h)
1071 {
1072 return *h;
1073 }
1074};
1075
1076template <class Handle>
1077struct CoupledHandleTraits<Handle, 1>
1078{
1079 typedef typename Handle::value_type value_type;
1080 typedef typename Handle::reference reference;
1081 typedef typename Handle::const_reference const_reference;
1082 typedef typename Handle::pointer pointer;
1083 typedef typename Handle::const_pointer const_pointer;
1084
1085 static reference dereference(Handle & h)
1086 {
1087 return *h;
1088 }
1089
1090 static const_reference dereference(Handle const & h)
1091 {
1092 return *h;
1093 }
1094};
1095
1096
1097//@}
1098
1099} // namespace vigra
1100
1101#endif /* MULTI_HANDLE_HXX */
Definition multi_handle.hxx:66
TinyVector< MultiArrayIndex, N > type
Definition multi_shape.hxx:272
Class for a single RGB value.
Definition rgbvalue.hxx:128
Class for fixed size vectors.
Definition tinyvector.hxx:1008
LookupTag< TAG, A >::result_type get(A const &a)
Definition accumulator.hxx:2942
NormTraits< T >::SquaredNormType dot(const MultiArrayView< 2, T, C1 > &x, const MultiArrayView< 2, T, C2 > &y)
Definition matrix.hxx:1342
std::ptrdiff_t MultiArrayIndex
Definition multi_fwd.hxx:60

© Ullrich Köthe (ullrich.koethe@iwr.uni-heidelberg.de)
Heidelberg Collaboratory for Image Processing, University of Heidelberg, Germany

html generated using doxygen and Python
vigra 1.12.2