SparseArray.h

/*
 *    Copyright 2004-2006 Intel Corporation
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 */

#ifndef __SPARSE_RANGE_H__
#define __SPARSE_RANGE_H__

#include <stdlib.h>   // calloc(), realloc(), free(), size_t
#include <list>

/**
 * A sparse array: an ordered list of non-contiguous, heap-allocated
 * blocks of elements, indexed by element offset. Reading an offset
 * that no block covers yields a default-constructed _Type. Storage is
 * managed with calloc/realloc/free, so _Type is assumed to be a
 * plain (trivially copyable) type.
 */
template<typename _Type> class SparseArray {
public:
    /// A contiguous run of size_ elements starting at element offset_.
    class Block {
    public:
        size_t offset_;
        size_t size_;
        _Type* data_;

        Block(size_t offset = 0, size_t size = 0)
            : offset_(offset), size_(size), data_(0)
        {
            data_ = static_cast<_Type*>(calloc(size, sizeof(_Type)));
        }

        /// Orders blocks by their starting offset.
        struct BlockCompare {
            bool operator()(const Block& a, const Block& b)
            {
                return a.offset_ < b.offset_;
            }
        };
    };
    typedef std::list<Block> BlockList;

    /// Free the storage owned by every block.
    ~SparseArray()
    {
        for (typename BlockList::iterator itr = blocks_.begin();
             itr != blocks_.end(); ++itr)
        {
            free(itr->data_);
            itr->data_ = 0;
        }
    }

    /**
     * Read the element at the given offset.
     *
     * @return the stored value, or a default-constructed _Type if no
     * block covers the offset.
     */
    _Type operator[](size_t offset) const
    {
        for (typename BlockList::const_iterator itr = blocks_.begin();
             itr != blocks_.end(); ++itr)
        {
            if (itr->offset_ <= offset &&
                itr->offset_ + itr->size_ > offset)
            {
                return itr->data_[offset - itr->offset_];
            }

            if (itr->offset_ > offset)
            {
                break;
            }
        }

        return _Type();
    }

    /**
     * Copy elts elements from in into the array starting at offset,
     * allocating and merging blocks as needed.
     */
    void range_write(size_t offset, size_t elts, const _Type* in)
    {
        _Type* dest = allocate(offset, elts);
        ASSERT(dest != 0);

        for (size_t i = 0; i < elts; ++i)
        {
            dest[i] = in[i];
        }
    }

    /**
     * @return the total extent of the array, i.e. one past the highest
     * offset covered by any block (not the number of populated elements).
     */
    size_t size() const
    {
        if (blocks_.empty())
        {
            return 0;
        }

        typename BlockList::const_iterator i = blocks_.end();
        --i;

        return i->offset_ + i->size_;
    }

    /**
     * @return the list of allocated blocks, ordered by offset.
     */
    const BlockList& blocks() const
    {
        return blocks_;
    }

private:
    /// The blocks, kept sorted by offset and non-overlapping.
    BlockList blocks_;

    /**
     * Make sure a single contiguous block covers the range
     * [offset, offset + size), merging with or absorbing any existing
     * blocks that overlap it.
     *
     * @return a pointer to the element at offset within the covering
     * block.
     */
    _Type* allocate(size_t offset, size_t size)
    {
        bool merge = false;

        //
        // Take care of any existing blocks which overlap the new
        // block like this:
        //
        // |-- old block --|
        //                   |- new block -| case 1
        //   |-new block-|                   case 2
        //     |---- new block ----|         case 3
        typename BlockList::iterator itr = blocks_.begin();
        while (itr != blocks_.end())
        {
            // case 1: block occurs before, don't merge if bordering
            if (itr->offset_ + itr->size_ <= offset)
            {
                ++itr;
                continue;
            }

            // case 2: new block is completely contained in an existing block
            if (itr->offset_ <= offset &&
                (itr->offset_ + itr->size_ >= offset + size))
            {
                return &itr->data_[offset - itr->offset_];
            }

            // case 3: current block overlaps part of the new block
            if (itr->offset_ <= offset &&
                (itr->offset_ + itr->size_ > offset) &&
                (itr->offset_ + itr->size_ < offset + size))
            {
                merge = true;
                break;
            }

            // the current block starts past the new block's offset
            if (itr->offset_ > offset)
            {
                break;
            }

            NOTREACHED;
        }

        Block new_block;
        if (merge)
        {
            ASSERT(itr != blocks_.end());

            // grow the overlapping block so it reaches offset + size
            size_t new_size = offset + size - itr->offset_;
            itr->data_ = static_cast<_Type*>
                         (realloc(itr->data_, sizeof(_Type) * new_size));
            itr->size_ = new_size;
            new_block = *itr;
        }
        else
        {
            // no overlap on the left: insert a fresh block before itr
            new_block = Block(offset, size);
            blocks_.insert(itr, new_block);
            --itr;
        }

        typename BlockList::iterator tail = itr;
        ++tail;

        //
        // fixup the tail, which includes any blocks like this:
        //
        // |---- new block ----|
        //    |- old block -|             case 1
        //         |---- old block ----|  case 2
        //
        while (tail != blocks_.end())
        {
            // stepped past the end of the new_block
            if (tail->offset_ > itr->offset_ + itr->size_)
            {
                break;
            }

            // case 1: the old block is fully contained in the new one
            if (tail->offset_ <= itr->offset_ + itr->size_ &&
                tail->offset_ + tail->size_ <= itr->offset_ + itr->size_)
            {
                // copy over the old data
                size_t offset = tail->offset_ - itr->offset_;
                for (size_t i = 0; i < tail->size_; ++i)
                {
                    itr->data_[offset + i] = tail->data_[i];
                }
                free(tail->data_);
                blocks_.erase(tail++);
                continue;
            }

            // case 2: the old block extends past the new one, so grow
            // the new block and absorb the old one
            if (tail->offset_ <= itr->offset_ + itr->size_)
            {
                size_t new_size = tail->offset_ + tail->size_ - itr->offset_;
                // realloc takes a byte count, so scale by sizeof(_Type)
                itr->data_ = static_cast<_Type*>
                             (realloc(itr->data_, sizeof(_Type) * new_size));
                itr->size_ = new_size;

                // copy over the old data
                size_t offset = tail->offset_ - itr->offset_;
                for (size_t i = 0; i < tail->size_; ++i)
                {
                    itr->data_[offset + i] = tail->data_[i];
                }
                free(tail->data_);
                blocks_.erase(tail++);
                continue;
            }

            NOTREACHED;
        }

        return &itr->data_[offset - itr->offset_];
    }

};

#endif // __SPARSE_RANGE_H__
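
The class is typically driven through range_write() and operator[]. Below is a minimal standalone usage sketch; it is not part of the DTN sources, and it assumes the project's ASSERT and NOTREACHED macros can be stubbed with <cassert> so the header compiles on its own. It also exercises the block merging done by allocate() indirectly through blocks().size().

// Hypothetical usage sketch for SparseArray.h (not part of the original file).
#include <cassert>
#include <cstdio>

// Stand-ins for the project's debug macros used inside SparseArray.h.
#define ASSERT(x)   assert(x)
#define NOTREACHED  assert(false)

#include "SparseArray.h"

int main()
{
    SparseArray<char> sa;

    // two disjoint writes produce two separate blocks
    sa.range_write(0, 5, "hello");
    sa.range_write(10, 5, "world");
    assert(sa.blocks().size() == 2);
    assert(sa.size() == 15);

    // an uncovered offset reads back as a default-constructed char
    assert(sa[7] == '\0');

    // a write that fills the gap gets merged into a single block
    sa.range_write(4, 6, "o-gap-");
    assert(sa.blocks().size() == 1);
    assert(sa.size() == 15);

    for (size_t i = 0; i < sa.size(); ++i)
    {
        std::putchar(sa[i]);
    }
    std::putchar('\n');   // prints "hello-gap-world"

    return 0;
}

The bridging write makes allocate() grow the first block over the gap and then absorb the second block in its tail fixup pass, which is why blocks().size() drops from 2 to 1 while size() stays at 15.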
