//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifdef _DEBUG
#define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//   typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
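// For example, a class might route its allocations through the global pool
// (a hypothetical sketch; TSymbol is an illustrative name, not part of this
// header):
//
//     class TSymbol {
//       public:
//         void* operator new(size_t s) { return GlobalPoolAllocator.allocate(s); }
//         void operator delete(void*) { }  // memory is reclaimed wholesale by pop()
//     };
//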

#include <stddef.h>
#include <string.h>
#include <new>  // for the placement new used in initializeAllocation()
#include <vector>

//
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//

class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#endif
    }

    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }
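
    // For example, with GUARD_BLOCKS defined, a request for 16 bytes of user
    // data occupies allocationSize(16) = sizeof(TAllocation) + guardBlockSize
    // + 16 + guardBlockSize bytes in total, and offsetAllocation() skips the
    // header and the initial guard block to reach the start of userData.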

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char* mem;     // beginning of our allocation (pts to header)
    TAllocation* prevAlloc; // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};

//
// There are several stacks. One is to track the pushing and popping
// of the user, and is not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used need not match the underlying OS page size, but
// keeping it near that size, or at a multiple of it, is likely most efficient.
//
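// A minimal usage sketch (illustrative, not part of this header):
//
//     TPoolAllocator pool;
//     pool.push();                    // open a new allocation scope
//     void* p = pool.allocate(128);   // cheap pointer-bump allocation
//     pool.pop();                     // reclaim everything since the push()
//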
class TPoolAllocator {
public:
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to. Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, to free all memory since the first
    // allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory. Returns 0 if no memory
    // is available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate. The point of this class is that deallocation
    // can be skipped by its user: the model of use is to deallocate
    // everything at once by calling pop(), and so not have to solve
    // memory leak problems.
    //

protected:
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
            , lastAllocation(0)
#endif
            { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;
        size_t pageCount;
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
    };

    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;           // granularity of allocation from the OS
    size_t alignment;          // all returned allocations will be aligned at
                               // this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;         // amount of memory to skip to make room for the
                               // header (basically, size of header, rounded
                               // up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;         // list of popped memory
    tHeader* inUseList;        // list of all memory currently being used
    tAllocStack stack;         // stack of where to allocate from, to partition pool

    int numCalls;              // just an interesting statistic
    size_t totalBytes;         // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow copy constructor
};


//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);
#define GlobalPoolAllocator GetGlobalPoolAllocator()
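
// For example (an illustrative sketch; the implied behavior is that the
// getter returns whatever pool was last set):
//
//     TPoolAllocator threadPool;
//     SetGlobalPoolAllocator(&threadPool);
//     void* p = GlobalPoolAllocator.allocate(64);  // allocates from threadPool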

struct TThreadGlobalPools
{
    TPoolAllocator* globalPoolAllocator;
};

//
// This STL compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
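// A usage sketch (illustrative, not part of this header):
//
//     std::vector<int, pool_allocator<int> > v;  // draws from GlobalPoolAllocator
//
//     TPoolAllocator myPool;
//     pool_allocator<int> perPool(myPool);       // or bind to a specific pool
//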
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    template<class Other>
    struct rebind {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(&GlobalPoolAllocator) { }
    pool_allocator(TPoolAllocator& a) : allocator(&a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template<class Other>
    pool_allocator<T>& operator=(const pool_allocator<Other>& p) {
        allocator = &p.getAllocator();  // use the public accessor; 'allocator'
                                        // is protected in other instantiations
        return *this;
    }

    template<class Other>
    pool_allocator(const pool_allocator<Other>& p) : allocator(&p.getAllocator()) { }

#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
    // libCStd on some platforms has a different allocate/deallocate interface.
    // The caller pre-bakes sizeof(T) into 'n', which is the number of bytes to
    // be allocated, not the number of elements.
    void* allocate(size_type n) {
        return getAllocator().allocate(n);
    }
    void* allocate(size_type n, const void*) {
        return getAllocator().allocate(n);
    }
    void deallocate(void*, size_type) {}
#else
    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    void deallocate(pointer, size_type) {}
#endif  // defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)

    void construct(pointer p, const T& val) { new ((void*)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    void setAllocator(TPoolAllocator* a) { allocator = a; }
    TPoolAllocator& getAllocator() const { return *allocator; }

protected:
    TPoolAllocator* allocator;
};

#endif  // _POOLALLOC_INCLUDED_