Halide 19.0.0
Halide compiler and libraries
Loading...
Searching...
No Matches
memory_arena.h
Go to the documentation of this file.
1#ifndef HALIDE_RUNTIME_MEMORY_ARENA_H
2#define HALIDE_RUNTIME_MEMORY_ARENA_H
3
4#include "../HalideRuntime.h"
5#include "block_storage.h"
6
7namespace Halide {
8namespace Runtime {
9namespace Internal {
10
11// --
12// Memory Arena class for region based allocations and caching of same-type data
13// -- Implementation uses block_storage, and internally manages lists of allocated entries
14// -- Customizable allocator (defaults to BlockStorage::default_allocator())
15// -- Not thread safe ... locking must be done by client
16//
18public:
19 // Disable copy constructors and assignment
20 MemoryArena(const MemoryArena &) = delete;
21 MemoryArena &operator=(const MemoryArena &) = delete;
22
23 // Default initial capacity
24 static constexpr uint32_t default_capacity = uint32_t(32); // smallish
25
26 // Configurable parameters
32
33 explicit MemoryArena(void *user_context, const Config &config = default_config(),
35
37
38 // Factory methods for creation / destruction
40 static void destroy(void *user_context, MemoryArena *arena);
41
42 // Initialize a newly created instance
43 void initialize(void *user_context, const Config &config,
45
46 // Public interface methods
47 void *reserve(void *user_context, bool initialize = false);
48 void reclaim(void *user_context, void *ptr);
49 bool collect(void *user_context); //< returns true if any blocks were removed
50 void destroy(void *user_context);
51
52 // Access methods
53 const Config &current_config() const;
54 static const Config &default_config();
55
58
59private:
60 // Sentinel invalid entry value
61 static const uint32_t invalid_entry = uint32_t(-1);
62
63 // Each block contains:
64 // - an array of entries
65 // - an array of indices (for the free list)
66 // - an array of status flags (indicating usage)
67 // - free index points to next available entry for the block (or invalid_entry if block is full)
68 struct Block {
69 void *entries = nullptr;
70 uint32_t *indices = nullptr;
71 AllocationStatus *status = nullptr;
72 uint32_t capacity = 0;
73 uint32_t free_index = 0;
74 };
75
76 Block *create_block(void *user_context);
77 bool collect_block(void *user_context, Block *block); //< returns true if any blocks were removed
78 void destroy_block(void *user_context, Block *block);
79 Block *lookup_block(void *user_context, uint32_t index);
80
81 void *create_entry(void *user_context, Block *block, uint32_t index);
82 void destroy_entry(void *user_context, Block *block, uint32_t index);
83 void *lookup_entry(void *user_context, Block *block, uint32_t index);
84
85 Config config;
86 BlockStorage blocks;
87};
88
90 const Config &cfg,
91 const SystemMemoryAllocatorFns &alloc)
92 : config(cfg),
93 blocks(user_context, {sizeof(MemoryArena::Block), 32, 32}, alloc) {
95}
96
100
101MemoryArena *MemoryArena::create(void *user_context, const Config &cfg, const SystemMemoryAllocatorFns &system_allocator) {
102 halide_debug_assert(user_context, system_allocator.allocate != nullptr);
103 MemoryArena *result = reinterpret_cast<MemoryArena *>(
104 system_allocator.allocate(user_context, sizeof(MemoryArena)));
105
106 if (result == nullptr) {
107 halide_error(user_context, "MemoryArena: Failed to create instance! Out of memory!\n");
108 return nullptr;
109 }
110
111 result->initialize(user_context, cfg, system_allocator);
112 return result;
113}
114
116 halide_debug_assert(user_context, instance != nullptr);
117 SystemMemoryAllocatorFns system_allocator = instance->blocks.current_allocator();
118 instance->destroy(user_context);
119 halide_debug_assert(user_context, system_allocator.deallocate != nullptr);
120 system_allocator.deallocate(user_context, instance);
121}
122
124 const Config &cfg,
125 const SystemMemoryAllocatorFns &system_allocator) {
126 config = cfg;
127 blocks.initialize(user_context, {sizeof(MemoryArena::Block), 32, 32}, system_allocator);
129}
130
132 if (!blocks.empty()) {
133 for (size_t i = blocks.size(); i--;) {
134 Block *block = lookup_block(user_context, i);
135 halide_debug_assert(user_context, block != nullptr);
136 destroy_block(user_context, block);
137 }
138 }
139 blocks.destroy(user_context);
140}
141
143 bool result = false;
144 for (size_t i = blocks.size(); i--;) {
145 Block *block = lookup_block(user_context, i);
146 halide_debug_assert(user_context, block != nullptr);
147 if (collect_block(user_context, block)) {
148 blocks.remove(user_context, i);
149 result = true;
150 }
151 }
152 return result;
153}
154
155void *MemoryArena::reserve(void *user_context, bool initialize) {
156 // Scan blocks for a free entry
157 for (size_t i = blocks.size(); i--;) {
158 Block *block = lookup_block(user_context, i);
159 halide_debug_assert(user_context, block != nullptr);
160 if (block->free_index != invalid_entry) {
161 return create_entry(user_context, block, block->free_index);
162 }
163 }
164
165 if (config.maximum_block_count && (blocks.size() >= config.maximum_block_count)) {
166 halide_error(user_context, "MemoryArena: Failed to reserve new entry! Maxmimum blocks reached!\n");
167 return nullptr;
168 }
169
170 // All blocks full ... create a new one
171 uint32_t index = 0;
172 Block *block = create_block(user_context);
173 void *entry_ptr = create_entry(user_context, block, index);
174
175 // Optionally clear the allocation if requested
176 if (initialize) {
177 memset(entry_ptr, 0, config.entry_size);
178 }
179 return entry_ptr;
180}
181
182void MemoryArena::reclaim(void *user_context, void *entry_ptr) {
183 for (size_t i = blocks.size(); i--;) {
184 Block *block = lookup_block(user_context, i);
185 halide_debug_assert(user_context, block != nullptr);
186
187 // is entry_ptr in the address range of this block.
188 uint8_t *offset_ptr = static_cast<uint8_t *>(entry_ptr);
189 uint8_t *base_ptr = static_cast<uint8_t *>(block->entries);
190 uint8_t *end_ptr = static_cast<uint8_t *>(offset_address(block->entries, block->capacity * config.entry_size));
191 if ((entry_ptr >= base_ptr) && (entry_ptr < end_ptr)) {
192 const uint32_t offset = static_cast<uint32_t>(offset_ptr - base_ptr);
193 const uint32_t index = offset / config.entry_size;
194 destroy_entry(user_context, block, index);
195 return;
196 }
197 }
198 halide_error(user_context, "MemoryArena: Pointer address doesn't belong to this memory pool!\n");
199}
200
201typename MemoryArena::Block *MemoryArena::create_block(void *user_context) {
202 // resize capacity starting with initial up to 1.5 last capacity
203 uint32_t new_capacity = config.minimum_block_capacity;
204 if (!blocks.empty()) {
205 const Block *last_block = static_cast<Block *>(blocks.back());
206 new_capacity = (last_block->capacity * 3 / 2);
207 }
208
209 halide_debug_assert(user_context, current_allocator().allocate != nullptr);
210 void *new_entries = current_allocator().allocate(user_context, config.entry_size * new_capacity);
211 memset(new_entries, 0, config.entry_size * new_capacity);
212
213 uint32_t *new_indices = (uint32_t *)current_allocator().allocate(user_context, sizeof(uint32_t) * new_capacity);
215
216 for (uint32_t i = 0; i < new_capacity - 1; ++i) {
217 new_indices[i] = i + 1; // singly-linked list of all free entries in the block
218 new_status[i] = AllocationStatus::Available; // usage status
219 }
220
221 new_indices[new_capacity - 1] = invalid_entry;
222 new_status[new_capacity - 1] = AllocationStatus::InvalidStatus;
223
224 const Block new_block = {new_entries, new_indices, new_status, new_capacity, 0};
225 blocks.append(user_context, &new_block);
226 return static_cast<Block *>(blocks.back());
227}
228
229void MemoryArena::destroy_block(void *user_context, Block *block) {
230 halide_debug_assert(user_context, block != nullptr);
231 if (block->entries != nullptr) {
232 halide_debug_assert(user_context, current_allocator().deallocate != nullptr);
233 current_allocator().deallocate(user_context, block->entries);
234 current_allocator().deallocate(user_context, block->indices);
235 current_allocator().deallocate(user_context, block->status);
236 block->entries = nullptr;
237 block->indices = nullptr;
238 block->status = nullptr;
239 }
240}
241
242bool MemoryArena::collect_block(void *user_context, Block *block) {
243 halide_debug_assert(user_context, block != nullptr);
244 if (block->entries != nullptr) {
245 bool can_collect = true;
246 for (size_t i = block->capacity; i--;) {
247 if (block->status[i] == AllocationStatus::InUse) {
248 can_collect = false;
249 break;
250 }
251 }
252 if (can_collect) {
253 destroy_block(user_context, block);
254 return true;
255 }
256 }
257 return false;
258}
259
260MemoryArena::Block *MemoryArena::lookup_block(void *user_context, uint32_t index) {
261 return static_cast<Block *>(blocks[index]);
262}
263
264void *MemoryArena::lookup_entry(void *user_context, Block *block, uint32_t index) {
265 halide_debug_assert(user_context, block != nullptr);
266 halide_debug_assert(user_context, block->entries != nullptr);
267 return offset_address(block->entries, index * config.entry_size);
268}
269
270void *MemoryArena::create_entry(void *user_context, Block *block, uint32_t index) {
271 void *entry_ptr = lookup_entry(user_context, block, index);
272 block->free_index = block->indices[index];
273 block->status[index] = AllocationStatus::InUse;
274#ifdef DEBUG_RUNTIME_INTERNAL
275 memset(entry_ptr, 0, config.entry_size);
276#endif
277 return entry_ptr;
278}
279
280void MemoryArena::destroy_entry(void *user_context, Block *block, uint32_t index) {
281 block->status[index] = AllocationStatus::Available;
282 block->indices[index] = block->free_index;
283 block->free_index = index;
284}
285
286const typename MemoryArena::Config &
288 return config;
289}
290
291const typename MemoryArena::Config &
293 static Config result;
294 return result;
295}
296
299 return blocks.current_allocator();
300}
301
306
307// --
308
309} // namespace Internal
310} // namespace Runtime
311} // namespace Halide
312
313#endif // HALIDE_RUNTIME_MEMORY_ARENA_H
This file declares the routines used by Halide internally in its runtime.
void halide_error(void *user_context, const char *)
Halide calls this function on runtime errors (for example bounds checking failures).
void remove(void *user_context, size_t index)
void initialize(void *user_context, const Config &cfg, const SystemMemoryAllocatorFns &sma=default_allocator())
static const SystemMemoryAllocatorFns & default_allocator()
const SystemMemoryAllocatorFns & current_allocator() const
void append(void *user_context, const void *entry_ptr)
void initialize(void *user_context, const Config &config, const SystemMemoryAllocatorFns &allocator=default_allocator())
void * reserve(void *user_context, bool initialize=false)
static MemoryArena * create(void *user_context, const Config &config, const SystemMemoryAllocatorFns &allocator=default_allocator())
const Config & current_config() const
static constexpr uint32_t default_capacity
const SystemMemoryAllocatorFns & current_allocator() const
static const Config & default_config()
static void destroy(void *user_context, MemoryArena *arena)
MemoryArena(const MemoryArena &)=delete
void reclaim(void *user_context, void *ptr)
MemoryArena & operator=(const MemoryArena &)=delete
static const SystemMemoryAllocatorFns & default_allocator()
ALWAYS_INLINE const void * offset_address(const void *address, size_t byte_offset)
This file defines the class FunctionDAG, which is our representation of a Halide pipeline,...
@ Internal
Not visible externally, similar to 'static' linkage in C.
#define halide_debug_assert(user_context, cond)
halide_debug_assert() is like halide_assert(), but only expands into a check when DEBUG_RUNTIME is de...
unsigned __INT8_TYPE__ uint8_t
void * memset(void *s, int val, size_t n)
unsigned __INT32_TYPE__ uint32_t
void * user_context
VulkanMemoryAllocator * allocator