Halide 19.0.0
Halide compiler and libraries
device_buffer_utils.h
#ifndef HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H
#define HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H

#include "HalideRuntime.h"
#include "device_interface.h"
#include "printer.h"

namespace Halide {
namespace Runtime {
namespace Internal {

// A host <-> dev copy should be done with the smallest possible
// number of contiguous copies, to minimize driver overhead. If our
// halide_buffer_t has strides larger than its extents (e.g. because
// it represents a sub-region of a larger halide_buffer_t) we can't
// safely copy it back and forth using a single contiguous copy,
// because we'd clobber in-between values that another thread might be
// using. In the best case we can do a single contiguous copy, but in
// the worst case we need to individually copy over every pixel.
//
// This problem is made harder by the fact that the ordering of the
// dimensions in a halide_buffer_t doesn't relate to memory layout at
// all, so the strides could be in any order.
//
// We solve it by representing the copy job we need to perform as a
// device_copy struct. It describes a multi-dimensional array of
// copies to perform. Initially it describes copying a single
// pixel at a time. We then try to discover contiguous groups of
// copies that can be coalesced into a single larger copy.

// The struct that describes a host <-> dev copy to perform.
#define MAX_COPY_DIMS 16
struct device_copy {
    // Opaque handles for the source and destination memory.
    uint64_t src, dst;
    // The offset in the source memory to start the copy from.
    uint64_t src_begin;
    // The multidimensional array of contiguous copy tasks that need to be done.
    uint64_t extent[MAX_COPY_DIMS];
    // The strides (in bytes) that separate adjacent copy tasks in each dimension.
    uint64_t src_stride_bytes[MAX_COPY_DIMS];
    uint64_t dst_stride_bytes[MAX_COPY_DIMS];
    // How many contiguous bytes to copy per task.
    uint64_t chunk_size;
};
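
// Illustrative sketch (not part of the runtime, values hypothetical): a
// device_copy describing the copy of a 4x4 crop out of a 16x16 row-major
// uint8 image into a densely packed destination. Each task copies one
// 4-byte row of the crop; rows are 16 bytes apart in the source but 4
// bytes apart in the destination, so they can't be coalesced further:
//
//     device_copy c = {0};
//     for (int i = 0; i < MAX_COPY_DIMS; i++) {
//         c.extent[i] = 1;  // unused dimensions must have extent 1
//     }
//     c.chunk_size = 4;            // bytes per contiguous task (one crop row)
//     c.extent[0] = 4;             // four rows to copy
//     c.src_stride_bytes[0] = 16;  // row pitch in the source image
//     c.dst_stride_bytes[0] = 4;   // row pitch in the dense destination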

WEAK void copy_memory_helper(const device_copy &copy, int d, int64_t src_off, int64_t dst_off) {
    if ((d < -1) || (d >= MAX_COPY_DIMS)) {
        return;  // TODO(marcos): we should probably flag an error somehow here
    }

    // Skip size-1 dimensions.
    while (d >= 0 && copy.extent[d] == 1) {
        d--;
    }

    if (d == -1) {
        // Base case: issue one contiguous copy of chunk_size bytes.
        const void *from = (const void *)(copy.src + src_off);
        void *to = (void *)(copy.dst + dst_off);
        memcpy(to, from, copy.chunk_size);
    } else {
        // Recurse over dimension d, advancing the offsets by its strides.
        for (uint64_t i = 0; i < copy.extent[d]; i++) {
            copy_memory_helper(copy, d - 1, src_off, dst_off);
            src_off += copy.src_stride_bytes[d];
            dst_off += copy.dst_stride_bytes[d];
        }
    }
}
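
// Recursion sketch (illustrative): for the 4x4 crop descriptor above, the
// top-level call starts at d == MAX_COPY_DIMS - 1, the while loop skips all
// the extent-1 dimensions down to d == 0, and the d == -1 base case then
// fires once per row:
//
//     copy_memory_helper(c, 15, 0, 0)
//       -> loops over extent[0] == 4 rows
//         -> memcpy of chunk_size == 4 bytes, offsets advanced by 16 / 4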

WEAK void copy_memory(const device_copy &copy, void *user_context) {
    // If this is a zero copy buffer, these pointers will be the same.
    if (copy.src != copy.dst) {
        copy_memory_helper(copy, MAX_COPY_DIMS - 1, copy.src_begin, 0);
    } else {
        debug(user_context) << "copy_memory: no copy needed as pointers are the same.\n";
    }
}
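
// Usage sketch (illustrative only; 'parent' and 'crop' are hypothetical host
// allocations): pointing the descriptor above at real memory and driving it
// with copy_memory performs four 4-byte memcpys, one per crop row:
//
//     uint8_t parent[16 * 16], crop[4 * 4];
//     c.src = (uint64_t)parent;
//     c.dst = (uint64_t)crop;
//     c.src_begin = 0;          // the crop starts at the parent's first pixel
//     copy_memory(c, nullptr);  // src != dst, so the copy actually runs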

// Fills the entire dst buffer, which must be contained within src.
WEAK device_copy make_buffer_copy(const halide_buffer_t *src, bool src_host,
                                  const halide_buffer_t *dst, bool dst_host) {
    if (src->dimensions != dst->dimensions ||
        src->type.bytes() != dst->type.bytes() ||
        dst->dimensions > MAX_COPY_DIMS) {
        // These conditions should also be checked by the caller. Checking
        // them before reading src->dim / dst->dim below avoids an
        // out-of-bounds access when the dimensionalities don't match.
        device_copy zero = {0};
        return zero;
    }

    // Make a copy job representing copying the first pixel only.
    device_copy c;
    c.src = src_host ? (uint64_t)src->host : src->device;
    c.dst = dst_host ? (uint64_t)dst->host : dst->device;
    c.chunk_size = src->type.bytes();
    for (int i = 0; i < MAX_COPY_DIMS; i++) {
        c.extent[i] = 1;
        c.src_stride_bytes[i] = 0;
        c.dst_stride_bytes[i] = 0;
    }

    // Offset the src base pointer to the right point in its buffer.
    c.src_begin = 0;
    for (int i = 0; i < src->dimensions; i++) {
        c.src_begin += (int64_t)src->dim[i].stride * (int64_t)(dst->dim[i].min - src->dim[i].min);
    }
    c.src_begin *= c.chunk_size;

    if (c.chunk_size == 0) {
        // This buffer apparently represents no memory. Return a zero'd copy
        // task.
        device_copy zero = {0};
        return zero;
    }

    // Now expand it to copy all the pixels (one at a time) by taking
    // the extents and strides from the halide_buffer_ts. Dimensions
    // are added to the copy by inserting them such that the dst
    // strides are in ascending order.
    for (int i = 0; i < dst->dimensions; i++) {
        // TODO: deal with negative strides.
        uint64_t dst_stride_bytes = (uint64_t)dst->dim[i].stride * dst->type.bytes();
        uint64_t src_stride_bytes = (uint64_t)src->dim[i].stride * src->type.bytes();
        // Insert the dimension sorted into the buffer copy.
        int insert;
        for (insert = 0; insert < i; insert++) {
            // If the stride is 0, we put it at the end because it can't be
            // folded.
            if (dst_stride_bytes < c.dst_stride_bytes[insert] && dst_stride_bytes != 0) {
                break;
            }
        }
        for (int j = i; j > insert; j--) {
            c.extent[j] = c.extent[j - 1];
            c.dst_stride_bytes[j] = c.dst_stride_bytes[j - 1];
            c.src_stride_bytes[j] = c.src_stride_bytes[j - 1];
        }
        c.extent[insert] = dst->dim[i].extent;
        c.dst_stride_bytes[insert] = dst_stride_bytes;
        c.src_stride_bytes[insert] = src_stride_bytes;
    }

    // Attempt to fold contiguous dimensions into the chunk
    // size. Since the dimensions are sorted by stride, and the
    // strides must be greater than or equal to the chunk size, this
    // means we can just delete the innermost dimension as long as its
    // stride in both src and dst is equal to the chunk size.
    while (c.chunk_size &&
           c.chunk_size == c.src_stride_bytes[0] &&
           c.chunk_size == c.dst_stride_bytes[0]) {
        // Fold the innermost dimension's extent into the chunk_size.
        c.chunk_size *= c.extent[0];

        // Erase the innermost dimension from the list of dimensions to
        // iterate over.
        for (int j = 1; j < MAX_COPY_DIMS; j++) {
            c.extent[j - 1] = c.extent[j];
            c.src_stride_bytes[j - 1] = c.src_stride_bytes[j];
            c.dst_stride_bytes[j - 1] = c.dst_stride_bytes[j];
        }
        c.extent[MAX_COPY_DIMS - 1] = 1;
        c.src_stride_bytes[MAX_COPY_DIMS - 1] = 0;
        c.dst_stride_bytes[MAX_COPY_DIMS - 1] = 0;
    }
    return c;
}

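// Folding sketch (illustrative): for a dense copy every dimension folds. E.g.
// copying an entire 16x16 uint8 image starts as chunk_size = 1 with
// extent = {16, 16} and src/dst strides = {1, 16} bytes. The first fold
// gives chunk_size = 16, the erased dimension shifts down, and the second
// fold gives chunk_size = 256 with no dimensions left, so copy_memory
// degenerates to a single 256-byte memcpy. For the 4x4 crop described
// earlier, the fold stops once src_stride_bytes[0] (16) no longer equals
// chunk_size (4), leaving four separate row copies.
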
WEAK device_copy make_host_to_device_copy(const halide_buffer_t *buf) {
    return make_buffer_copy(buf, true, buf, false);
}

WEAK device_copy make_device_to_host_copy(const halide_buffer_t *buf) {
    return make_buffer_copy(buf, false, buf, true);
}

// Caller is expected to verify that src->dimensions == dst->dimensions.
ALWAYS_INLINE int64_t calc_device_crop_byte_offset(const struct halide_buffer_t *src, struct halide_buffer_t *dst) {
    int64_t offset = 0;
    for (int i = 0; i < src->dimensions; i++) {
        offset += (int64_t)(dst->dim[i].min - src->dim[i].min) * (int64_t)src->dim[i].stride;
    }
    offset *= src->type.bytes();
    return offset;
}
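
// Worked example (hypothetical values): cropping a 16x16 uint16 image with
// strides {1, 16} so that the crop's min is at pixel (4, 2) gives
//     offset = ((4 - 0) * 1 + (2 - 0) * 16) * 2 = 72 bytes.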

// Caller is expected to verify that src->dimensions == dst->dimensions + 1,
// and that slice_dim and slice_pos are valid within src.
ALWAYS_INLINE int64_t calc_device_slice_byte_offset(const struct halide_buffer_t *src, int slice_dim, int slice_pos) {
    int64_t offset = (int64_t)(slice_pos - src->dim[slice_dim].min) * (int64_t)src->dim[slice_dim].stride;
    offset *= src->type.bytes();
    return offset;
}
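
// Worked example (hypothetical values): slicing the same 16x16 uint16 image
// at position 3 of dimension 1 (stride 16) gives
//     offset = (3 - 0) * 16 * 2 = 96 bytes.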

} // namespace Internal
} // namespace Runtime
} // namespace Halide

#endif // HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H