// Lock-free single-producer/single-consumer ring buffer state.
std::atomic<size_t> head;   // Next slot to write; advanced only by the producer.
std::atomic<size_t> tail;   // Next slot to read; advanced only by the consumer.
std::unique_ptr<T[]> buff;
// ...
capacity(0), tail(0), head(0), buff(nullptr) {
    // One slot stays unused so that head == tail means "empty" and
    // (head + 1) % capacity == tail means "full".
    T *mem = new T[size + 1];
// ...
operator bool() const {
    return buff.get() != nullptr;
}
bool push(T data) {
    size_t curr = head.load(std::memory_order_relaxed);
    size_t next = (curr + 1) % capacity;
    if (!buff || (next == tail.load(std::memory_order_acquire))) {
        return false;   // Unallocated, or the ring is full.
    }
    buff[curr] = data;
    // Release store pairs with the consumer's acquire load of head,
    // so the write to buff[curr] is visible before the new index.
    head.store(next, std::memory_order_release);
    return true;
}

T pop(bool peek=false) {
    size_t curr = tail.load(std::memory_order_relaxed);
    if (!buff || (curr == head.load(std::memory_order_acquire))) {
        return T();   // Unallocated, or the ring is empty.
    }
    T data = buff[curr];
    if (!peek) {   // A peek returns the element without consuming it.
        size_t next = (curr + 1) % capacity;
        tail.store(next, std::memory_order_release);
    }
    return data;
}
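// A minimal usage sketch for the ring buffer above, assuming the enclosing
// class is a template named Queue constructed with its capacity (the class
// name and constructor signature are assumptions; only the members and the
// push/pop bodies appear above). It is safe for exactly one producer and one
// consumer, e.g. an IRQ handler pushing while the main loop pops:
//
//     Queue<int> q(8);
//     q.push(1);                 // producer side (e.g. DMA-complete callback)
//     int front = q.pop(true);   // peek without consuming
//     int v = q.pop();           // consume the element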
// Cache-line-aligned sample buffer handed out and reclaimed by a DMAPool.
template <class T, size_t A=__CACHE_LINE_SIZE__> class DMABuffer {
    // ...
    DMABuffer(DMAPool<T, A> *pool, size_t samples, size_t channels, T *mem):
        pool(pool), n_samples(samples), n_channels(channels), ptr(mem), ts(0), flags(0) {
    }
    size_t size() const {
        return n_samples * n_channels;   // Element count across all channels.
    }

    size_t bytes() const {
        return n_samples * n_channels * sizeof(T);   // Byte count, for cache ops.
    }
    // Write dirty cache lines back to RAM before the DMA engine reads the buffer.
    SCB_CleanDCache_by_Addr(data(), bytes());
    // ...
    // Drop stale cache lines so the CPU re-reads what the DMA engine wrote.
    SCB_InvalidateDCache_by_Addr(data(), bytes());
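    // A sketch of where these CMSIS cache-maintenance calls sit in a typical
    // transfer, assuming a write-back data cache; the buffer variable is
    // illustrative, and the enclosing method names are not shown above:
    //
    //     buf[i] = sample;                                       // CPU fills buffer
    //     SCB_CleanDCache_by_Addr(buf.data(), buf.bytes());      // clean before DMA TX
    //     // ... DMA runs; or a DMA RX completes ...
    //     SCB_InvalidateDCache_by_Addr(buf.data(), buf.bytes()); // invalidate before CPU reads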
    uint32_t timestamp() {
        return ts;
    }

    void timestamp(uint32_t ts) {
        this->ts = ts;   // Parameter shadows the member, hence the explicit this->.
    }

    uint32_t channels() {
        return n_channels;
    }
    // ...
        // Hand this buffer back to the pool that owns it.
        pool->free(this, flags);
    }
    void set_flags(uint32_t f) {
        flags |= f;
    }

    uint32_t get_flags(uint32_t f=0xFFFFFFFFU) {
        return flags & f;   // The subset of flags selected by the mask f.
    }

    void clr_flags(uint32_t f=0xFFFFFFFFU) {
        flags &= ~f;
    }
    T& operator[](size_t i) {
        assert(ptr && i < size());
        return ptr[i];
    }

    const T& operator[](size_t i) const {
        assert(ptr && i < size());
        return ptr[i];
    }
    operator bool() const {
        return (ptr != nullptr);
    }
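    // A minimal sketch of element access on a DMABuffer, assuming a buffer
    // obtained from a pool or a driver callback (the variable and the call
    // to micros() are illustrative):
    //
    //     DMABuffer<uint16_t> *buf = /* from DMAPool::alloc() or a callback */;
    //     if (*buf) {                      // operator bool: backing memory present
    //         uint16_t first = (*buf)[0];  // asserted, bounds-checked access
    //         buf->timestamp(micros());    // stamp the buffer
    //     }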
// Fixed-size pool of DMA buffers circulating between two queues: wqueue holds
// buffers free for writing, rqueue holds filled buffers waiting to be read.
template <class T, size_t A=__CACHE_LINE_SIZE__> class DMAPool {
    static void *aligned_malloc(size_t size) {
        void **ptr, *stashed;
        // Reserve worst-case padding plus one pointer-sized slot used to
        // stash the raw malloc() pointer just below the aligned address.
        size_t offset = A - 1 + sizeof(void *);
        // Bail out on odd (unsupported) alignments or on malloc failure.
        if ((A % 2) || !((stashed = ::malloc(size + offset)))) {
            return nullptr;
        }
        // Round up past the stash slot to the next A-byte boundary.
        ptr = (void **) (((uintptr_t) stashed + offset) & ~(A - 1));
        ptr[-1] = stashed;   // Remember the raw pointer for aligned_free().
        return ptr;
    }
    static void aligned_free(void *ptr) {
        if (ptr != nullptr) {
            // Free the raw pointer stashed one slot below the aligned address.
            ::free(((void **) ptr)[-1]);
        }
    }
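    // Worked example of the stash-and-align scheme above, assuming A = 32 and
    // 32-bit pointers (sizeof(void *) == 4): if ::malloc returns 0x1006, then
    // offset = 31 + 4 = 35, and (0x1006 + 35) & ~31 = 0x1029 & ~0x1F = 0x1020,
    // a 32-byte-aligned address. The stash slot ptr[-1] sits at 0x101C, still
    // inside the malloc'd block, so aligned_free() can recover 0x1006.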
    DMAPool(size_t n_samples, size_t n_channels, size_t n_buffers, void *mem_in=nullptr):
        mem((uint8_t *) mem_in), managed(mem_in==nullptr), wqueue(n_buffers), rqueue(n_buffers) {
        // Round each buffer up to a multiple of the alignment A (a power of
        // two) so consecutive buffers stay cache-line aligned.
        size_t bufsize = (((n_samples * n_channels * sizeof(T)) + (A-1)) & ~(A-1));
        if (bufsize && rqueue && wqueue) {
            if (mem == nullptr) {
                // No caller-provided block: the pool allocates its own aligned
                // memory and, being "managed", frees it in the destructor.
                mem = (uint8_t *) aligned_malloc(n_buffers * bufsize);
            }
            // Carve the block into n_buffers aligned slices and queue each
            // resulting buffer as writable.
            for (size_t i=0; i<n_buffers; i++) {
                DMABuffer<T> *buf = new DMABuffer<T>(
                    this, n_samples, n_channels, (T *) &mem[i * bufsize]
                );
                if (buf == nullptr) {
                    break;
                }
                wqueue.push(buf);
            }
        }
    }
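    // Layout example for the constructor above: with n_samples = 64,
    // n_channels = 2, T = uint16_t and A = 32, bufsize rounds 64 * 2 * 2 = 256
    // bytes up to 256 (already a multiple of 32), so buffer i starts at
    // mem + i * 256 and every buffer begins on a 32-byte cache-line boundary
    // (values chosen only to illustrate the arithmetic).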
    ~DMAPool() {
        // Drain both queues, destroying every buffer, then release the
        // pool's memory if it was allocated here.
        while (readable()) {
            delete alloc(DMA_BUFFER_READ);
        }
        while (writable()) {
            delete alloc(DMA_BUFFER_WRITE);
        }
        if (mem && managed) {
            aligned_free(mem);
        }
    }
    bool writable() {
        return !(wqueue.empty());   // At least one free buffer to write into.
    }

    bool readable() {
        return !(rqueue.empty());   // At least one filled buffer to read.
    }
    DMABuffer<T> *alloc(uint32_t flags) {
        DMABuffer<T> *buf = nullptr;
        // Take a buffer from the queue matching the request.
        if (flags & DMA_BUFFER_READ) {
            buf = rqueue.pop();
        } else {
            buf = wqueue.pop();
        }
        if (buf) {
            // Retag the buffer for its new role.
            buf->clr_flags(DMA_BUFFER_READ | DMA_BUFFER_WRITE);
            buf->set_flags(flags);
        }
        return buf;
    }
    void free(DMABuffer<T> *buf, uint32_t flags=0) {
        if (buf == nullptr) {
            return;
        }
        if (flags == 0) {
            // No explicit flags: requeue based on the buffer's own flags.
            flags = buf->get_flags();
        }
        if (flags & DMA_BUFFER_READ) {
            rqueue.push(buf);   // Filled buffer: make it available to readers.
        } else {
            wqueue.push(buf);   // Spent buffer: recycle it for writing.
        }
    }
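// End-to-end sketch of the intended circulation, assuming the class and flag
// names above and illustrative sizes (64 samples, 2 channels, 4 buffers):
//
//     DMAPool<uint16_t> pool(64, 2, 4);
//
//     // Producer (e.g. ADC DMA-complete IRQ): take a writable buffer, let
//     // the hardware fill it, then publish it as readable.
//     DMABuffer<uint16_t> *buf = pool.alloc(DMA_BUFFER_WRITE);
//     // ... DMA fills buf; invalidate the cache before the CPU reads it ...
//     pool.free(buf, DMA_BUFFER_READ);
//
//     // Consumer (application): take the filled buffer, process it, and
//     // recycle it back to the write queue.
//     buf = pool.alloc(DMA_BUFFER_READ);
//     pool.free(buf, DMA_BUFFER_WRITE);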