/*
 * Copyright 2016 Patrick Rudolph <siro@das-labor.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
#include "nine_queue.h"
24
#include "os/os_thread.h"
25
#include "util/macros.h"
26
#include "nine_helpers.h"
28
/* Maximum number of instructions a single cmdbuf can hold before it is
 * flushed automatically. */
#define NINE_CMD_BUF_INSTR (256)

/* Number of cmdbufs in the ring.  Must be a power of two so that
 * NINE_CMD_BUFS_MASK can be used to wrap the head/tail indices. */
#define NINE_CMD_BUFS (32)
#define NINE_CMD_BUFS_MASK (NINE_CMD_BUFS - 1)

/* Size in bytes of the memory pool backing each cmdbuf. */
#define NINE_QUEUE_SIZE (8192 * 16 + 128)

#define DBG_CHANNEL DBG_DEVICE
/*
 * Single producer - single consumer pool queue
 *
 * Producer:
 * Calls nine_queue_alloc to get a slice of memory in current cmdbuf.
 * Calls nine_queue_flush to flush the queue by request.
 * The queue is flushed automatically on insufficient space or once the
 * cmdbuf contains NINE_CMD_BUF_INSTR instructions.
 *
 * nine_queue_flush does block, while nine_queue_alloc doesn't block.
 *
 * nine_queue_alloc returns NULL on insufficient space.
 *
 * Consumer:
 * Calls nine_queue_wait_flush to wait for a cmdbuf.
 * After waiting for a cmdbuf it calls nine_queue_get until NULL is returned.
 *
 * nine_queue_wait_flush does block, while nine_queue_get doesn't block.
 *
 * Only a single consumer and a single producer are supported.
 */
unsigned instr_size[NINE_CMD_BUF_INSTR];
69
struct nine_queue_pool {
70
struct nine_cmdbuf pool[NINE_CMD_BUFS];
81
/* Consumer functions: */
83
nine_queue_wait_flush(struct nine_queue_pool* ctx)
85
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
87
/* wait for cmdbuf full */
88
mtx_lock(&ctx->mutex_push);
91
DBG("waiting for full cmdbuf\n");
92
cnd_wait(&ctx->event_push, &ctx->mutex_push);
94
DBG("got cmdbuf=%p\n", cmdbuf);
95
mtx_unlock(&ctx->mutex_push);
101
/* Gets a pointer to the next memory slice.
103
* Returns NULL on empty cmdbuf. */
105
nine_queue_get(struct nine_queue_pool* ctx)
107
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
110
/* At this pointer there's always a cmdbuf. */
112
if (ctx->cur_instr == cmdbuf->num_instr) {
113
/* signal waiting producer */
114
mtx_lock(&ctx->mutex_pop);
115
DBG("freeing cmdbuf=%p\n", cmdbuf);
117
cnd_signal(&ctx->event_pop);
118
mtx_unlock(&ctx->mutex_pop);
120
ctx->tail = (ctx->tail + 1) & NINE_CMD_BUFS_MASK;
125
/* At this pointer there's always a cmdbuf with instruction to process. */
126
offset = cmdbuf->offset;
127
cmdbuf->offset += cmdbuf->instr_size[ctx->cur_instr];
130
return cmdbuf->mem_pool + offset;
133
/* Producer functions: */
135
/* Flushes the queue.
136
* Moves the current cmdbuf to worker thread.
137
* Blocks until next cmdbuf is free. */
139
nine_queue_flush(struct nine_queue_pool* ctx)
141
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];
143
DBG("flushing cmdbuf=%p instr=%d size=%d\n",
144
cmdbuf, cmdbuf->num_instr, cmdbuf->offset);
146
/* Nothing to flush */
147
if (!cmdbuf->num_instr)
150
/* signal waiting worker */
151
mtx_lock(&ctx->mutex_push);
153
cnd_signal(&ctx->event_push);
154
mtx_unlock(&ctx->mutex_push);
156
ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;
158
cmdbuf = &ctx->pool[ctx->head];
160
/* wait for queue empty */
161
mtx_lock(&ctx->mutex_pop);
164
DBG("waiting for empty cmdbuf\n");
165
cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
167
DBG("got empty cmdbuf=%p\n", cmdbuf);
168
mtx_unlock(&ctx->mutex_pop);
170
cmdbuf->num_instr = 0;
173
/* Gets a a pointer to slice of memory with size @space.
174
* Does block if queue is full.
175
* Returns NULL on @space > NINE_QUEUE_SIZE. */
177
nine_queue_alloc(struct nine_queue_pool* ctx, unsigned space)
180
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];
182
if (space > NINE_QUEUE_SIZE)
185
/* at this pointer there's always a free queue available */
187
if ((cmdbuf->offset + space > NINE_QUEUE_SIZE) ||
188
(cmdbuf->num_instr == NINE_CMD_BUF_INSTR)) {
190
nine_queue_flush(ctx);
192
cmdbuf = &ctx->pool[ctx->head];
195
DBG("cmdbuf=%p space=%d\n", cmdbuf, space);
197
/* at this pointer there's always a free queue with sufficient space available */
199
offset = cmdbuf->offset;
200
cmdbuf->offset += space;
201
cmdbuf->instr_size[cmdbuf->num_instr] = space;
202
cmdbuf->num_instr ++;
204
return cmdbuf->mem_pool + offset;
207
/* Returns the current queue flush state.
208
* TRUE nothing flushed
209
* FALSE one ore more instructions queued flushed. */
211
nine_queue_no_flushed_work(struct nine_queue_pool* ctx)
213
return (ctx->tail == ctx->head);
216
/* Returns the current queue empty state.
217
* TRUE no instructions queued.
218
* FALSE one ore more instructions queued. */
220
nine_queue_isempty(struct nine_queue_pool* ctx)
222
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];
224
return (ctx->tail == ctx->head) && !cmdbuf->num_instr;
227
struct nine_queue_pool*
228
nine_queue_create(void)
231
struct nine_queue_pool *ctx;
233
ctx = CALLOC_STRUCT(nine_queue_pool);
237
for (i = 0; i < NINE_CMD_BUFS; i++) {
238
ctx->pool[i].mem_pool = MALLOC(NINE_QUEUE_SIZE);
239
if (!ctx->pool[i].mem_pool)
243
cnd_init(&ctx->event_pop);
244
(void) mtx_init(&ctx->mutex_pop, mtx_plain);
246
cnd_init(&ctx->event_push);
247
(void) mtx_init(&ctx->mutex_push, mtx_plain);
249
/* Block until first cmdbuf has been flushed. */
250
ctx->worker_wait = TRUE;
255
for (i = 0; i < NINE_CMD_BUFS; i++) {
256
if (ctx->pool[i].mem_pool)
257
FREE(ctx->pool[i].mem_pool);
265
nine_queue_delete(struct nine_queue_pool *ctx)
269
mtx_destroy(&ctx->mutex_pop);
270
cnd_destroy(&ctx->event_pop);
272
mtx_destroy(&ctx->mutex_push);
273
cnd_destroy(&ctx->event_push);
275
for (i = 0; i < NINE_CMD_BUFS; i++)
276
FREE(ctx->pool[i].mem_pool);