/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */
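
/* A rough sketch of the logging scheme, for orientation; the authoritative
 * definitions live in glthread_marshal.h and the generated marshal code.
 * Each GL call is encoded into a batch buffer as a small header followed by
 * the call's arguments, approximately:
 *
 *    struct marshal_cmd_base {
 *       uint16_t cmd_id;    // index into _mesa_unmarshal_dispatch
 *       uint16_t cmd_size;  // command size within the uint64_t buffer
 *    };
 *
 * The worker walks the buffer and calls _mesa_unmarshal_dispatch[cmd_id] on
 * each record; the callee executes the real GL function and returns how far
 * to advance.
 */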

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
#include "util/u_cpu_detect.h"

#include "state_tracker/st_context.h"

static void
glthread_unmarshal_batch(void *job, void *gdata, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   unsigned pos = 0;
   unsigned used = batch->used;
   uint64_t *buffer = batch->buffer;
   const uint64_t *last = &buffer[used];

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   _mesa_HashLockMutex(ctx->Shared->BufferObjects);
   ctx->BufferObjectsLocked = true;
   simple_mtx_lock(&ctx->Shared->TexMutex);
   ctx->TexturesLocked = true;

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd, last);
   }

   ctx->TexturesLocked = false;
   simple_mtx_unlock(&ctx->Shared->TexMutex);
   ctx->BufferObjectsLocked = false;
   _mesa_HashUnlockMutex(ctx->Shared->BufferObjects);

   assert(pos == used);
   batch->used = 0;

   unsigned batch_index = batch - ctx->GLThread.batches;
   /* Atomically set this to -1 if it's equal to batch_index, i.e. only clear
    * the marker if no later batch has overwritten it.
    */
   p_atomic_cmpxchg(&ctx->GLThread.LastProgramChangeBatch, batch_index, -1);
   p_atomic_cmpxchg(&ctx->GLThread.LastDListChangeBatchIndex, batch_index, -1);
}

static void
glthread_thread_initialization(void *job, void *gdata, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   st_set_background_context(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   assert(!glthread->enabled);

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0, NULL)) {
      return;
   }

   glthread->VAOs = _mesa_NewHashTable();
   if (!glthread->VAOs) {
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   if (!_mesa_create_marshal_tables(ctx)) {
      _mesa_DeleteHashTable(glthread->VAOs);
      util_queue_destroy(&glthread->queue);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];

   glthread->enabled = true;
   glthread->stats.queue = &glthread->queue;

   glthread->SupportsBufferUploads =
      ctx->Const.BufferCreateMapUnsynchronizedThreadSafe &&
      ctx->Const.AllowMappedBuffersDuringExecution;

   /* If the draw start index is non-zero, glthread can upload to offset 0,
    * which means the attrib offset has to be -(first * stride).
    * So require signed vertex buffer offsets.
    */
   glthread->SupportsNonVBOUploads = glthread->SupportsBufferUploads &&
                                     ctx->Const.VertexBufferOffsetIsInt32;
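
   /* Worked example of the formula above (illustrative numbers): a draw with
    * first = 1000 on an attrib with stride = 16 uploads the user array
    * starting at element 1000 to offset 0 of a fresh buffer, so the attrib
    * offset must be -(1000 * 16) = -16000 for the draw to read the right
    * data; hence the requirement for signed (int32) vertex buffer offsets.
    */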

   ctx->CurrentClientDispatch = ctx->MarshalExec;

   glthread->LastDListChangeBatchIndex = -1;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}

static void
free_vao(void *data, UNUSED void *userData)
{
   free(data);
}

void
_mesa_glthread_destroy(struct gl_context *ctx, const char *reason)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->enabled)
      return;

   if (reason)
      _mesa_debug(ctx, "glthread destroy reason: %s\n", reason);

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   _mesa_HashDeleteAll(glthread->VAOs, free_vao, NULL);
   _mesa_DeleteHashTable(glthread->VAOs);

   ctx->GLThread.enabled = false;
   ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;

   /* Update the dispatch only if the context is current. */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }
}

void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   if (ctx->CurrentServerDispatch == ctx->ContextLost) {
      _mesa_glthread_destroy(ctx, "context lost");
      return;
   }

   if (!glthread->used)
      return; /* the batch is empty */

   /* Pin threads regularly to the same Zen CCX that the main thread is
    * running on. The main thread can move between CCXs.
    */
   if (util_get_cpu_caps()->num_L3_caches > 1 &&
       /* driver support */
       ctx->pipe->set_context_param &&
       ++glthread->pin_thread_counter % 128 == 0) {
      int cpu = util_get_current_cpu();

      if (cpu >= 0) {
         uint16_t L3_cache = util_get_cpu_caps()->cpu_to_L3[cpu];
         if (L3_cache != U_CPU_INVALID_L3) {
            util_set_thread_affinity(glthread->queue.threads[0],
                                     util_get_cpu_caps()->L3_affinity_mask[L3_cache],
                                     NULL, util_get_cpu_caps()->num_cpu_mask_bits);
            ctx->pipe->set_context_param(ctx->pipe,
                                         PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
                                         L3_cache);
         }
      }
   }
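
   /* Informal note on the pinning above: keeping the worker on the same
    * L3 cache / CCX as the application thread keeps the command-buffer
    * traffic within one L3, and re-pinning only every 128th batch keeps the
    * affinity calls cheap while still following the main thread when the
    * scheduler migrates it.
    */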

   struct glthread_batch *next = glthread->next_batch;

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, glthread->used);
   next->used = glthread->used;

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
   glthread->used = 0;
}
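
/* Batch ring overview (informal note): the batches array acts as a ring.
 * "next" is the batch currently being filled by the application thread and
 * "last" is the most recently submitted one. The queue was created with
 * MARSHAL_MAX_BATCHES - 2 slots, so by the time "next" wraps around to a
 * previously submitted batch, that batch can no longer be queued or
 * executing.
 */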

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourself.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (glthread->used) {
      p_atomic_add(&glthread->stats.num_direct_items, glthread->used);
      next->used = glthread->used;
      glthread->used = 0;

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}
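
/* For orientation: GL functions that glthread cannot marshal safely fall
 * back to a synchronous call through _mesa_glthread_finish_before. A
 * generated fallback looks roughly like this (illustrative sketch, not the
 * verbatim generated code):
 *
 *    void GLAPIENTRY
 *    _mesa_marshal_GetIntegerv(GLenum pname, GLint *params)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *       _mesa_glthread_finish_before(ctx, "GetIntegerv");
 *       CALL_GetIntegerv(ctx->CurrentServerDispatch, (pname, params));
 *    }
 */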

/*
 * Report a GL error: if called on behalf of glthread, record it via the
 * marshalled InternalSetError command so it is raised in command order;
 * otherwise format the message and call _mesa_error directly.
 */
void
_mesa_error_glthread_safe(struct gl_context *ctx, GLenum error, bool glthread,
                          const char *format, ...)
{
   if (glthread) {
      _mesa_marshal_InternalSetError(error);
   } else {
      char s[MAX_DEBUG_MESSAGE_LENGTH];
      va_list args;

      va_start(args, format);
      ASSERTED size_t len = vsnprintf(s, MAX_DEBUG_MESSAGE_LENGTH, format, args);
      va_end(args);

      /* Whoever calls _mesa_error should use shorter strings. */
      assert(len < MAX_DEBUG_MESSAGE_LENGTH);

      _mesa_error(ctx, error, "%s", s);
   }
}