2
* Copyright 2007 Nouveau Project
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
11
* The above copyright notice and this permission notice shall be included in
12
* all copies or substantial portions of the Software.
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28
/* Private libdrm-nouveau declarations (channel/pushbuf/bo structs). */
#include "nouveau_private.h"
30
/* Dword budget for a kernel-managed ("call") push buffer segment. */
#define PB_BUFMGR_DWORDS (4096 / 2)
31
/* Minimum dwords guaranteed to callers of nouveau_pushbuf_space(). */
#define PB_MIN_USER_DWORDS 2048
34
/*
 * Ensure at least `min` dwords of space in the current call-style push
 * buffer, rotating to the next BO in nvpb->buffer[] and remapping when the
 * current segment cannot hold `min` + the 2-dword call suffix.
 *
 * NOTE(review): this chunk is a partial extraction — the return type,
 * braces, error paths and several statements (the bare numbers below are
 * the original file's line numbers) are missing.  Restore from upstream
 * libdrm nouveau_pushbuf.c before building; comments here describe only
 * what the surviving lines show.
 */
nouveau_pushbuf_space_call(struct nouveau_channel *chan, unsigned min)
36
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
37
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
38
struct nouveau_bo *bo;
41
/* Never hand out less than the guaranteed minimum. */
if (min < PB_MIN_USER_DWORDS)
42
min = PB_MIN_USER_DWORDS;
44
/* Current write position, in dwords from the start of the mapping. */
nvpb->current_offset = nvpb->base.cur - nvpb->pushbuf;
45
/* +2 reserves room for the cal_suffix0/1 terminator dwords. */
if (nvpb->current_offset + min + 2 <= nvpb->size)
49
/* presumably wraps back to buffer 0 here — TODO confirm against upstream */
if (nvpb->current == CALPB_BUFFERS)
51
bo = nvpb->buffer[nvpb->current];
53
ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
57
/* -8 bytes keeps space for the two suffix dwords; sizes are in dwords. */
nvpb->size = (bo->size - 8) / 4;
58
nvpb->pushbuf = bo->map;
59
nvpb->current_offset = 0;
61
/* Re-publish the user-visible pushbuf state. */
nvpb->base.channel = chan;
62
nvpb->base.remaining = nvpb->size;
63
nvpb->base.cur = nvpb->pushbuf;
70
/*
 * Ensure at least `min` dwords of space, dispatching to the call-based
 * path when available, otherwise (re)allocating a user-space dword buffer.
 *
 * NOTE(review): partial extraction — the return type, the condition
 * guarding the call path, free() of any previous buffer, and the closing
 * return are missing (bare numbers below are original line numbers).
 */
nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
72
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
73
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
76
/* Call-style pushbufs are managed by nouveau_pushbuf_space_call(). */
return nouveau_pushbuf_space_call(chan, min);
83
/* Legacy path: heap-allocated dword buffer submitted via GEM_PUSHBUF. */
nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
84
nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
86
nvpb->base.channel = chan;
87
nvpb->base.remaining = nvpb->size;
88
nvpb->base.cur = nvpb->pushbuf;
94
/*
 * Release all call-style push buffer BOs and clear the mapping pointer.
 * Safe to call on a partially-initialised state (nouveau_bo_ref(NULL, ..)
 * is the unref idiom and tolerates NULL slots).
 *
 * NOTE(review): partial extraction — return type, braces and the
 * declaration of `i` are missing (bare numbers are original line numbers).
 */
nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
96
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
97
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
100
for (i = 0; i < CALPB_BUFFERS; i++)
101
nouveau_bo_ref(NULL, &nvpb->buffer[i]);
103
/* BO mappings are gone; drop the stale CPU pointer. */
nvpb->pushbuf = NULL;
107
/*
 * Probe the kernel for call-style pushbuf support and allocate the ring of
 * GART-mapped buffer objects used by nouveau_pushbuf_space_call().
 * Records the kernel-supplied suffix0/suffix1 terminator dwords.
 *
 * NOTE(review): partial extraction — return type, braces, error handling
 * and ioctl argument lists are missing.  Both surviving drmCommandWriteRead
 * calls name DRM_NOUVEAU_GEM_PUSHBUF_CALL2; upstream falls back from CALL2
 * to CALL — verify against upstream before relying on this text.
 */
nouveau_pushbuf_init_call(struct nouveau_channel *chan)
109
struct drm_nouveau_gem_pushbuf_call req;
110
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
111
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
112
struct nouveau_device *dev = chan->device;
115
req.channel = chan->id;
117
ret = drmCommandWriteRead(nouveau_device(dev)->fd,
118
DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
121
ret = drmCommandWriteRead(nouveau_device(dev)->fd,
122
DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
127
/* Older ioctl path: kernel won't report vram/gart availability. */
nvpb->no_aper_update = 1;
130
for (i = 0; i < CALPB_BUFFERS; i++) {
131
ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
132
0, CALPB_BUFSZ, &nvpb->buffer[i]);
134
/* On allocation failure, undo whatever was created so far. */
nouveau_pushbuf_fini_call(chan);
140
/* Kernel-provided dwords appended after each submitted segment. */
nvpb->cal_suffix0 = req.suffix0;
141
nvpb->cal_suffix1 = req.suffix1;
145
/*
 * One-time pushbuf setup for a channel: try the call-style path, fall back
 * to the legacy user-buffer path, then allocate the buffer/reloc tracking
 * arrays and publish chan->pushbuf.
 *
 * NOTE(review): partial extraction — return type, braces, error checks
 * between the two nouveau_pushbuf_space() attempts, and calloc failure
 * handling are missing (bare numbers are original line numbers).
 */
nouveau_pushbuf_init(struct nouveau_channel *chan)
147
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
148
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
151
nouveau_pushbuf_init_call(chan);
153
/* First attempt uses the call path set up above (if it succeeded). */
ret = nouveau_pushbuf_space(chan, 0);
156
/* Call path failed: tear it down and retry on the legacy path. */
nouveau_pushbuf_fini_call(chan);
157
ret = nouveau_pushbuf_space(chan, 0);
164
/* Per-flush validate/reloc lists, sized to the kernel maxima. */
nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
165
sizeof(struct drm_nouveau_gem_pushbuf_bo));
166
nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
167
sizeof(struct drm_nouveau_gem_pushbuf_reloc));
169
chan->pushbuf = &nvpb->base;
174
/*
 * Submit all queued commands to the kernel, then prepare space for the
 * next segment of at least `min` dwords.  Two submission paths survive in
 * this fragment: the call-style GEM_PUSHBUF_CALL(2) ioctl (terminated by
 * the kernel suffix dwords) and the legacy GEM_PUSHBUF copy ioctl.
 * Afterwards, presumed offsets/domains are written back to each validated
 * BO and the validate list references are dropped.
 *
 * NOTE(review): partial extraction — return type, braces, early-out when
 * the buffer is empty, ioctl argument tails, and error handling are
 * missing (bare numbers are the original file's line numbers).
 */
nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
176
struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
177
struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
178
struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
182
/* Nothing queued since the last flush — presumably an early return. */
if (nvpb->base.remaining == nvpb->size)
186
struct drm_nouveau_gem_pushbuf_call req;
188
/* Terminate the segment with the kernel-supplied suffix dwords. */
*(nvpb->base.cur++) = nvpb->cal_suffix0;
189
*(nvpb->base.cur++) = nvpb->cal_suffix1;
190
if (nvpb->base.remaining > 2) /* space() will fixup if not */
191
nvpb->base.remaining -= 2;
194
req.channel = chan->id;
195
req.handle = nvpb->buffer[nvpb->current]->handle;
196
/* Byte offset of this segment within the current BO. */
req.offset = nvpb->current_offset * 4;
197
req.nr_buffers = nvpb->nr_buffers;
198
req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
199
req.nr_relocs = nvpb->nr_relocs;
200
req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
201
req.nr_dwords = (nvpb->base.cur - nvpb->pushbuf) -
202
nvpb->current_offset;
203
req.suffix0 = nvpb->cal_suffix0;
204
req.suffix1 = nvpb->cal_suffix1;
205
/* CALL vs CALL2 selects whether the kernel reports aperture stats. */
ret = drmCommandWriteRead(nvdev->fd, nvpb->no_aper_update ?
206
DRM_NOUVEAU_GEM_PUSHBUF_CALL :
207
DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
211
/* Kernel may rewrite the suffixes (e.g. new fence sequence). */
nvpb->cal_suffix0 = req.suffix0;
212
nvpb->cal_suffix1 = req.suffix1;
213
if (!nvpb->no_aper_update) {
214
nvdev->base.vm_vram_size = req.vram_available;
215
nvdev->base.vm_gart_size = req.gart_available;
218
/* Legacy path: kernel copies the dwords out of user memory. */
struct drm_nouveau_gem_pushbuf req;
221
req.channel = chan->id;
222
req.nr_dwords = nvpb->size - nvpb->base.remaining;
223
req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
224
req.nr_buffers = nvpb->nr_buffers;
225
req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
226
req.nr_relocs = nvpb->nr_relocs;
227
req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
228
ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
235
/* Update presumed offset/domain for any buffers that moved.
236
* Dereference all buffers on validate list
238
for (i = 0; i < nvpb->nr_relocs; i++) {
239
struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
240
struct drm_nouveau_gem_pushbuf_bo *pbbo =
241
&nvpb->buffers[r->bo_index];
242
struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
243
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
245
/* A BO with several relocs is only finalised on its last one. */
if (--nvbo->pending_refcnt)
248
if (pbbo->presumed_ok == 0) {
249
nvbo->domain = pbbo->presumed_domain;
250
nvbo->offset = pbbo->presumed_offset;
253
nvbo->pending = NULL;
254
/* Drop the reference taken when the BO joined the validate list. */
nouveau_bo_ref(NULL, &bo);
257
nvpb->nr_buffers = 0;
260
/* Allocate space for next push buffer */
261
/* NOTE(review): side effect inside assert() — compiled out under
 * NDEBUG, so the next segment would never be allocated.  Upstream
 * later fixed this; do not "clean up" by deleting the call. */
assert(!nouveau_pushbuf_space(chan, min));
263
if (chan->flush_notify)
264
chan->flush_notify(chan);
271
/*
 * Record an undo point ("marker") before emitting a sequence that needs
 * `wait_dwords` of ring space and up to `wait_relocs` relocations.  If
 * either resource is short, flush first so the marker lands in a segment
 * that can hold the whole sequence.
 *
 * NOTE(review): partial extraction — return type, braces and the final
 * return are missing (bare numbers are original line numbers).
 */
nouveau_pushbuf_marker_emit(struct nouveau_channel *chan,
272
unsigned wait_dwords, unsigned wait_relocs)
274
struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
276
if (AVAIL_RING(chan) < wait_dwords)
277
return nouveau_pushbuf_flush(chan, wait_dwords);
279
if (nvpb->nr_relocs + wait_relocs >= NOUVEAU_GEM_MAX_RELOCS)
280
return nouveau_pushbuf_flush(chan, wait_dwords);
282
/* Snapshot cursor (dword offset) and reloc count for marker_undo(). */
nvpb->marker = nvpb->base.cur - nvpb->pushbuf;
283
nvpb->marker_relocs = nvpb->nr_relocs;
288
/*
 * Roll the push buffer back to the last marker: drop every reloc (and its
 * validate-list reference) added since marker_emit(), then reset the write
 * cursor and remaining-space counter.
 *
 * NOTE(review): partial extraction — return type, braces, `i` declaration
 * and loop-continue details are missing (bare numbers are original line
 * numbers).
 */
nouveau_pushbuf_marker_undo(struct nouveau_channel *chan)
290
struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
296
/* undo any relocs/buffers added to the list since last marker */
297
for (i = nvpb->marker_relocs; i < nvpb->nr_relocs; i++) {
298
struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
299
struct drm_nouveau_gem_pushbuf_bo *pbbo =
300
&nvpb->buffers[r->bo_index];
301
struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
302
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
304
/* Only release the BO once its last pending reloc is undone. */
if (--nvbo->pending_refcnt)
307
nvbo->pending = NULL;
308
nouveau_bo_ref(NULL, &bo);
311
nvpb->nr_relocs = nvpb->marker_relocs;
313
/* reset pushbuf back to last marker */
314
nvpb->base.cur = nvpb->pushbuf + nvpb->marker;
315
nvpb->base.remaining = nvpb->size - nvpb->marker;
320
/*
 * Compute the 32-bit value a relocation should patch into the push buffer:
 * low or high half of (presumed BO offset + r->data), optionally OR'd with
 * a per-domain constant (NOUVEAU_GEM_RELOC_OR path).
 *
 * NOTE(review): partial extraction — return type, the `push` declaration,
 * the OR-branch body and the return are missing (bare numbers are
 * original line numbers).
 */
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
321
struct drm_nouveau_gem_pushbuf_reloc *r)
325
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
326
push = (pbbo->presumed_offset + r->data);
328
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
329
push = (pbbo->presumed_offset + r->data) >> 32;
333
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
334
/* presumably ORs vor/tor depending on the presumed domain — confirm */
if (pbbo->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM)
344
/*
 * Queue a relocation for the dword at `ptr`: validate the target BO onto
 * this flush's buffer list, narrow its allowed domains by `flags`
 * (VRAM/GART), record read/write intent, append a reloc entry, and write
 * a provisional value into the push buffer based on the BO's presumed
 * placement (or 0 for DUMMY relocs).
 *
 * NOTE(review): partial extraction — return type, braces, error returns
 * after each fprintf(), the flags/vor/tor assignments on `r`, and the
 * final return are missing (bare numbers are original line numbers).
 */
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
345
struct nouveau_bo *bo, uint32_t data, uint32_t data2,
346
uint32_t flags, uint32_t vor, uint32_t tor)
348
struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
349
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
350
struct drm_nouveau_gem_pushbuf_reloc *r;
351
struct drm_nouveau_gem_pushbuf_bo *pbbo;
352
uint32_t domains = 0;
354
if (nvpb->nr_relocs >= NOUVEAU_GEM_MAX_RELOCS) {
355
fprintf(stderr, "too many relocs!!\n");
359
/* User-memory BOs cannot be written by the GPU. */
if (nvbo->user && (flags & NOUVEAU_BO_WR)) {
360
fprintf(stderr, "write to user buffer!!\n");
364
/* Adds the BO to the validate list (or finds its existing entry). */
pbbo = nouveau_bo_emit_buffer(chan, bo);
366
fprintf(stderr, "buffer emit fail :(\n");
370
/* Balanced by the --pending_refcnt in flush()/marker_undo(). */
nvbo->pending_refcnt++;
372
if (flags & NOUVEAU_BO_VRAM)
373
domains |= NOUVEAU_GEM_DOMAIN_VRAM;
374
if (flags & NOUVEAU_BO_GART)
375
domains |= NOUVEAU_GEM_DOMAIN_GART;
377
/* Each reloc further intersects where the BO may be placed. */
if (!(pbbo->valid_domains & domains)) {
378
fprintf(stderr, "no valid domains remain!\n");
381
pbbo->valid_domains &= domains;
383
assert(flags & NOUVEAU_BO_RDWR);
384
if (flags & NOUVEAU_BO_RD) {
385
pbbo->read_domains |= domains;
387
if (flags & NOUVEAU_BO_WR) {
388
pbbo->write_domains |= domains;
389
nvbo->write_marker = 1;
392
r = nvpb->relocs + nvpb->nr_relocs++;
393
/* Indices, not pointers: the kernel consumes these arrays. */
r->bo_index = pbbo - nvpb->buffers;
394
r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
396
if (flags & NOUVEAU_BO_LOW)
397
r->flags |= NOUVEAU_GEM_RELOC_LOW;
398
if (flags & NOUVEAU_BO_HIGH)
399
r->flags |= NOUVEAU_GEM_RELOC_HIGH;
400
if (flags & NOUVEAU_BO_OR)
401
r->flags |= NOUVEAU_GEM_RELOC_OR;
406
/* DUMMY relocs leave 0; otherwise patch in the presumed address now. */
*(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
407
nouveau_pushbuf_calc_reloc(pbbo, r);