/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_private.h"

#define PB_BUFMGR_DWORDS   (4096 / 2)
#define PB_MIN_USER_DWORDS  2048
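
/* Compute the value to write into the push buffer for a relocation, based
 * on the buffer's presumed offset/domain.  The kernel re-applies the reloc
 * at submission time if its placement guess turns out to be wrong. */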
static uint32_t
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
			   struct drm_nouveau_gem_pushbuf_reloc *r)
{
	uint32_t push = 0;

	if (r->flags & NOUVEAU_GEM_RELOC_LOW)
		push = (pbbo->presumed_offset + r->data);
	else
	if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
		push = (pbbo->presumed_offset + r->data) >> 32;

	if (r->flags & NOUVEAU_GEM_RELOC_OR) {
		/* OR in the VRAM or GART constant depending on which domain
		 * the buffer is presumed to be in. */
		if (pbbo->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM)
			push |= r->vor;
		else
			push |= r->tor;
	}

	return push;
}
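
/* Add a relocation entry for 'bo' at push buffer position 'ptr', and write
 * an initial (presumed) value there.  'flags' selects which half of the
 * address is written and which memory domains are acceptable. */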
int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
			   struct nouveau_bo *bo, uint32_t data, uint32_t data2,
			   uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct drm_nouveau_gem_pushbuf_reloc *r;
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	uint32_t domains = 0;

	if (nvpb->nr_relocs >= NOUVEAU_GEM_MAX_RELOCS) {
		fprintf(stderr, "too many relocs!!\n");
		return -ENOMEM;
	}

	if (nouveau_bo(bo)->user && (flags & NOUVEAU_BO_WR)) {
		fprintf(stderr, "write to user buffer!!\n");
		return -EINVAL;
	}

	pbbo = nouveau_bo_emit_buffer(chan, bo);
	if (!pbbo) {
		fprintf(stderr, "buffer emit fail :(\n");
		return -ENOMEM;
	}

	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	pbbo->valid_domains &= domains;
	assert(pbbo->valid_domains);

	assert(flags & NOUVEAU_BO_RDWR);
	if (flags & NOUVEAU_BO_RD) {
		pbbo->read_domains |= domains;
	}
	if (flags & NOUVEAU_BO_WR) {
		pbbo->write_domains |= domains;
		nouveau_bo(bo)->write_marker = 1;
	}

	r = nvpb->relocs + nvpb->nr_relocs++;
	r->bo_index = pbbo - nvpb->buffers;
	r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
	r->flags = 0;
	if (flags & NOUVEAU_BO_LOW)
		r->flags |= NOUVEAU_GEM_RELOC_LOW;
	if (flags & NOUVEAU_BO_HIGH)
		r->flags |= NOUVEAU_GEM_RELOC_HIGH;
	if (flags & NOUVEAU_BO_OR)
		r->flags |= NOUVEAU_GEM_RELOC_OR;
	/* Record the reloc parameters so the kernel can recalculate the
	 * value if the buffer ends up somewhere else. */
	r->data = data;
	r->vor = vor;
	r->tor = tor;

	*(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
		nouveau_pushbuf_calc_reloc(pbbo, r);
	return 0;
}
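
/* Ensure at least 'min' dwords of space in the current CAL (call) push
 * buffer, cycling to the next buffer in the ring when the current one is
 * too full. */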
static int
nouveau_pushbuf_space_call(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct nouveau_bo *bo;
	int ret;

	if (min < PB_MIN_USER_DWORDS)
		min = PB_MIN_USER_DWORDS;

	nvpb->current_offset = nvpb->base.cur - nvpb->pushbuf;
	if (nvpb->current_offset + min + 2 <= nvpb->size)
		return 0;

	/* Advance to the next buffer in the CAL buffer ring. */
	nvpb->current++;
	if (nvpb->current == CALPB_BUFFERS)
		nvpb->current = 0;
	bo = nvpb->buffer[nvpb->current];

	ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
	if (ret)
		return ret;

	/* Reserve 2 dwords (8 bytes) at the end for the CAL jump suffix. */
	nvpb->size = (bo->size - 8) / 4;
	nvpb->pushbuf = bo->map;
	nvpb->current_offset = 0;

	nvpb->base.channel = chan;
	nvpb->base.remaining = nvpb->size;
	nvpb->base.cur = nvpb->pushbuf;

	nouveau_bo_unmap(bo);
	return 0;
}
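
/* Make sure at least 'min' dwords are available in the channel's push
 * buffer, using the kernel CALL interface when available and falling back
 * to a user-space buffer submitted with the plain PUSHBUF ioctl. */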
static int
nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;

	if (nvpb->use_cal)
		return nouveau_pushbuf_space_call(chan, min);

	if (nvpb->pushbuf) {
		free(nvpb->pushbuf);
		nvpb->pushbuf = NULL;
	}

	nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
	nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
	if (!nvpb->pushbuf)
		return -ENOMEM;

	nvpb->base.channel = chan;
	nvpb->base.remaining = nvpb->size;
	nvpb->base.cur = nvpb->pushbuf;

	return 0;
}
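
/* Drop the references on the CAL buffer ring and disable the CALL path. */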
static void
nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	int i;

	for (i = 0; i < CALPB_BUFFERS; i++)
		nouveau_bo_ref(NULL, &nvpb->buffer[i]);
	nvpb->use_cal = 0;
	nvpb->pushbuf = NULL;
}
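
/* Try to set up the kernel CALL-based submission path: check that the
 * ioctl is supported and allocate a small ring of GART push buffers. */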
static void
nouveau_pushbuf_init_call(struct nouveau_channel *chan)
{
	struct drm_nouveau_gem_pushbuf_call req;
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct nouveau_device *dev = chan->device;
	int i, ret;

	req.channel = chan->id;
	req.handle = 0;
	ret = drmCommandWriteRead(nouveau_device(dev)->fd,
				  DRM_NOUVEAU_GEM_PUSHBUF_CALL,
				  &req, sizeof(req));
	if (ret)
		return;

	for (i = 0; i < CALPB_BUFFERS; i++) {
		ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				     0, CALPB_BUFSZ, &nvpb->buffer[i]);
		if (ret) {
			nouveau_pushbuf_fini_call(chan);
			return;
		}
	}

	/* use_cal marks that flushes should go through the CALL ioctl. */
	nvpb->use_cal = 1;
	nvpb->cal_suffix0 = req.suffix0;
	nvpb->cal_suffix1 = req.suffix1;
}
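
/* Per-channel push buffer initialisation: pick a submission path and
 * allocate the buffer/relocation tables shared with the kernel. */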
int
nouveau_pushbuf_init(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	int ret;

	nouveau_pushbuf_init_call(chan);

	ret = nouveau_pushbuf_space(chan, 0);
	if (ret) {
		/* If the CALL path failed to provide space, drop back to
		 * the user-space buffer and try again. */
		if (nvpb->use_cal) {
			nouveau_pushbuf_fini_call(chan);
			ret = nouveau_pushbuf_space(chan, 0);
		}

		if (ret)
			return ret;
	}

	nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
			       sizeof(struct drm_nouveau_gem_pushbuf_bo));
	nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
			      sizeof(struct drm_nouveau_gem_pushbuf_reloc));
	if (!nvpb->buffers || !nvpb->relocs)
		return -ENOMEM;

	chan->pushbuf = &nvpb->base;
	return 0;
}
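
/* Submit everything emitted so far to the kernel, update the client-side
 * view of any buffers the kernel relocated, then prepare space for the
 * next batch. */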
int
nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	unsigned i;
	int ret;

	/* Nothing has been emitted since the last flush. */
	if (nvpb->base.remaining == nvpb->size)
		return 0;

	if (nvpb->use_cal) {
		struct drm_nouveau_gem_pushbuf_call req;

		/* Terminate the buffer with the jump-back suffix expected
		 * by the kernel's CALL handling. */
		*(nvpb->base.cur++) = nvpb->cal_suffix0;
		*(nvpb->base.cur++) = nvpb->cal_suffix1;
		if (nvpb->base.remaining > 2) /* space() will fixup if not */
			nvpb->base.remaining -= 2;

		req.channel = chan->id;
		req.handle = nvpb->buffer[nvpb->current]->handle;
		req.offset = nvpb->current_offset * 4;
		req.nr_buffers = nvpb->nr_buffers;
		req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
		req.nr_relocs = nvpb->nr_relocs;
		req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
		req.nr_dwords = (nvpb->base.cur - nvpb->pushbuf) -
				nvpb->current_offset;
		req.suffix0 = nvpb->cal_suffix0;
		req.suffix1 = nvpb->cal_suffix1;
		ret = drmCommandWriteRead(nvdev->fd,
					  DRM_NOUVEAU_GEM_PUSHBUF_CALL,
					  &req, sizeof(req));
		nvpb->cal_suffix0 = req.suffix0;
		nvpb->cal_suffix1 = req.suffix1;
		assert(ret == 0);
	} else {
		struct drm_nouveau_gem_pushbuf req;

		req.channel = chan->id;
		req.nr_dwords = nvpb->size - nvpb->base.remaining;
		req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
		req.nr_buffers = nvpb->nr_buffers;
		req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
		req.nr_relocs = nvpb->nr_relocs;
		req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				      &req, sizeof(req));
		assert(ret == 0);
	}

	/* Update presumed offset/domain for any buffers that moved.
	 * Dereference all buffers on validate list.
	 */
	for (i = 0; i < nvpb->nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *pbbo = &nvpb->buffers[i];
		struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;

		if (pbbo->presumed_ok == 0) {
			nouveau_bo(bo)->domain = pbbo->presumed_domain;
			nouveau_bo(bo)->offset = pbbo->presumed_offset;
		}

		nouveau_bo(bo)->pending = NULL;
		nouveau_bo_ref(NULL, &bo);
	}
	nvpb->nr_buffers = 0;
	nvpb->nr_relocs = 0;

	/* Allocate space for next push buffer */
	ret = nouveau_pushbuf_space(chan, min);
	assert(!ret);

	if (chan->flush_notify)
		chan->flush_notify(chan);

	return 0;
}