/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_resource_texture.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Determine what buffers eventually need hardware backing.
 *
 * Vertex- and index buffers need hardware backing.  Constant buffers
 * do on vgpu10.  Staging texture-upload buffers do when they are
 * supported.
 */
static inline boolean
svga_buffer_needs_hw_storage(const struct svga_screen *ss,
                             const struct pipe_resource *template)
{
   unsigned bind_mask = (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_STREAM_OUTPUT |
                         PIPE_BIND_SHADER_BUFFER | PIPE_BIND_COMMAND_ARGS_BUFFER);

   if (ss->sws->have_vgpu10) {
      /*
       * Driver-created upload const0- and staging texture-upload buffers
       * are tagged with PIPE_BIND_CUSTOM.
       */
      bind_mask |= PIPE_BIND_CUSTOM;

      /*
       * Uniform buffer objects.
       * Don't create hardware storage for state-tracker constant buffers,
       * because we frequently map them for reading and writing, and
       * the lengths of those buffers are always small, so it is better
       * to just use system memory.
       */
   }

   if (template->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
      return TRUE;

   return !!(template->bind & bind_mask);
}
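

/**
 * Returns TRUE if mapping the buffer for reading first requires reading
 * the current contents back from the device, i.e. the buffer is dirty or
 * its surface was rendered to on the host and it is not coherent.
 */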
static inline boolean
need_buf_readback(struct svga_context *svga,
                  struct pipe_transfer *st)
{
   struct svga_buffer *sbuf = svga_buffer(st->resource);

   if (st->usage != PIPE_MAP_READ)
      return FALSE;

   /* No buffer surface has been created */
   if (!sbuf->bufsurf)
      return FALSE;

   return ((sbuf->dirty ||
            sbuf->bufsurf->surface_state == SVGA_SURFACE_STATE_RENDERED) &&
           !sbuf->key.coherent && !svga->swc->force_coherent);
}


/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer and
 * therefore inherently serialized with other context operations), for buffers
 * we try to coalesce multiple range mappings (i.e., multiple calls to this
 * function) into a single DMA command, for better efficiency in command
 * processing.  This means we need to exercise extra care here to ensure that
 * the end result is exactly the same as if one DMA was used for every mapped
 * range.
 */
static void *
svga_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;
   uint8_t *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERMAP);

   assert(box->height == 1);
   assert(box->depth == 1);

   transfer = MALLOC_STRUCT(pipe_transfer);
   if (!transfer)
      goto done;

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;
   transfer->stride = 0;
   transfer->layer_stride = 0;

   if (usage & PIPE_MAP_WRITE) {
      /* If we write to the buffer for any reason, free any saved translated
       * indices.
       */
      pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
   }

   /* If it is a read transfer and the buffer is dirty or the buffer is bound
    * to a uav, we will need to read the subresource content from the device.
    */
   if (need_buf_readback(svga, transfer)) {
      /* Host-side buffers can be dirtied with vgpu10 features
       * (streamout and buffer copy) and the sm5 feature via uav.
       */
      assert(svga_have_vgpu10(svga));

      (void) svga_buffer_handle(svga, resource, sbuf->bind_flags);

      if (sbuf->dma.pending) {
         svga_buffer_upload_flush(svga, sbuf);
         svga_context_finish(svga);
      }

      assert(sbuf->handle);

      SVGA_RETRY(svga, SVGA3D_ReadbackGBSurface(svga->swc, sbuf->handle));
      svga->hud.num_readbacks++;

      svga_context_finish(svga);

      /* Mark the buffer surface state as UPDATED */
      assert(sbuf->bufsurf);
      sbuf->bufsurf->surface_state = SVGA_SURFACE_STATE_UPDATED;
   }
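
   /*
    * Decide how this mapping interacts with any pending upload DMA:
    * discard the old contents, proceed unsynchronized, or synchronize
    * with the host before giving the CPU access.
    */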
   if (usage & PIPE_MAP_WRITE) {
      if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
          !(resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             * With GB objects, the map operation takes care of this
             * if passed the PIPE_MAP_DISCARD_WHOLE_RESOURCE flag,
             * and the old backing store is busy.
             */

            if (!svga_have_gb_objects(svga))
               svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_MAP_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      }
      else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (svga_buffer_has_hw_storage(sbuf)) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the gallium frontend can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing to quickly access the contents from the CPU
                * without having to do a DMA download from the host.
                */

               if (usage & PIPE_MAP_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  goto done;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }
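
   /*
    * Make sure there is some backing store to map: try to create hardware
    * storage if none exists yet, and fall back to a malloc'ed shadow buffer
    * if that fails.
    */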
   if (!sbuf->swbuf && !svga_buffer_has_hw_storage(sbuf)) {
      if (svga_buffer_create_hw_storage(ss, sbuf, sbuf->bind_flags) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead and split the DMA into smaller transfers.
          */
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->b.width0 + 1023)/1024);

         sbuf->swbuf = align_malloc(sbuf->b.width0, 16);
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (svga_buffer_has_hw_storage(sbuf)) {
      boolean retry;

      map = SVGA_TRY_MAP(svga_buffer_hw_storage_map
                         (svga, sbuf, transfer->usage, &retry), retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_buffer_get_transfer() has already
          * hit the DISCARD_WHOLE_RESOURCE path and flushed HWTNL
          * for this buffer.
          */
         svga_retry_enter(svga);
         svga_context_flush(svga, NULL);
         map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
         svga_retry_exit(svga);
      }
   }

   if (map) {
      ++sbuf->map.count;
      map += transfer->box.x;
      *ptransfer = transfer;
   }
   else {
      FREE(transfer);
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(svga_sws(svga));

   return map;
}
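

/**
 * Tell the host which part of a mapped buffer range was actually written.
 *
 * This is the transfer_flush_region hook used when the buffer was mapped
 * with PIPE_MAP_FLUSH_EXPLICIT; it queues the written sub-range for upload.
 */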
static void
svga_buffer_transfer_flush_region(struct pipe_context *pipe,
                                  struct pipe_transfer *transfer,
                                  const struct pipe_box *box)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);
   struct svga_context *svga = svga_context(pipe);
   unsigned offset = transfer->box.x + box->x;
   unsigned length = box->width;

   assert(transfer->usage & PIPE_MAP_WRITE);
   assert(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT);

   if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf) {
      mtx_lock(&ss->swc_mutex);
      svga_buffer_add_range(sbuf, offset, offset + length);
      mtx_unlock(&ss->swc_mutex);
   }
}
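

/**
 * Unmap a previously mapped buffer.
 *
 * Records which ranges still need to be uploaded to the host (unless they
 * were flushed explicitly), releases the hardware storage mapping, and
 * flags constant-buffer state as dirty when a system-memory buffer changed.
 */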
static void
svga_buffer_transfer_unmap(struct pipe_context *pipe,
                           struct pipe_transfer *transfer)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);

   mtx_lock(&ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (svga_buffer_has_hw_storage(sbuf)) {
      /* Note: we may wind up flushing here and unmapping other buffers
       * which leads to recursively locking ss->swc_mutex.
       */
      svga_buffer_hw_storage_unmap(svga, sbuf);
   }

   if (transfer->usage & PIPE_MAP_WRITE) {
      if (!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf)
            svga_buffer_add_range(sbuf, 0, sbuf->b.width0);
      }

      if (sbuf->swbuf &&
          (!sbuf->bind_flags || (sbuf->bind_flags & PIPE_BIND_CONSTANT_BUFFER))) {
         /*
          * Since the constant buffer is in system memory, we need
          * to set the constant buffer dirty bits, so that the context
          * can update the changes in the device.
          * According to the GL spec, a buffer bound to other contexts will
          * have to be explicitly rebound by the user for the changes to take
          * effect.
          */
         svga->dirty |= SVGA_NEW_CONST_BUFFER;
      }
   }

   mtx_unlock(&ss->swc_mutex);
   FREE(transfer);
   SVGA_STATS_TIME_POP(svga_sws(svga));
}
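

/**
 * Destroy a buffer or texture resource, releasing its host surface,
 * hardware storage, and any system-memory copies, and updating the
 * screen's HUD resource accounting.
 */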
static void
svga_resource_destroy(struct pipe_screen *screen,
                      struct pipe_resource *buf)
{
   if (buf->target == PIPE_BUFFER) {
      struct svga_screen *ss = svga_screen(screen);
      struct svga_buffer *sbuf = svga_buffer(buf);

      assert(!p_atomic_read(&buf->reference.count));

      assert(!sbuf->dma.pending);

      svga_buffer_destroy_host_surface(ss, sbuf);

      if (sbuf->uploaded.buffer)
         pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

      svga_buffer_destroy_hw_storage(ss, sbuf);

      if (sbuf->swbuf && !sbuf->user)
         align_free(sbuf->swbuf);

      pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);

      ss->hud.total_resource_bytes -= sbuf->size;
      assert(ss->hud.num_resources > 0);
      if (ss->hud.num_resources > 0)
         ss->hud.num_resources--;

      FREE(sbuf);
   }
   else {
      struct svga_screen *ss = svga_screen(screen);
      struct svga_texture *tex = svga_texture(buf);

      ss->texture_timestamp++;

      svga_sampler_view_reference(&tex->cached_view, NULL);

      DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);

      SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);

      boolean to_invalidate = svga_was_texture_rendered_to(tex);
      svga_screen_surface_destroy(ss, &tex->key, to_invalidate, &tex->handle);

      /* Destroy the backed surface handle if it exists */
      if (tex->backed_handle)
         svga_screen_surface_destroy(ss, &tex->backed_key, to_invalidate,
                                     &tex->backed_handle);

      ss->hud.total_resource_bytes -= tex->size;

      FREE(tex->rendered_to);
      FREE(tex);

      assert(ss->hud.num_resources > 0);
      if (ss->hud.num_resources > 0)
         ss->hud.num_resources--;
   }
}
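

/**
 * Create a buffer resource.
 *
 * Depending on the bind flags this either creates host/hardware backed
 * storage or falls back to a system-memory (swbuf) buffer.
 */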
struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   unsigned bind_flags;

   SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_CREATEBUFFER);

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto error1;

   sbuf->b = *template;
   pipe_reference_init(&sbuf->b.reference, 1);
   sbuf->b.screen = screen;
   bind_flags = template->bind & ~PIPE_BIND_CUSTOM;

   list_inithead(&sbuf->surfaces);

   if (bind_flags & PIPE_BIND_CONSTANT_BUFFER) {
      /* Constant buffers can only have the PIPE_BIND_CONSTANT_BUFFER
       * bind flag set.
       */
      if (ss->sws->have_vgpu10) {
         bind_flags = PIPE_BIND_CONSTANT_BUFFER;
      }
   }

   /* Although the svga device only requires constant buffer sizes to be
    * in multiples of 16, in order to allow bind_flags promotion,
    * we are mandating all buffer sizes to be in multiples of 16.
    */
   sbuf->b.width0 = align(sbuf->b.width0, 16);
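
   /*
    * Pick the kind of storage for this buffer: a host surface for buffers
    * that need hardware backing, or a system-memory (swbuf) allocation
    * otherwise.
    */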
   if (svga_buffer_needs_hw_storage(ss, template)) {
      /* If the buffer is not used for constant buffer, set
       * the vertex/index bind flags as well so that the buffer will be
       * accepted for those uses.
       * Note that the PIPE_BIND_ flags we get from the gallium frontend are
       * just a hint about how the buffer may be used.  An OpenGL buffer
       * object may be used for many different things.
       * Also note that we do not unconditionally set the streamout
       * bind flag since a streamout buffer is an output buffer and
       * might have performance implications.
       */
      if (!(template->bind & PIPE_BIND_CONSTANT_BUFFER) &&
          !(template->bind & PIPE_BIND_CUSTOM)) {
         /* Not a constant- or staging buffer.
          * The buffer may be used for vertex data or indexes.
          */
         bind_flags |= (PIPE_BIND_VERTEX_BUFFER |
                        PIPE_BIND_INDEX_BUFFER);

         /* It may be used as a shader resource as well. */
         bind_flags |= PIPE_BIND_SAMPLER_VIEW;
      }

      if (svga_buffer_create_host_surface(ss, sbuf, bind_flags) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(sbuf->b.width0, 64);
      if (!sbuf->swbuf)
         goto error2;

      /* Since a constant buffer is usually small, it is much cheaper to
       * use system memory for the data, just as it is being done for
       * the default constant buffer.
       */
      if ((bind_flags & PIPE_BIND_CONSTANT_BUFFER) || !bind_flags)
         sbuf->use_swbuf = TRUE;
   }

   debug_reference(&sbuf->b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   sbuf->bind_flags = bind_flags;
   sbuf->size = util_resource_size(&sbuf->b);
   ss->hud.total_resource_bytes += sbuf->size;

   ss->hud.num_resources++;
   SVGA_STATS_TIME_POP(ss->sws);

   return &sbuf->b;

error2:
   FREE(sbuf);
error1:
   SVGA_STATS_TIME_POP(ss->sws);
   return NULL;
}
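

/**
 * Create a buffer resource that wraps caller-provided memory instead of
 * allocating new storage.
 */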
struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;
   struct svga_screen *ss = svga_screen(screen);

   sbuf = CALLOC_STRUCT(svga_buffer);

   pipe_reference_init(&sbuf->b.reference, 1);
   sbuf->b.screen = screen;
   sbuf->b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.width0 = bytes;
   sbuf->b.height0 = 1;
   sbuf->b.depth0 = 1;
   sbuf->b.array_size = 1;

   sbuf->bind_flags = bind;
   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   debug_reference(&sbuf->b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   ss->hud.num_resources++;