1
/**************************************************************************
3
* Copyright 2007 VMware, Inc.
6
* Permission is hereby granted, free of charge, to any person obtaining a
7
* copy of this software and associated documentation files (the
8
* "Software"), to deal in the Software without restriction, including
9
* without limitation the rights to use, copy, modify, merge, publish,
10
* distribute, sub license, and/or sell copies of the Software, and to
11
* permit persons to whom the Software is furnished to do so, subject to
12
* the following conditions:
14
* The above copyright notice and this permission notice (including the
15
* next paragraph) shall be included in all copies or substantial portions
18
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
**************************************************************************/
31
#include "pipe/p_context.h"
32
#include "pipe/p_defines.h"
33
#include "pipe/p_shader_tokens.h"
34
#include "pipe/p_state.h"
35
#include "pipe/p_screen.h"
36
#include "util/compiler.h"
37
#include "util/format/u_format.h"
38
#include "util/u_debug.h"
39
#include "util/u_debug_describe.h"
40
#include "util/u_debug_refcnt.h"
41
#include "util/u_atomic.h"
42
#include "util/u_box.h"
43
#include "util/u_math.h"
52
* Reference counting helper functions.
57
pipe_reference_init(struct pipe_reference *dst, unsigned count)
63
pipe_is_referenced(struct pipe_reference *src)
65
return p_atomic_read(&src->count) != 0;
69
* Update reference counting.
70
* The old thing pointed to, if any, will be unreferenced.
71
* Both 'dst' and 'src' may be NULL.
72
* \return TRUE if the object's refcount hits zero and should be destroyed.
75
pipe_reference_described(struct pipe_reference *dst,
76
struct pipe_reference *src,
77
debug_reference_descriptor get_desc)
80
/* bump the src.count first */
82
ASSERTED int count = p_atomic_inc_return(&src->count);
83
assert(count != 1); /* src had to be referenced */
84
debug_reference(src, get_desc, 1);
88
int count = p_atomic_dec_return(&dst->count);
89
assert(count != -1); /* dst had to be referenced */
90
debug_reference(dst, get_desc, -1);
100
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
102
return pipe_reference_described(dst, src,
103
(debug_reference_descriptor)
104
debug_describe_reference);
108
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
110
struct pipe_surface *old_dst = *dst;
112
if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
113
src ? &src->reference : NULL,
114
(debug_reference_descriptor)
115
debug_describe_surface))
116
old_dst->context->surface_destroy(old_dst->context, old_dst);
121
* Similar to pipe_surface_reference() but always set the pointer to NULL
122
* and pass in an explicit context. The explicit context avoids the problem
123
* of using a deleted context's surface_destroy() method when freeing a surface
124
* that's shared by multiple contexts.
127
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
129
struct pipe_surface *old = *ptr;
131
if (pipe_reference_described(&old->reference, NULL,
132
(debug_reference_descriptor)
133
debug_describe_surface))
134
pipe->surface_destroy(pipe, old);
139
pipe_resource_destroy(struct pipe_resource *res)
141
/* Avoid recursion, which would prevent inlining this function */
143
struct pipe_resource *next = res->next;
145
res->screen->resource_destroy(res->screen, res);
147
} while (pipe_reference_described(res ? &res->reference : NULL,
149
(debug_reference_descriptor)
150
debug_describe_resource));
154
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
156
struct pipe_resource *old_dst = *dst;
158
if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
159
src ? &src->reference : NULL,
160
(debug_reference_descriptor)
161
debug_describe_resource)) {
162
pipe_resource_destroy(old_dst);
168
* Subtract the given number of references.
171
pipe_drop_resource_references(struct pipe_resource *dst, int num_refs)
173
int count = p_atomic_add_return(&dst->reference.count, -num_refs);
176
/* Underflows shouldn't happen, but let's be safe. */
178
pipe_resource_destroy(dst);
182
* Same as pipe_surface_release, but used when pipe_context doesn't exist
186
pipe_surface_release_no_context(struct pipe_surface **ptr)
188
struct pipe_surface *surf = *ptr;
190
if (pipe_reference_described(&surf->reference, NULL,
191
(debug_reference_descriptor)
192
debug_describe_surface)) {
193
/* trivially destroy pipe_surface */
194
pipe_resource_reference(&surf->texture, NULL);
201
* Set *dst to \p src with proper reference counting.
203
* The caller must guarantee that \p src and *dst were created in
204
* the same context (if they exist), and that this must be the current context.
207
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
208
struct pipe_sampler_view *src)
210
struct pipe_sampler_view *old_dst = *dst;
212
if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
213
src ? &src->reference : NULL,
214
(debug_reference_descriptor)
215
debug_describe_sampler_view))
216
old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
221
pipe_so_target_reference(struct pipe_stream_output_target **dst,
222
struct pipe_stream_output_target *src)
224
struct pipe_stream_output_target *old_dst = *dst;
226
if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
227
src ? &src->reference : NULL,
228
(debug_reference_descriptor)debug_describe_so_target))
229
old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
234
pipe_vertex_state_reference(struct pipe_vertex_state **dst,
235
struct pipe_vertex_state *src)
237
struct pipe_vertex_state *old_dst = *dst;
239
if (pipe_reference(old_dst ? &old_dst->reference : NULL,
240
src ? &src->reference : NULL))
241
old_dst->screen->vertex_state_destroy(old_dst->screen, old_dst);
246
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
248
if (dst->is_user_buffer)
249
dst->buffer.user = NULL;
251
pipe_resource_reference(&dst->buffer.resource, NULL);
255
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
256
const struct pipe_vertex_buffer *src)
258
if (dst->buffer.resource == src->buffer.resource) {
259
/* Just copy the fields, don't touch reference counts. */
260
dst->stride = src->stride;
261
dst->is_user_buffer = src->is_user_buffer;
262
dst->buffer_offset = src->buffer_offset;
266
pipe_vertex_buffer_unreference(dst);
267
/* Don't use memcpy because there is a hole between variables.
268
* dst can be used as a hash key.
270
dst->stride = src->stride;
271
dst->is_user_buffer = src->is_user_buffer;
272
dst->buffer_offset = src->buffer_offset;
274
if (src->is_user_buffer)
275
dst->buffer.user = src->buffer.user;
277
pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
281
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
282
struct pipe_resource *pt, unsigned level, unsigned layer)
284
pipe_resource_reference(&ps->texture, pt);
285
ps->format = pt->format;
286
ps->width = u_minify(pt->width0, level);
287
ps->height = u_minify(pt->height0, level);
288
ps->u.tex.level = level;
289
ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
294
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
295
struct pipe_resource *pt, unsigned level, unsigned layer)
298
pipe_reference_init(&ps->reference, 1);
299
pipe_surface_reset(ctx, ps, pt, level, layer);
302
/* Return true if the surfaces are equal. */
303
static inline boolean
304
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
306
return s1->texture == s2->texture &&
307
s1->format == s2->format &&
308
(s1->texture->target != PIPE_BUFFER ||
309
(s1->u.buf.first_element == s2->u.buf.first_element &&
310
s1->u.buf.last_element == s2->u.buf.last_element)) &&
311
(s1->texture->target == PIPE_BUFFER ||
312
(s1->u.tex.level == s2->u.tex.level &&
313
s1->u.tex.first_layer == s2->u.tex.first_layer &&
314
s1->u.tex.last_layer == s2->u.tex.last_layer));
318
* Convenience wrappers for screen buffer functions.
323
* Create a new resource.
324
* \param bind bitmask of PIPE_BIND_x flags
325
* \param usage a PIPE_USAGE_x value
327
static inline struct pipe_resource *
328
pipe_buffer_create(struct pipe_screen *screen,
330
enum pipe_resource_usage usage,
333
struct pipe_resource buffer;
334
memset(&buffer, 0, sizeof buffer);
335
buffer.target = PIPE_BUFFER;
336
buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
338
buffer.usage = usage;
340
buffer.width0 = size;
343
buffer.array_size = 1;
344
return screen->resource_create(screen, &buffer);
348
static inline struct pipe_resource *
349
pipe_buffer_create_const0(struct pipe_screen *screen,
351
enum pipe_resource_usage usage,
354
struct pipe_resource buffer;
355
memset(&buffer, 0, sizeof buffer);
356
buffer.target = PIPE_BUFFER;
357
buffer.format = PIPE_FORMAT_R8_UNORM;
359
buffer.usage = usage;
360
buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
361
buffer.width0 = size;
364
buffer.array_size = 1;
365
return screen->resource_create(screen, &buffer);
370
* Map a range of a resource.
371
* \param offset start of region, in bytes
372
* \param length size of region, in bytes
373
* \param access bitmask of PIPE_MAP_x flags
374
* \param transfer returns a transfer object
377
pipe_buffer_map_range(struct pipe_context *pipe,
378
struct pipe_resource *buffer,
382
struct pipe_transfer **transfer)
387
assert(offset < buffer->width0);
388
assert(offset + length <= buffer->width0);
391
u_box_1d(offset, length, &box);
393
map = pipe->buffer_map(pipe, buffer, 0, access, &box, transfer);
403
* Map whole resource.
404
* \param access bitmask of PIPE_MAP_x flags
405
* \param transfer returns a transfer object
408
pipe_buffer_map(struct pipe_context *pipe,
409
struct pipe_resource *buffer,
411
struct pipe_transfer **transfer)
413
return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
419
pipe_buffer_unmap(struct pipe_context *pipe,
420
struct pipe_transfer *transfer)
422
pipe->buffer_unmap(pipe, transfer);
426
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
427
struct pipe_transfer *transfer,
435
assert(transfer->box.x <= (int) offset);
436
assert((int) (offset + length) <= transfer->box.x + transfer->box.width);
438
/* Match old screen->buffer_flush_mapped_range() behaviour, where
439
* offset parameter is relative to the start of the buffer, not the
442
transfer_offset = offset - transfer->box.x;
444
u_box_1d(transfer_offset, length, &box);
446
pipe->transfer_flush_region(pipe, transfer, &box);
450
pipe_buffer_write(struct pipe_context *pipe,
451
struct pipe_resource *buf,
456
/* Don't set any other usage bits. Drivers should derive them. */
457
pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
461
* Special case for writing non-overlapping ranges.
463
* We can avoid GPU/CPU synchronization when writing range that has never
464
* been written before.
467
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
468
struct pipe_resource *buf,
469
unsigned offset, unsigned size,
472
pipe->buffer_subdata(pipe, buf,
474
PIPE_MAP_UNSYNCHRONIZED),
479
* Utility for simplifying pipe_context::resource_copy_region calls
482
pipe_buffer_copy(struct pipe_context *pipe,
483
struct pipe_resource *dst,
484
struct pipe_resource *src,
490
/* only these fields are used */
491
box.x = (int)src_offset;
492
box.width = (int)size;
493
pipe->resource_copy_region(pipe, dst, 0, dst_offset, 0, 0, src, 0, &box);
497
* Create a new resource and immediately put data into it
498
* \param bind bitmask of PIPE_BIND_x flags
499
* \param usage bitmask of PIPE_USAGE_x flags
501
static inline struct pipe_resource *
502
pipe_buffer_create_with_data(struct pipe_context *pipe,
504
enum pipe_resource_usage usage,
508
struct pipe_resource *res = pipe_buffer_create(pipe->screen,
510
pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
515
pipe_buffer_read(struct pipe_context *pipe,
516
struct pipe_resource *buf,
521
struct pipe_transfer *src_transfer;
524
map = (ubyte *) pipe_buffer_map_range(pipe,
532
memcpy(data, map, size);
533
pipe_buffer_unmap(pipe, src_transfer);
538
* Map a resource for reading/writing.
539
* \param access bitmask of PIPE_MAP_x flags
542
pipe_texture_map(struct pipe_context *context,
543
struct pipe_resource *resource,
544
unsigned level, unsigned layer,
546
unsigned x, unsigned y,
547
unsigned w, unsigned h,
548
struct pipe_transfer **transfer)
551
u_box_2d_zslice(x, y, layer, w, h, &box);
552
return context->texture_map(context, resource, level, access,
558
* Map a 3D (texture) resource for reading/writing.
559
* \param access bitmask of PIPE_MAP_x flags
562
pipe_texture_map_3d(struct pipe_context *context,
563
struct pipe_resource *resource,
566
unsigned x, unsigned y, unsigned z,
567
unsigned w, unsigned h, unsigned d,
568
struct pipe_transfer **transfer)
571
u_box_3d(x, y, z, w, h, d, &box);
572
return context->texture_map(context, resource, level, access,
577
pipe_texture_unmap(struct pipe_context *context,
578
struct pipe_transfer *transfer)
580
context->texture_unmap(context, transfer);
584
pipe_set_constant_buffer(struct pipe_context *pipe,
585
enum pipe_shader_type shader, uint index,
586
struct pipe_resource *buf)
589
struct pipe_constant_buffer cb;
591
cb.buffer_offset = 0;
592
cb.buffer_size = buf->width0;
593
cb.user_buffer = NULL;
594
pipe->set_constant_buffer(pipe, shader, index, false, &cb);
596
pipe->set_constant_buffer(pipe, shader, index, false, NULL);
602
* Get the polygon offset enable/disable flag for the given polygon fill mode.
603
* \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
605
static inline boolean
606
util_get_offset(const struct pipe_rasterizer_state *templ,
610
case PIPE_POLYGON_MODE_POINT:
611
return templ->offset_point;
612
case PIPE_POLYGON_MODE_LINE:
613
return templ->offset_line;
614
case PIPE_POLYGON_MODE_FILL:
615
return templ->offset_tri;
623
util_get_min_point_size(const struct pipe_rasterizer_state *state)
625
/* The point size should be clamped to this value at the rasterizer stage.
627
return !state->point_quad_rasterization &&
628
!state->point_smooth &&
629
!state->multisample ? 1.0f : 0.0f;
633
util_query_clear_result(union pipe_query_result *result, unsigned type)
636
case PIPE_QUERY_OCCLUSION_PREDICATE:
637
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
638
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
639
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
640
case PIPE_QUERY_GPU_FINISHED:
643
case PIPE_QUERY_OCCLUSION_COUNTER:
644
case PIPE_QUERY_TIMESTAMP:
645
case PIPE_QUERY_TIME_ELAPSED:
646
case PIPE_QUERY_PRIMITIVES_GENERATED:
647
case PIPE_QUERY_PRIMITIVES_EMITTED:
650
case PIPE_QUERY_SO_STATISTICS:
651
memset(&result->so_statistics, 0, sizeof(result->so_statistics));
653
case PIPE_QUERY_TIMESTAMP_DISJOINT:
654
memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
656
case PIPE_QUERY_PIPELINE_STATISTICS:
657
memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
660
memset(result, 0, sizeof(*result));
664
/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
665
static inline enum tgsi_texture_type
666
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
669
switch (pipe_tex_target) {
671
return TGSI_TEXTURE_BUFFER;
673
case PIPE_TEXTURE_1D:
674
assert(nr_samples <= 1);
675
return TGSI_TEXTURE_1D;
677
case PIPE_TEXTURE_2D:
678
return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;
680
case PIPE_TEXTURE_RECT:
681
assert(nr_samples <= 1);
682
return TGSI_TEXTURE_RECT;
684
case PIPE_TEXTURE_3D:
685
assert(nr_samples <= 1);
686
return TGSI_TEXTURE_3D;
688
case PIPE_TEXTURE_CUBE:
689
assert(nr_samples <= 1);
690
return TGSI_TEXTURE_CUBE;
692
case PIPE_TEXTURE_1D_ARRAY:
693
assert(nr_samples <= 1);
694
return TGSI_TEXTURE_1D_ARRAY;
696
case PIPE_TEXTURE_2D_ARRAY:
697
return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
698
TGSI_TEXTURE_2D_ARRAY;
700
case PIPE_TEXTURE_CUBE_ARRAY:
701
return TGSI_TEXTURE_CUBE_ARRAY;
704
assert(0 && "unexpected texture target");
705
return TGSI_TEXTURE_UNKNOWN;
711
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
712
const struct pipe_constant_buffer *src,
716
if (take_ownership) {
717
pipe_resource_reference(&dst->buffer, NULL);
718
dst->buffer = src->buffer;
720
pipe_resource_reference(&dst->buffer, src->buffer);
722
dst->buffer_offset = src->buffer_offset;
723
dst->buffer_size = src->buffer_size;
724
dst->user_buffer = src->user_buffer;
727
pipe_resource_reference(&dst->buffer, NULL);
728
dst->buffer_offset = 0;
729
dst->buffer_size = 0;
730
dst->user_buffer = NULL;
735
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
736
const struct pipe_shader_buffer *src)
739
pipe_resource_reference(&dst->buffer, src->buffer);
740
dst->buffer_offset = src->buffer_offset;
741
dst->buffer_size = src->buffer_size;
744
pipe_resource_reference(&dst->buffer, NULL);
745
dst->buffer_offset = 0;
746
dst->buffer_size = 0;
751
util_copy_image_view(struct pipe_image_view *dst,
752
const struct pipe_image_view *src)
755
pipe_resource_reference(&dst->resource, src->resource);
756
dst->format = src->format;
757
dst->access = src->access;
758
dst->shader_access = src->shader_access;
761
pipe_resource_reference(&dst->resource, NULL);
762
dst->format = PIPE_FORMAT_NONE;
764
dst->shader_access = 0;
765
memset(&dst->u, 0, sizeof(dst->u));
769
static inline unsigned
770
util_max_layer(const struct pipe_resource *r, unsigned level)
773
case PIPE_TEXTURE_3D:
774
return u_minify(r->depth0, level) - 1;
775
case PIPE_TEXTURE_CUBE:
776
assert(r->array_size == 6);
778
case PIPE_TEXTURE_1D_ARRAY:
779
case PIPE_TEXTURE_2D_ARRAY:
780
case PIPE_TEXTURE_CUBE_ARRAY:
781
return r->array_size - 1;
/* Number of layers (or depth slices, for 3D) at the given mip level. */
static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}
794
util_texrange_covers_whole_level(const struct pipe_resource *tex,
795
unsigned level, unsigned x, unsigned y,
796
unsigned z, unsigned width,
797
unsigned height, unsigned depth)
799
return x == 0 && y == 0 && z == 0 &&
800
width == u_minify(tex->width0, level) &&
801
height == u_minify(tex->height0, level) &&
802
depth == util_num_layers(tex, level);
806
* Returns true if the blit will fully initialize all pixels in the resource.
809
util_blit_covers_whole_resource(const struct pipe_blit_info *info)
811
/* No conditional rendering or scissoring. (We assume that the caller would
812
* have dropped any redundant scissoring)
814
if (info->scissor_enable || info->window_rectangle_include || info->render_condition_enable || info->alpha_blend)
817
const struct pipe_resource *dst = info->dst.resource;
818
/* A single blit can't initialize a miptree. */
819
if (dst->last_level != 0)
822
assert(info->dst.level == 0);
824
/* Make sure the dst box covers the whole resource. */
825
if (!(util_texrange_covers_whole_level(dst, 0,
827
info->dst.box.width, info->dst.box.height, info->dst.box.depth))) {
831
/* Make sure the mask actually updates all the channels present in the dst format. */
832
if (info->mask & PIPE_MASK_RGBA) {
833
if ((info->mask & PIPE_MASK_RGBA) != PIPE_MASK_RGBA)
837
if (info->mask & PIPE_MASK_ZS) {
838
const struct util_format_description *format_desc = util_format_description(info->dst.format);
839
uint32_t dst_has = 0;
840
if (util_format_has_depth(format_desc))
841
dst_has |= PIPE_MASK_Z;
842
if (util_format_has_stencil(format_desc))
843
dst_has |= PIPE_MASK_S;
844
if (dst_has & ~(info->mask & PIPE_MASK_ZS))
852
util_logicop_reads_dest(enum pipe_logicop op)
855
case PIPE_LOGICOP_NOR:
856
case PIPE_LOGICOP_AND_INVERTED:
857
case PIPE_LOGICOP_AND_REVERSE:
858
case PIPE_LOGICOP_INVERT:
859
case PIPE_LOGICOP_XOR:
860
case PIPE_LOGICOP_NAND:
861
case PIPE_LOGICOP_AND:
862
case PIPE_LOGICOP_EQUIV:
863
case PIPE_LOGICOP_NOOP:
864
case PIPE_LOGICOP_OR_INVERTED:
865
case PIPE_LOGICOP_OR_REVERSE:
866
case PIPE_LOGICOP_OR:
868
case PIPE_LOGICOP_CLEAR:
869
case PIPE_LOGICOP_COPY_INVERTED:
870
case PIPE_LOGICOP_COPY:
871
case PIPE_LOGICOP_SET:
874
unreachable("bad logicop");
878
util_writes_stencil(const struct pipe_stencil_state *s)
880
return s->enabled && s->writemask &&
881
((s->fail_op != PIPE_STENCIL_OP_KEEP) ||
882
(s->zpass_op != PIPE_STENCIL_OP_KEEP) ||
883
(s->zfail_op != PIPE_STENCIL_OP_KEEP));
887
util_writes_depth_stencil(const struct pipe_depth_stencil_alpha_state *zsa)
889
if (zsa->depth_enabled && zsa->depth_writemask &&
890
(zsa->depth_func != PIPE_FUNC_NEVER))
893
return util_writes_stencil(&zsa->stencil[0]) ||
894
util_writes_stencil(&zsa->stencil[1]);
897
static inline struct pipe_context *
898
pipe_create_multimedia_context(struct pipe_screen *screen)
902
if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
903
flags |= PIPE_CONTEXT_COMPUTE_ONLY;
905
return screen->context_create(screen, NULL, flags);
908
static inline unsigned util_res_sample_count(struct pipe_resource *res)
910
return res->nr_samples > 0 ? res->nr_samples : 1;
917
#endif /* U_INLINES_H */