static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					  struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	if (list_empty(&res->validate_head)) {
		list_add_tail(&res->validate_head, &sw_context->resource_list);
		*p_res = NULL;
	} else
		vmw_resource_unreference(p_res);
}
/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @fence_flags: Fence flags to be or'ed with any other fence flags for
 * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t fence_flags,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct ttm_validate_buffer *val_buf;

	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);

	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission "
			  "exceeded.\n");
		return -EINVAL;
	}

	val_buf = &sw_context->val_bufs[val_node];
	if (unlikely(val_node == sw_context->cur_val_buf)) {
		/* First time this bo is seen: set up a new validate node. */
		val_buf->new_sync_obj_arg = NULL;
		val_buf->bo = ttm_bo_reference(bo);
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	/* Accumulate fence flags for this bo and for the whole batch. */
	val_buf->new_sync_obj_arg = (void *)
		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
	sw_context->fence_flags |= fence_flags;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
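/*
 * Example (illustrative sketch, not part of the original file):
 * vmw_example_add_bo() is a hypothetical caller that adds a buffer object
 * to the validate list and records its node, using only the function
 * defined above.
 */
static int vmw_example_add_bo(struct vmw_sw_context *sw_context,
			      struct ttm_buffer_object *bo)
{
	uint32_t val_node;
	int ret;

	/* Fence with the EXEC flag, as the query-switch code below does. */
	ret = vmw_bo_to_validate_list(sw_context, bo,
				      DRM_VMW_FENCE_FLAG_EXEC, &val_node);
	if (unlikely(ret != 0))
		return ret;

	/* Adding the same bo again reuses the same val_node. */
	return 0;
}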
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;

	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;
	sw_context->cur_ctx = ctx;
	vmw_resource_to_validate_list(sw_context, &ctx);

	return 0;
}
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely((sw_context->sid_valid &&
		    *sid == sw_context->last_sid))) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv,
					     sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	ret = vmw_surface_validate(dev_priv, srf);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Could not validate surface.\n");
		vmw_surface_unreference(&srf);
		return ret;
	}

	/* Cache the handle -> resource id translation for reuse. */
	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	vmw_resource_to_validate_list(sw_context, &res);

	return 0;
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context for the next query.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch. It also checks whether we're using a new query context.
 * In that case, it makes sure we emit a query barrier for the old
 * context before the current query buffer is fenced.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       uint32_t cid,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	int ret;
	bool add_cid = false;
	uint32_t cid_to_add;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			BUG_ON(!sw_context->query_cid_valid);
			add_cid = true;
			cid_to_add = sw_context->cur_query_cid;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      DRM_VMW_FENCE_FLAG_EXEC,
						      NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      DRM_VMW_FENCE_FLAG_EXEC,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	if (unlikely(cid != sw_context->cur_query_cid &&
		     sw_context->query_cid_valid)) {
		add_cid = true;
		cid_to_add = sw_context->cur_query_cid;
	}

	sw_context->cur_query_cid = cid;
	sw_context->query_cid_valid = true;

	if (add_cid) {
		struct vmw_resource *ctx = sw_context->cur_ctx;

		if (list_empty(&ctx->query_head))
			list_add_tail(&ctx->query_head,
				      &sw_context->query_list);
		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      DRM_VMW_FENCE_FLAG_EXEC,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * if no other query waits are issued in this command submission batch,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_resource *ctx, *next_ctx;
	int ret;

	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
				 query_head) {
		list_del_init(&ctx->query_head);

		BUG_ON(list_empty(&ctx->validate_head));

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		vmw_bo_pin(sw_context->cur_query_bo, true);

		/*
		 * We pin also the dummy_query_bo buffer so that we
		 * don't need to validate it when emitting
		 * dummy queries in context destroy paths.
		 */
		vmw_bo_pin(dev_priv->dummy_query_bo, true);
		dev_priv->dummy_query_bo_pinned = true;

		dev_priv->query_cid = sw_context->cur_query_cid;
		dev_priv->pinned_bo =
			ttm_bo_reference(sw_context->cur_query_bo);
	}
}
/**
 * vmw_query_switch_backoff - clear query barrier list
 * @sw_context: The sw context used for this submission batch.
 *
 * This function is used as part of an error path, where a previously
 * set up list of query barriers needs to be cleared.
 */
static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
{
	struct list_head *list, *next;

	list_for_each_safe(list, next, &sw_context->query_list) {
		list_del_init(list);
	}
}
/*
 * vmw_translate_guest_ptr - Look up the buffer object backing the guest
 * pointer @ptr, add it to the validate list and return it in @vmw_bo_p.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p);
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}
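/*
 * Sketch (assumed caller, illustrative only): a check function for a
 * query command typically combines vmw_translate_guest_ptr() with
 * vmw_query_bo_switch_prepare() above. vmw_example_query_check() and its
 * parameters are hypothetical; only the two called functions are from
 * this file.
 */
static int vmw_example_query_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t cid, SVGAGuestPtr *guest_ptr)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      guest_ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, cid, &vmw_bo->base,
					  sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}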
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);
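/*
 * Sketch (assumed shape of the dispatch machinery, not shown in this
 * excerpt): each SVGA3d command id maps to a check function of type
 * vmw_cmd_func, and the command-stream checker calls the matching entry
 * per command. The macro and table names here are illustrative.
 */
#define VMW_CMD_DEF(cmd, func) \
	[(cmd) - SVGA_3D_CMD_BASE] = (func)

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	/* ... one entry per supported command ... */
};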
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	/* Grow by roughly 1.5x, page-aligned, until the request fits. */
	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
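/*
 * Worked example (stand-alone illustration, not part of the driver):
 * the growth loop above yields a page-aligned ~1.5x progression.
 * Assuming 4 KiB pages and a 32 KiB initial size, a 200 KiB request
 * grows the buffer 48K -> 72K -> 108K -> 164K -> 248K; alignment first
 * rounds up at the 164K step (165888 -> 167936 bytes).
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u	/* assumed page size */
#define EXAMPLE_PAGE_ALIGN(x) \
	(((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))

int main(void)
{
	unsigned int bounce = 32768;	/* assumed initial bounce size */
	unsigned int request = 200 * 1024;

	while (bounce < request) {
		bounce = EXAMPLE_PAGE_ALIGN(bounce + (bounce >> 1));
		printf("grew bounce buffer to %u bytes\n", bounce);
	}
	return 0;
}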
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
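/*
 * Usage note (illustrative, not from the original file): kernel-internal
 * callers that need no user-space handle pass NULL for both @file_priv
 * and @p_handle, exactly as vmw_execbuf_release_pinned_bo() does below:
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *
 * A NULL resulting fence is then safe to pass on to buffer fencing,
 * since the function has already synced the fifo in that case.
 */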
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
static void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
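/*
 * Illustration (hypothetical user-space counterpart, not part of this
 * file; header path and helper name are assumptions): a submitter
 * pre-loads fence_rep.error with -EFAULT so a failed copy_to_user in the
 * kernel is detectable, as described in the comment above.
 */
#include <string.h>
#include <errno.h>
#include "vmwgfx_drm.h"	/* for struct drm_vmw_fence_rep */

static void example_prepare_fence_rep(struct drm_vmw_fence_rep *rep)
{
	memset(rep, 0, sizeof(*rep));
	rep->error = -EFAULT;	/* kernel overwrites this on success */
}

/*
 * After the execbuf ioctl returns:
 *	rep.error == -EFAULT  -> kernel never wrote the struct;
 *	rep.error == 0        -> rep.handle / rep.seqno are valid.
 */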
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->query_list);
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->cur_query_cid = dev_priv->query_cid;
	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_throttle;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_throttle;
	}

	memcpy(cmd, kernel_commands, command_size);
	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return 0;

out_err:
	vmw_free_relocations(sw_context);
out_throttle:
	vmw_query_switch_backoff(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return ret;
}
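/*
 * Example (illustrative, hypothetical caller): kernel-internal users skip
 * the bounce buffer by passing their commands directly as
 * @kernel_commands, with no throttling and no user fence report. The
 * fifo_cmd buffer is assumed to have been built by the caller.
 */
static int vmw_example_submit_kernel(struct drm_file *file_priv,
				     struct vmw_private *dev_priv,
				     void *fifo_cmd, uint32_t fifo_size)
{
	/* kernel_commands != NULL sets sw_context->kernel = true. */
	return vmw_execbuf_process(file_priv, dev_priv, NULL, fifo_cmd,
				   fifo_size, 0, NULL);
}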
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @only_on_cid_match: Only flush and unpin if the current active query cid
 * matches @cid.
 * @cid: Optional context id to match.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				   bool only_on_cid_match, uint32_t cid)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *fence;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	if (only_on_cid_match && cid != dev_priv->query_cid)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
		DRM_VMW_FENCE_FLAG_EXEC;
	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_emit;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
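/*
 * Usage sketch (illustrative; the caller and the source of ctx_id are
 * assumptions): hardware context destruction can use this as a query
 * barrier, flushing only when the pinned query cid belongs to the
 * context being destroyed:
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv, true, ctx_id);
 *
 * Passing only_on_cid_match == false unpins unconditionally.
 */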
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
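/*
 * Illustration (hypothetical user-space submitter, not part of this
 * file; the helper name and include paths are assumptions): filling in
 * struct drm_vmw_execbuf_arg for the ioctl above, using the generic
 * libdrm command wrapper.
 */
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int example_execbuf(int fd, void *cmds, uint32_t size,
			   struct drm_vmw_fence_rep *rep)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (unsigned long) cmds;
	arg.command_size = size;
	arg.throttle_us = 0;
	arg.fence_rep = (unsigned long) rep;
	arg.version = DRM_VMW_EXECBUF_VERSION;

	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}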