~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise-security


Viewing changes to drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati
  • Date: 2011-12-06 15:56:07 UTC
  • Revision ID: package-import@ubuntu.com-20111206155607-pcf44kv5fmhk564f
Tags: 3.2.0-1401.1
[ Paolo Pisati ]

* Rebased on top of Ubuntu-3.2.0-3.8
* Tilt-tracking @ ef2487af4bb15bdd0689631774b5a5e3a59f74e2
* Delete debian.ti-omap4/control, it shouldn't be tracked
* Fix architecture spelling (s/armel/armhf/)
* [Config] Update configs following 3.2 import
* [Config] Fix compilation: disable CODA and ARCH_OMAP3
* [Config] Fix compilation: disable Ethernet Faraday
* Update series to precise

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -44 +44 @@
         return 0;
 }
 
+static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+                                          struct vmw_resource **p_res)
+{
+        struct vmw_resource *res = *p_res;
+
+        if (list_empty(&res->validate_head)) {
+                list_add_tail(&res->validate_head, &sw_context->resource_list);
+                *p_res = NULL;
+        } else
+                vmw_resource_unreference(p_res);
+}
+
+/**
+ * vmw_bo_to_validate_list - add a bo to a validate list
+ *
+ * @sw_context: The software context used for this command submission batch.
+ * @bo: The buffer object to add.
+ * @fence_flags: Fence flags to be or'ed with any other fence flags for
+ * this buffer on this submission batch.
+ * @p_val_node: If non-NULL Will be updated with the validate node number
+ * on return.
+ *
+ * Returns -EINVAL if the limit of number of buffer objects per command
+ * submission is reached.
+ */
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+                                   struct ttm_buffer_object *bo,
+                                   uint32_t fence_flags,
+                                   uint32_t *p_val_node)
+{
+        uint32_t val_node;
+        struct ttm_validate_buffer *val_buf;
+
+        val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+
+        if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+                DRM_ERROR("Max number of DMA buffers per submission"
+                          " exceeded.\n");
+                return -EINVAL;
+        }
+
+        val_buf = &sw_context->val_bufs[val_node];
+        if (unlikely(val_node == sw_context->cur_val_buf)) {
+                val_buf->new_sync_obj_arg = NULL;
+                val_buf->bo = ttm_bo_reference(bo);
+                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+                ++sw_context->cur_val_buf;
+        }
+
+        val_buf->new_sync_obj_arg = (void *)
+                ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
+        sw_context->fence_flags |= fence_flags;
+
+        if (p_val_node)
+                *p_val_node = val_node;
+
+        return 0;
+}
+
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
+        struct vmw_resource *ctx;
+
         struct vmw_cid_cmd {
                 SVGA3dCmdHeader header;
                 __le32 cid;
@@ -58 +119 @@
         if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                 return 0;
 
-        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
+                                &ctx);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use context %u\n",
                           (unsigned) cmd->cid);
@@ -67 +129 @@
 
         sw_context->last_cid = cmd->cid;
         sw_context->cid_valid = true;
+        sw_context->cur_ctx = ctx;
+        vmw_resource_to_validate_list(sw_context, &ctx);
 
         return 0;
 }
@@ -75 +139 @@
                              struct vmw_sw_context *sw_context,
                              uint32_t *sid)
 {
+        struct vmw_surface *srf;
+        int ret;
+        struct vmw_resource *res;
+
         if (*sid == SVGA3D_INVALID_ID)
                 return 0;
 
-        if (unlikely((!sw_context->sid_valid  ||
-                      *sid != sw_context->last_sid))) {
-                int real_id;
-                int ret = vmw_surface_check(dev_priv, sw_context->tfile,
-                                            *sid, &real_id);
-
-                if (unlikely(ret != 0)) {
-                        DRM_ERROR("Could ot find or use surface 0x%08x "
-                                  "address 0x%08lx\n",
-                                  (unsigned int) *sid,
-                                  (unsigned long) sid);
-                        return ret;
-                }
-
-                sw_context->last_sid = *sid;
-                sw_context->sid_valid = true;
-                *sid = real_id;
-                sw_context->sid_translation = real_id;
-        } else
+        if (likely((sw_context->sid_valid  &&
+                      *sid == sw_context->last_sid))) {
                 *sid = sw_context->sid_translation;
+                return 0;
+        }
+
+        ret = vmw_user_surface_lookup_handle(dev_priv,
+                                             sw_context->tfile,
+                                             *sid, &srf);
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Could ot find or use surface 0x%08x "
+                          "address 0x%08lx\n",
+                          (unsigned int) *sid,
+                          (unsigned long) sid);
+                return ret;
+        }
+
+        ret = vmw_surface_validate(dev_priv, srf);
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Could not validate surface.\n");
+                vmw_surface_unreference(&srf);
+                return ret;
+        }
+
+        sw_context->last_sid = *sid;
+        sw_context->sid_valid = true;
+        sw_context->sid_translation = srf->res.id;
+        *sid = sw_context->sid_translation;
+
+        res = &srf->res;
+        vmw_resource_to_validate_list(sw_context, &res);
 
         return 0;
 }
@@ -166 +246 @@
         } *cmd;
 
         cmd = container_of(header, struct vmw_sid_cmd, header);
+
+        if (unlikely(!sw_context->kernel)) {
+                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+                return -EPERM;
+        }
+
         return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 }
 
@@ -178 +264 @@
                 SVGA3dCmdPresent body;
         } *cmd;
 
+
         cmd = container_of(header, struct vmw_sid_cmd, header);
+
+        if (unlikely(!sw_context->kernel)) {
+                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+                return -EPERM;
+        }
+
         return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context for the next query.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding
+ * query results, and if another buffer currently is pinned for query
+ * results. If so, the function prepares the state of @sw_context for
+ * switching pinned buffers after successful submission of the current
+ * command batch. It also checks whether we're using a new query context.
+ * In that case, it makes sure we emit a query barrier for the old
+ * context before the current query buffer is fenced.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+                                       uint32_t cid,
+                                       struct ttm_buffer_object *new_query_bo,
+                                       struct vmw_sw_context *sw_context)
+{
+        int ret;
+        bool add_cid = false;
+        uint32_t cid_to_add;
+
+        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
+                if (unlikely(new_query_bo->num_pages > 4)) {
+                        DRM_ERROR("Query buffer too large.\n");
+                        return -EINVAL;
+                }
+
+                if (unlikely(sw_context->cur_query_bo != NULL)) {
+                        BUG_ON(!sw_context->query_cid_valid);
+                        add_cid = true;
+                        cid_to_add = sw_context->cur_query_cid;
+                        ret = vmw_bo_to_validate_list(sw_context,
+                                                      sw_context->cur_query_bo,
+                                                      DRM_VMW_FENCE_FLAG_EXEC,
+                                                      NULL);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+                sw_context->cur_query_bo = new_query_bo;
+
+                ret = vmw_bo_to_validate_list(sw_context,
+                                              dev_priv->dummy_query_bo,
+                                              DRM_VMW_FENCE_FLAG_EXEC,
+                                              NULL);
+                if (unlikely(ret != 0))
+                        return ret;
+
+        }
+
+        if (unlikely(cid != sw_context->cur_query_cid &&
+                     sw_context->query_cid_valid)) {
+                add_cid = true;
+                cid_to_add = sw_context->cur_query_cid;
+        }
+
+        sw_context->cur_query_cid = cid;
+        sw_context->query_cid_valid = true;
+
+        if (add_cid) {
+                struct vmw_resource *ctx = sw_context->cur_ctx;
+
+                if (list_empty(&ctx->query_head))
+                        list_add_tail(&ctx->query_head,
+                                      &sw_context->query_list);
+                ret = vmw_bo_to_validate_list(sw_context,
+                                              dev_priv->dummy_query_bo,
+                                              DRM_VMW_FENCE_FLAG_EXEC,
+                                              NULL);
+                if (unlikely(ret != 0))
+                        return ret;
+        }
+        return 0;
+}
+
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then,
+ * if no other query waits are issued this command submission batch,
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all
+ * preseding queries have finished, and the old query buffer can be unpinned.
+ * However, since both the new query buffer and the old one are fenced with
+ * that fence, we can do an asynchronus unpin now, and be sure that the
+ * old query buffer won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new - and old query buffers need to be fenced
+ * using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+                                     struct vmw_sw_context *sw_context)
+{
+
+        struct vmw_resource *ctx, *next_ctx;
+        int ret;
+
+        /*
+         * The validate list should still hold references to all
+         * contexts here.
+         */
+
+        list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
+                                 query_head) {
+                list_del_init(&ctx->query_head);
+
+                BUG_ON(list_empty(&ctx->validate_head));
+
+                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+                if (unlikely(ret != 0))
+                        DRM_ERROR("Out of fifo space for dummy query.\n");
+        }
+
+        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+                if (dev_priv->pinned_bo) {
+                        vmw_bo_pin(dev_priv->pinned_bo, false);
+                        ttm_bo_unref(&dev_priv->pinned_bo);
+                }
+
+                vmw_bo_pin(sw_context->cur_query_bo, true);
+
+                /*
+                 * We pin also the dummy_query_bo buffer so that we
+                 * don't need to validate it when emitting
+                 * dummy queries in context destroy paths.
+                 */
+
+                vmw_bo_pin(dev_priv->dummy_query_bo, true);
+                dev_priv->dummy_query_bo_pinned = true;
+
+                dev_priv->query_cid = sw_context->cur_query_cid;
+                dev_priv->pinned_bo =
+                        ttm_bo_reference(sw_context->cur_query_bo);
+        }
+}
+
+/**
+ * vmw_query_switch_backoff - clear query barrier list
+ * @sw_context: The sw context used for this submission batch.
+ *
+ * This function is used as part of an error path, where a previously
+ * set up list of query barriers needs to be cleared.
+ *
+ */
+static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
+{
+        struct list_head *list, *next;
+
+        list_for_each_safe(list, next, &sw_context->query_list) {
+                list_del_init(list);
+        }
+}
+
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                    struct vmw_sw_context *sw_context,
                                    SVGAGuestPtr *ptr,
@@ -191 +446 @@
         struct ttm_buffer_object *bo;
         uint32_t handle = ptr->gmrId;
         struct vmw_relocation *reloc;
-        uint32_t cur_validate_node;
-        struct ttm_validate_buffer *val_buf;
         int ret;
 
         ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
@@ -212 +465 @@
         reloc = &sw_context->relocs[sw_context->cur_reloc++];
         reloc->location = ptr;
 
-        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
-                DRM_ERROR("Max number of DMA buffers per submission"
-                          " exceeded.\n");
-                ret = -EINVAL;
+        ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
+                                      &reloc->index);
+        if (unlikely(ret != 0))
                 goto out_no_reloc;
-        }
 
-        reloc->index = cur_validate_node;
-        if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
-                val_buf = &sw_context->val_bufs[cur_validate_node];
-                val_buf->bo = ttm_bo_reference(bo);
-                val_buf->new_sync_obj_arg = (void *) dev_priv;
-                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-                ++sw_context->cur_val_buf;
-        }
         *vmw_bo_p = vmw_bo;
         return 0;
 
@@ -259 +501 @@
         if (unlikely(ret != 0))
                 return ret;
 
+        ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
+                                          &vmw_bo->base, sw_context);
+
         vmw_dmabuf_unreference(&vmw_bo);
-        return 0;
+        return ret;
 }
 
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
@@ -273 +518 @@
                 SVGA3dCmdWaitForQuery q;
         } *cmd;
         int ret;
+        struct vmw_resource *ctx;
 
         cmd = container_of(header, struct vmw_query_cmd, header);
         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -286 +532 @@
                 return ret;
 
         vmw_dmabuf_unreference(&vmw_bo);
+
+        /*
+         * This wait will act as a barrier for previous waits for this
+         * context.
+         */
+
+        ctx = sw_context->cur_ctx;
+        if (!list_empty(&ctx->query_head))
+                list_del_init(&ctx->query_head);
+
         return 0;
 }
 
-
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
@@ -302 +557 @@
                 SVGA3dCmdSurfaceDMA dma;
         } *cmd;
         int ret;
+        struct vmw_resource *res;
 
         cmd = container_of(header, struct vmw_dma_cmd, header);
         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -318 +574 @@
                 goto out_no_reloc;
         }
 
-        /**
+        ret = vmw_surface_validate(dev_priv, srf);
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Culd not validate surface.\n");
+                goto out_no_validate;
+        }
+
+        /*
         * Patch command stream with device SID.
         */
-
         cmd->dma.host.sid = srf->res.id;
         vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-        /**
-         * FIXME: May deadlock here when called from the
-         * command parsing code.
-         */
+
+        vmw_dmabuf_unreference(&vmw_bo);
+
+        res = &srf->res;
+        vmw_resource_to_validate_list(sw_context, &res);
+
+        return 0;
+
+out_no_validate:
         vmw_surface_unreference(&srf);
-
 out_no_reloc:
         vmw_dmabuf_unreference(&vmw_bo);
         return ret;
@@ -419 +685 @@
         return 0;
 }
 
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      void *buf)
+{
+        struct vmw_dma_buffer *vmw_bo;
+        int ret;
+
+        struct {
+                uint32_t header;
+                SVGAFifoCmdDefineGMRFB body;
+        } *cmd = buf;
+
+        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                      &cmd->body.ptr,
+                                      &vmw_bo);
+        if (unlikely(ret != 0))
+                return ret;
+
+        vmw_dmabuf_unreference(&vmw_bo);
+
+        return ret;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                void *buf, uint32_t *size)
+{
+        uint32_t size_remaining = *size;
+        uint32_t cmd_id;
+
+        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+        switch (cmd_id) {
+        case SVGA_CMD_UPDATE:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+                break;
+        case SVGA_CMD_DEFINE_GMRFB:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+                break;
+        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        default:
+                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+                return -EINVAL;
+        }
+
+        if (*size > size_remaining) {
+                DRM_ERROR("Invalid SVGA command (size mismatch):"
+                          " %u.\n", cmd_id);
+                return -EINVAL;
+        }
+
+        if (unlikely(!sw_context->kernel)) {
+                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+                return -EPERM;
+        }
+
+        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+        return 0;
+}
 
 typedef int (*vmw_cmd_func) (struct vmw_private *,
                              struct vmw_sw_context *,
@@ -471 +802 @@
         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
         int ret;
 
-        cmd_id = ((uint32_t *)buf)[0];
-        if (cmd_id == SVGA_CMD_UPDATE) {
-                *size = 5 << 2;
-                return 0;
-        }
+        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+        /* Handle any none 3D commands */
+        if (unlikely(cmd_id < SVGA_CMD_MAX))
+                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
 
         cmd_id = le32_to_cpu(header->id);
         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
@@ -500 +831 @@
 
 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
-                             void *buf, uint32_t size)
+                             void *buf,
+                             uint32_t size)
 {
         int32_t cur_size = size;
         int ret;
@@ -550 +882 @@
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
         struct ttm_validate_buffer *entry, *next;
+        struct vmw_resource *res, *res_next;
 
+        /*
+         * Drop references to DMA buffers held during command submission.
+         */
         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                  head) {
                 list_del(&entry->head);
@@ -559 +895 @@
                 sw_context->cur_val_buf--;
         }
         BUG_ON(sw_context->cur_val_buf != 0);
+
+        /*
+         * Drop references to resources held during command submission.
+         */
+        vmw_resource_unreserve(&sw_context->resource_list);
+        list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
+                                 validate_head) {
+                list_del_init(&res->validate_head);
+                vmw_resource_unreference(&res);
+        }
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -566 +912 @@
 {
         int ret;
 
+
+        /*
+         * Don't validate pinned buffers.
+         */
+
+        if (bo == dev_priv->pinned_bo ||
+            (bo == dev_priv->dummy_query_bo &&
+             dev_priv->dummy_query_bo_pinned))
+                return 0;
+
         /**
          * Put BO in VRAM if there is space, otherwise as a GMR.
          * If there is no space in VRAM and GMR ids are all used up,
@@ -602 +958 @@
         return 0;
 }
 
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-        struct vmw_private *dev_priv = vmw_priv(dev);
-        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+                                 uint32_t size)
+{
+        if (likely(sw_context->cmd_bounce_size >= size))
+                return 0;
+
+        if (sw_context->cmd_bounce_size == 0)
+                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
+        while (sw_context->cmd_bounce_size < size) {
+                sw_context->cmd_bounce_size =
+                        PAGE_ALIGN(sw_context->cmd_bounce_size +
+                                   (sw_context->cmd_bounce_size >> 1));
+        }
+
+        if (sw_context->cmd_bounce != NULL)
+                vfree(sw_context->cmd_bounce);
+
+        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+        if (sw_context->cmd_bounce == NULL) {
+                DRM_ERROR("Failed to allocate command bounce buffer.\n");
+                sw_context->cmd_bounce_size = 0;
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, We sync the fifo and return NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ *
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates
+ * a userspace handle if @p_handle is not NULL, otherwise not.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+                               struct vmw_private *dev_priv,
+                               struct vmw_fence_obj **p_fence,
+                               uint32_t *p_handle)
+{
+        uint32_t sequence;
+        int ret;
+        bool synced = false;
+
+        /* p_handle implies file_priv. */
+        BUG_ON(p_handle != NULL && file_priv == NULL);
+
+        ret = vmw_fifo_send_fence(dev_priv, &sequence);
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Fence submission error. Syncing.\n");
+                synced = true;
+        }
+
+        if (p_handle != NULL)
+                ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+                                            sequence,
+                                            DRM_VMW_FENCE_FLAG_EXEC,
+                                            p_fence, p_handle);
+        else
+                ret = vmw_fence_create(dev_priv->fman, sequence,
+                                       DRM_VMW_FENCE_FLAG_EXEC,
+                                       p_fence);
+
+        if (unlikely(ret != 0 && !synced)) {
+                (void) vmw_fallback_wait(dev_priv, false, false,
+                                         sequence, false,
+                                         VMW_FENCE_WAIT_TIMEOUT);
+                *p_fence = NULL;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_execbuf_copy_fence_user - copy fence object information to
+ * user-space.
+ *
+ * @dev_priv: Pointer to a vmw_private struct.
+ * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
+ * @ret: Return value from fence object creation.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
+ * which the information should be copied.
+ * @fence: Pointer to the fenc object.
+ * @fence_handle: User-space fence handle.
+ *
+ * This function copies fence information to user-space. If copying fails,
+ * The user-space struct drm_vmw_fence_rep::error member is hopefully
+ * left untouched, and if it's preloaded with an -EFAULT by user-space,
+ * the error will hopefully be detected.
+ * Also if copying fails, user-space will be unable to signal the fence
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+void
+vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+                            struct vmw_fpriv *vmw_fp,
+                            int ret,
+                            struct drm_vmw_fence_rep __user *user_fence_rep,
+                            struct vmw_fence_obj *fence,
+                            uint32_t fence_handle)
+{
         struct drm_vmw_fence_rep fence_rep;
-        struct drm_vmw_fence_rep __user *user_fence_rep;
-        int ret;
-        void *user_cmd;
+
+        if (user_fence_rep == NULL)
+                return;
+
+        memset(&fence_rep, 0, sizeof(fence_rep));
+
+        fence_rep.error = ret;
+        if (ret == 0) {
+                BUG_ON(fence == NULL);
+
+                fence_rep.handle = fence_handle;
+                fence_rep.seqno = fence->seqno;
+                vmw_update_seqno(dev_priv, &dev_priv->fifo);
+                fence_rep.passed_seqno = dev_priv->last_read_seqno;
+        }
+
+        /*
+         * copy_to_user errors will be detected by user space not
+         * seeing fence_rep::error filled in. Typically
+         * user-space would have pre-set that member to -EFAULT.
+         */
+        ret = copy_to_user(user_fence_rep, &fence_rep,
+                           sizeof(fence_rep));
+
+        /*
+         * User-space lost the fence object. We need to sync
+         * and unreference the handle.
+         */
+        if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+                ttm_ref_object_base_unref(vmw_fp->tfile,
+                                          fence_handle, TTM_REF_USAGE);
+                DRM_ERROR("Fence copy error. Syncing.\n");
+                (void) vmw_fence_obj_wait(fence, fence->signal_mask,
+                                          false, false,
+                                          VMW_FENCE_WAIT_TIMEOUT);
+        }
+}
+
+int vmw_execbuf_process(struct drm_file *file_priv,
+                        struct vmw_private *dev_priv,
+                        void __user *user_commands,
+                        void *kernel_commands,
+                        uint32_t command_size,
+                        uint64_t throttle_us,
+                        struct drm_vmw_fence_rep __user *user_fence_rep)
+{
+        struct vmw_sw_context *sw_context = &dev_priv->ctx;
+        struct vmw_fence_obj *fence;
+        uint32_t handle;
         void *cmd;
-        uint32_t sequence;
-        struct vmw_sw_context *sw_context = &dev_priv->ctx;
-        struct vmw_master *vmaster = vmw_master(file_priv->master);
-
-        ret = ttm_read_lock(&vmaster->lock, true);
-        if (unlikely(ret != 0))
-                return ret;
+        int ret;
 
         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-        if (unlikely(ret != 0)) {
-                ret = -ERESTARTSYS;
-                goto out_no_cmd_mutex;
-        }
-
-        cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
-        if (unlikely(cmd == NULL)) {
-                DRM_ERROR("Failed reserving fifo space for commands.\n");
-                ret = -ENOMEM;
-                goto out_unlock;
-        }
-
-        user_cmd = (void __user *)(unsigned long)arg->commands;
-        ret = copy_from_user(cmd, user_cmd, arg->command_size);
-
-        if (unlikely(ret != 0)) {
-                ret = -EFAULT;
-                DRM_ERROR("Failed copying commands.\n");
-                goto out_commit;
-        }
+        if (unlikely(ret != 0))
+                return -ERESTARTSYS;
+
+        if (kernel_commands == NULL) {
+                sw_context->kernel = false;
+
+                ret = vmw_resize_cmd_bounce(sw_context, command_size);
+                if (unlikely(ret != 0))
+                        goto out_unlock;
+
+
+                ret = copy_from_user(sw_context->cmd_bounce,
+                                     user_commands, command_size);
+
+                if (unlikely(ret != 0)) {
+                        ret = -EFAULT;
+                        DRM_ERROR("Failed copying commands.\n");
+                        goto out_unlock;
+                }
+                kernel_commands = sw_context->cmd_bounce;
+        } else
+                sw_context->kernel = true;
 
         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
         sw_context->cid_valid = false;
         sw_context->sid_valid = false;
         sw_context->cur_reloc = 0;
         sw_context->cur_val_buf = 0;
+        sw_context->fence_flags = 0;
+        INIT_LIST_HEAD(&sw_context->query_list);
+        INIT_LIST_HEAD(&sw_context->resource_list);
+        sw_context->cur_query_bo = dev_priv->pinned_bo;
+        sw_context->cur_query_cid = dev_priv->query_cid;
+        sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
 
         INIT_LIST_HEAD(&sw_context->validate_nodes);
 
-        ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+                                command_size);
         if (unlikely(ret != 0))
                 goto out_err;
+
         ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
         if (unlikely(ret != 0))
                 goto out_err;
@@ -663 +1170 @@
 
         vmw_apply_relocations(sw_context);
 
-        if (arg->throttle_us) {
-                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
-                                   arg->throttle_us);
+        if (throttle_us) {
+                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+                                   throttle_us);
 
                 if (unlikely(ret != 0))
-                        goto out_err;
-        }
-
-        vmw_fifo_commit(dev_priv, arg->command_size);
-
-        ret = vmw_fifo_send_fence(dev_priv, &sequence);
-
-        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
-                                    (void *)(unsigned long) sequence);
-        vmw_clear_validations(sw_context);
-        mutex_unlock(&dev_priv->cmdbuf_mutex);
-
+                        goto out_throttle;
+        }
+
+        cmd = vmw_fifo_reserve(dev_priv, command_size);
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving fifo space for commands.\n");
+                ret = -ENOMEM;
+                goto out_throttle;
+        }
+
+        memcpy(cmd, kernel_commands, command_size);
+        vmw_fifo_commit(dev_priv, command_size);
+
+        vmw_query_bo_switch_commit(dev_priv, sw_context);
+        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+                                         &fence,
+                                         (user_fence_rep) ? &handle : NULL);
         /*
          * This error is harmless, because if fence submission fails,
-         * vmw_fifo_send_fence will sync.
+         * vmw_fifo_send_fence will sync. The error will be propagated to
+         * user-space in @fence_rep
          */
 
         if (ret != 0)
                 DRM_ERROR("Fence submission error. Syncing.\n");
 
-        fence_rep.error = ret;
-        fence_rep.fence_seq = (uint64_t) sequence;
-        fence_rep.pad64 = 0;
-
-        user_fence_rep = (struct drm_vmw_fence_rep __user *)
-            (unsigned long)arg->fence_rep;
+        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+                                    (void *) fence);
+
+        vmw_clear_validations(sw_context);
+        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+                                    user_fence_rep, fence, handle);
+
+        if (likely(fence != NULL))
+                vmw_fence_obj_unreference(&fence);
+
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+        return 0;
+
+out_err:
+        vmw_free_relocations(sw_context);
+out_throttle:
+        vmw_query_switch_backoff(sw_context);
+        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+        vmw_clear_validations(sw_context);
+out_unlock:
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+        return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer
+ * if the normal way to do this hits an error, which should typically be
+ * extremely rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+
+        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+        vmw_bo_pin(dev_priv->pinned_bo, false);
+        vmw_bo_pin(dev_priv->dummy_query_bo, false);
+        dev_priv->dummy_query_bo_pinned = false;
+}
+
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @only_on_cid_match: Only flush and unpin if the current active query cid
+ * matches @cid.
+ * @cid: Optional context id to match.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but make attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+                                   bool only_on_cid_match, uint32_t cid)
+{
+        int ret = 0;
+        struct list_head validate_list;
+        struct ttm_validate_buffer pinned_val, query_val;
+        struct vmw_fence_obj *fence;
+
+        mutex_lock(&dev_priv->cmdbuf_mutex);
+
+        if (dev_priv->pinned_bo == NULL)
+                goto out_unlock;
+
+        if (only_on_cid_match && cid != dev_priv->query_cid)
+                goto out_unlock;
+
+        INIT_LIST_HEAD(&validate_list);
+
+        pinned_val.new_sync_obj_arg = (void *)(unsigned long)
+                DRM_VMW_FENCE_FLAG_EXEC;
+        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+        list_add_tail(&pinned_val.head, &validate_list);
+
+        query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
+        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+        list_add_tail(&query_val.head, &validate_list);
+
+        do {
+                ret = ttm_eu_reserve_buffers(&validate_list);
+        } while (ret == -ERESTARTSYS);
+
+        if (unlikely(ret != 0)) {
+                vmw_execbuf_unpin_panic(dev_priv);
+                goto out_no_reserve;
+        }
+
+        ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+        if (unlikely(ret != 0)) {
+                vmw_execbuf_unpin_panic(dev_priv);
+                goto out_no_emit;
+        }
+
+        vmw_bo_pin(dev_priv->pinned_bo, false);
+        vmw_bo_pin(dev_priv->dummy_query_bo, false);
+        dev_priv->dummy_query_bo_pinned = false;
+
+        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+        ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+
+        ttm_bo_unref(&query_val.bo);
+        ttm_bo_unref(&pinned_val.bo);
+        ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+        return;
+
+out_no_emit:
+        ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+        ttm_bo_unref(&query_val.bo);
+        ttm_bo_unref(&pinned_val.bo);
+        ttm_bo_unref(&dev_priv->pinned_bo);
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+        struct vmw_private *dev_priv = vmw_priv(dev);
+        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+        struct vmw_master *vmaster = vmw_master(file_priv->master);
+        int ret;
 
         /*
-         * copy_to_user errors will be detected by user space not
-         * seeing fence_rep::error filled in.
+         * This will allow us to extend the ioctl argument while
+         * maintaining backwards compatibility:
+         * We take different code paths depending on the value of
+         * arg->version.
          */
 
-        ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+        if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+                DRM_ERROR("Incorrect execbuf version.\n");
+                DRM_ERROR("You're running outdated experimental "
+                          "vmwgfx user-space drivers.");
+                return -EINVAL;
+        }
+
+        ret = ttm_read_lock(&vmaster->lock, true);
+        if (unlikely(ret != 0))
+                return ret;
+
+        ret = vmw_execbuf_process(file_priv, dev_priv,
+                                  (void __user *)(unsigned long)arg->commands,
+                                  NULL, arg->command_size, arg->throttle_us,
+                                  (void __user *)(unsigned long)arg->fence_rep);
+
+        if (unlikely(ret != 0))
+                goto out_unlock;
 
         vmw_kms_cursor_post_execbuf(dev_priv);
-        ttm_read_unlock(&vmaster->lock);
-        return 0;
-out_err:
-        vmw_free_relocations(sw_context);
-        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
-        vmw_clear_validations(sw_context);
-out_commit:
-        vmw_fifo_commit(dev_priv, 0);
+
 out_unlock:
-        mutex_unlock(&dev_priv->cmdbuf_mutex);
-out_no_cmd_mutex:
         ttm_read_unlock(&vmaster->lock);
         return ret;
 }
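Taken together, the version check in vmw_execbuf_ioctl() and the drm_vmw_fence_rep copy-back described in vmw_execbuf_copy_fence_user() imply a specific user-space calling pattern. The sketch below is illustrative only: the struct and field names are the ones visible in the hunks above, while the libdrm drmCommandWrite() call, the header name and the exact field types are assumptions rather than something this patch defines.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"         /* assumed location of the vmwgfx UAPI structs */

/* Submit a command buffer and detect a lost fence copy-back. */
static int example_submit(int fd, void *cmds, uint32_t cmd_size)
{
        struct drm_vmw_execbuf_arg arg;
        struct drm_vmw_fence_rep fence_rep;
        int ret;

        memset(&arg, 0, sizeof(arg));
        memset(&fence_rep, 0, sizeof(fence_rep));

        /*
         * Pre-set error to -EFAULT so a failed copy_to_user() in
         * vmw_execbuf_copy_fence_user() is detectable from user-space.
         */
        fence_rep.error = -EFAULT;

        arg.version = DRM_VMW_EXECBUF_VERSION;  /* checked by vmw_execbuf_ioctl() */
        arg.commands = (unsigned long) cmds;
        arg.command_size = cmd_size;
        arg.throttle_us = 0;
        arg.fence_rep = (unsigned long) &fence_rep;

        ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
        if (ret != 0)
                return ret;

        if (fence_rep.error != 0)       /* still -EFAULT: fence info was lost */
                return fence_rep.error;

        /* fence_rep.handle and fence_rep.seqno can now be used to wait. */
        return 0;
}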