~ubuntu-branches/ubuntu/precise/linux-lowlatency/precise

Viewing changes to drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c

  • Committer: Package Import Robot
  • Author(s): Alessio Igor Bogani
  • Date: 2011-10-26 11:13:05 UTC
  • Revision ID: package-import@ubuntu.com-20111026111305-tz023xykf0i6eosh
Tags: upstream-3.2.0
Import upstream version 3.2.0

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

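/*
 * Single-entry placement flag sets, referenced by the ttm_placement
 * tables defined below.
 */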
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 3,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

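/*
 * struct vmw_ttm_backend - vmwgfx TTM backend state.
 * @backend: Embedded struct ttm_backend.
 * @pages: Page array handed over by TTM at populate time.
 * @num_pages: Number of pages in @pages.
 * @dev_priv: Pointer to the owning device private.
 * @gmr_id: GMR id the pages are bound to while bound.
 */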
struct vmw_ttm_backend {
        struct ttm_backend backend;
        struct page **pages;
        unsigned long num_pages;
        struct vmw_private *dev_priv;
        int gmr_id;
};

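/*
 * vmw_ttm_populate - Record the page array supplied by TTM. No allocation
 * or mapping is done here; the pages are only stored so that a later
 * bind can hand them to the GMR code.
 */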
static int vmw_ttm_populate(struct ttm_backend *backend,
                            unsigned long num_pages, struct page **pages,
                            struct page *dummy_read_page,
                            dma_addr_t *dma_addrs)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = pages;
        vmw_be->num_pages = num_pages;

        return 0;
}

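/*
 * vmw_ttm_bind - Bind the previously populated pages to a GMR, using the
 * id passed in through bo_mem->start (allocated by the GMR id manager).
 */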
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->gmr_id = bo_mem->start;

        return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
                            vmw_be->num_pages, vmw_be->gmr_id);
}

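/*
 * vmw_ttm_unbind - Unbind the pages from their GMR. The page array itself
 * stays around until vmw_ttm_clear() or vmw_ttm_destroy().
 */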
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
                container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = NULL;
        vmw_be->num_pages = 0;
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
        .populate = vmw_ttm_populate,
        .clear = vmw_ttm_clear,
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

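/*
 * vmw_ttm_backend_init - Allocate and set up a vmw_ttm_backend and return
 * its embedded struct ttm_backend, or NULL on allocation failure.
 */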
struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
        struct vmw_ttm_backend *vmw_be;

        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->backend.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

        return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

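/*
 * vmw_init_mem_type - Describe the memory types the driver supports:
 * cached system pages, fixed "on-card" VRAM handled by the generic range
 * manager, and GMRs handled by the GMR id manager.
 */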
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
                /*
                 * "Guest Memory Regions" is an aperture like feature with
                 *  one slot per bo. There is an upper limit of the number of
                 *  slots as well as the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

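/*
 * vmw_evict_flags - On eviction, ask for placement in cached system memory.
 */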
void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

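/*
 * vmw_ttm_io_mem_reserve - Set up bus address info for CPU mapping.
 * System and GMR memory need no io space; VRAM is mapped as io memory
 * at an offset from the VRAM base address.
 */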
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{

        return (void *)
                vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        unsigned long flags = (unsigned long) sync_arg;
        return  vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
                                       (uint32_t) flags);

}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
                             bool lazy, bool interruptible)
{
        unsigned long flags = (unsigned long) sync_arg;

        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
                                  (uint32_t) flags,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
}

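/*
 * vmw_bo_driver - TTM driver callbacks for vmwgfx. No driver-specific
 * move callback is provided (.move is NULL), so buffer moves fall back
 * to TTM's generic paths; synchronization uses vmw_fence objects via
 * the sync_obj_* hooks above.
 */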
struct ttm_bo_driver vmw_bo_driver = {
        .create_ttm_backend_entry = vmw_ttm_backend_init,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = NULL,
        .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};