#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
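/*
 * Per-object backend state.  The embedded ttm_backend must stay the
 * first member so the ttm_backend pointer handed to the callbacks can
 * be cast straight back to a nouveau_sgdma_be.
 */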
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};
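/*
 * Map each system page for PCI DMA and remember the bus addresses;
 * nouveau_sgdma_clear() is the matching teardown.  On a mapping
 * failure, everything mapped so far is unwound before returning.
 */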
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}
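/*
 * Unbind (if still bound) and unmap every page mapped by
 * nouveau_sgdma_populate(), then release the address array.
 */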
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}
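/*
 * Pre-NV50 bind: write one ctxdma PTE per 4KiB chunk of each mapped
 * page.  The first two 32-bit words of the object are the ctxdma
 * header, hence the "+ 2" when computing the starting PTE.
 */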
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
			offset_l = lower_32_bits(dma_offset);
		}
	}

	nvbe->bound = true;
	return 0;
}
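/*
 * Pre-NV50 unbind: clear the PTEs written by nouveau_sgdma_bind().
 */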
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}
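/*
 * Final teardown: clear any remaining mappings and free the backend.
 */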
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");
		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}
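/*
 * NV50+ bind: the GART is backed by a real GPU VM, so binding is a
 * single scatter-list map into the VMA reserved by
 * nouveau_sgdma_init().
 */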
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	nvbe->offset = mem->start << PAGE_SHIFT;

	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
	nvbe->bound = true;
	return 0;
}
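/*
 * NV50+ unbind: drop the VM mapping established by nv50_sgdma_bind().
 */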
static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	if (!nvbe->bound)
		return 0;

	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
			    nvbe->nr_pages << PAGE_SHIFT);
	nvbe->bound = false;
	return 0;
}
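/*
 * TTM callback tables; populate/clear/destroy are shared between the
 * two hardware generations, only bind/unbind differ.
 */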
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nouveau_sgdma_bind,
	.unbind		= nouveau_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
static struct ttm_backend_func nv50_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nv50_sgdma_bind,
	.unbind		= nv50_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
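/*
 * Allocate a ttm_backend and pick the function table matching the
 * chipset generation.
 */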
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	if (dev_priv->card_type < NV_50)
		nvbe->backend.func = &nouveau_sgdma_backend;
	else
		nvbe->backend.func = &nv50_sgdma_backend;
	return &nvbe->backend;
}
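/*
 * One-time GART setup.  Pre-NV50 cards get a ctxdma page table sized
 * to the aperture (64MiB, or 512MiB when at least 2MiB of RAMIN is
 * reserved); NV50+ cards instead reserve a 512MiB VMA in the channel
 * VM.
 */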
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
			aper_size = 64 * 1024 * 1024;
		else
			aper_size = 512 * 1024 * 1024;

		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */

		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (3 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++)
			nv_wo32(gpuobj, i * 4, 0x00000000);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
	} else
	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
				     12, NV_MEM_ACCESS_RW,
				     &dev_priv->gart_info.vma);
		if (ret)
			return ret;

		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
	return 0;
}
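/*
 * Undo nouveau_sgdma_init(): drop the ctxdma reference and release
 * the GART VMA.
 */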
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
	nouveau_vm_put(&dev_priv->gart_info.vma);
}
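/*
 * Translate a GART offset back to a bus address by reading the ctxdma
 * PTE.  Only valid pre-NV50, where the page table lives in the ctxdma
 * object.
 */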
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}