/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_ramht_ref(NULL, &evo->ramht, evo);
	nouveau_gpuobj_channel_takedown(evo);
	nouveau_bo_unmap(evo->pushbuf_bo);
	nouveau_bo_ref(NULL, &evo->pushbuf_bo);

	if (evo->user)
		iounmap(evo->user);
	kfree(evo);
}
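/* fill in an EVO DMA object.  a sketch of the layout, as far as it's
 * understood: nv50_gpuobj_dma_init() below writes the object's six dwords
 * (class 0x3d "DMA in memory" plus target/access flags, then base and
 * limit), and word 0x14 is then overwritten with the page-size flags
 * selected from memtype bit 31.
 */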
static void
nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	u32 flags5;

	if (dev_priv->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (dev_priv->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
	nv_wo32(obj, 0x14, flags5);
	dev_priv->engine.instmem.flush(obj->dev);
}
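/* allocate a DMA object and make it visible to an EVO channel through the
 * shared RAMHT.  typical use, as in nv50_evo_create() below:
 *
 *	nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
 *			    0, dev_priv->vram_size, NULL);
 *
 * which gives scanout methods a handle covering all of VRAM.
 */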
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
	struct nv50_display *disp = nv50_display(evo->dev);
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	nv50_evo_dmaobj_init(obj, memtype, base, size);

	ret = nouveau_ramht_insert(evo, handle, obj);
	if (ret)
		goto out;

	if (pobj)
		nouveau_gpuobj_ref(obj, pobj);
out:
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
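/* an EVO channel is driven much like a PFIFO channel: a 4KiB push buffer
 * in VRAM, plus the channel's PUT/GET user registers ioremapped from BAR0
 * at NV50_PDISPLAY_USER(id).
 */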
static int
nv50_evo_channel_new(struct drm_device *dev, int chid,
		     struct nouveau_channel **pevo)
{
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	evo->id = chid;
	evo->dev = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     &evo->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
	if (!evo->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pevo);
		return -ENOMEM;
	}

	/* bind primary evo channel's ramht to the channel */
	if (disp->master && evo != disp->master)
		nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
	return 0;
}
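/* bring a channel up: point the hardware at the push buffer, enable the
 * channel's DMA engine, then prime the software ring state and pad the
 * first NOUVEAU_DMA_SKIPS slots with zeroes.
 */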
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id, ret, i;
	u64 pushbuf = evo->pushbuf_bo->bo.offset;
	u32 tmp;

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}
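/* channel teardown mirrors init: mask error reporting, ack any pending
 * interrupt, stop the channel's DMA engine and wait for it to go idle.
 */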
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id;

	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}
static void
nv50_evo_destroy(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sem.bo) {
			nouveau_bo_unmap(disp->crtc[i].sem.bo);
			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
		}
		nv50_evo_channel_del(&disp->crtc[i].sync);
	}
	nouveau_gpuobj_ref(NULL, &disp->ntfy);
	nv50_evo_channel_del(&disp->master);
}
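/* build the display engine's channels: one master channel (id 0) used for
 * modesetting, and one "display sync" channel per CRTC (ids 1 and 2) used
 * for page flipping.  all of them share the master channel's RAMHT, as
 * the hardware has no per-channel object support here.
 */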
static int
nv50_evo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_gpuobj *ramht = NULL;
	struct nouveau_channel *evo;
	int ret, i, j;

	/* create primary evo channel, the one we use for modesetting
	 * purposes
	 */
	ret = nv50_evo_channel_new(dev, 0, &disp->master);
	if (ret)
		return ret;
	evo = disp->master;
	/* setup object management on it, any other evo channel will
	 * use this also as there's no per-channel support on the
	 * hardware
	 */
	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		goto err;
	}

	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		goto err;
	}

	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		goto err;
	}

	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret)
		goto err;
	/* not sure exactly what this is..
	 *
	 * the first dword of the structure is used by nvidia to wait on
	 * full completion of an EVO "update" command.
	 *
	 * method 0x8c on the master evo channel will fill a lot more of
	 * this structure with some undefined info
	 */
	ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
				  disp->ntfy->vinst, disp->ntfy->size, NULL);
	if (ret)
		goto err;
	/* create some default objects for the scanout memtypes we support */
	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
				  0, dev_priv->vram_size, NULL);
	if (ret)
		goto err;
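	/* a guess at the memtype encoding, from its use above and in
	 * nv50_evo_dmaobj_init(): bit 31 requests large pages, and bits
	 * 15:8 are passed through as the storage-type field of the DMA
	 * object (0x7a00/0x7000 for tiled 32/16-bit scanout pre-NVC0,
	 * 0xfe00 on NVC0).
	 */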
	/* create "display sync" channels and other structures we need
	 * to implement page flipping
	 */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &disp->crtc[i];
		u64 offset;

		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
		if (ret)
			goto err;

		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, &dispc->sem.bo);
		if (!ret) {
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
			else
				offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
					  offset, 4096, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					  0x7a00 : 0xfe00),
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					  0x7000 : 0xfe00),
					  0, dev_priv->vram_size, NULL);
		if (ret)
			goto err;

		/* initialise the per-crtc semaphore page */
		for (j = 0; j < 4096; j += 4)
			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
		dispc->sem.offset = 0;
	}

	return 0;

err:
	nv50_evo_destroy(dev);
	return ret;
}
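/* nv50_evo_create() runs only once; on later calls (e.g. resume from
 * suspend) the !disp->master check below skips straight to reprogramming
 * the channels.
 */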
int
nv50_evo_init(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int ret, i;

	if (!disp->master) {
		ret = nv50_evo_create(dev);
		if (ret)
			return ret;
	}

	ret = nv50_evo_channel_init(disp->master);
	if (ret)
		return ret;

	for (i = 0; i < 2; i++) {
		ret = nv50_evo_channel_init(disp->crtc[i].sync);
		if (ret)
			return ret;
	}

	return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sync)
			nv50_evo_channel_fini(disp->crtc[i].sync);
	}

	if (disp->master)
		nv50_evo_channel_fini(disp->master);

	nv50_evo_destroy(dev);
}