/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27
#include "nouveau_drv.h"
28
#include "nouveau_ramht.h"
31
nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
33
struct drm_device *dev = chan->dev;
34
struct drm_nouveau_private *dev_priv = dev->dev_private;
35
struct nouveau_ramht *ramht = chan->ramht;
39
NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
41
for (i = 32; i > 0; i -= ramht->bits) {
42
hash ^= (handle & ((1 << ramht->bits) - 1));
43
handle >>= ramht->bits;
46
if (dev_priv->card_type < NV_50)
47
hash ^= chan->id << (ramht->bits - 4);
50
NV_DEBUG(dev, "hash=0x%08x\n", hash);
55
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
58
struct drm_nouveau_private *dev_priv = dev->dev_private;
59
u32 ctx = nv_ro32(ramht, offset + 4);
61
if (dev_priv->card_type < NV_40)
62
return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
67
nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
68
struct nouveau_gpuobj *ramht, u32 offset)
70
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
71
u32 ctx = nv_ro32(ramht, offset + 4);
73
if (dev_priv->card_type >= NV_50)
75
else if (dev_priv->card_type >= NV_40)
77
((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
80
((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
84
nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
85
struct nouveau_gpuobj *gpuobj)
87
struct drm_device *dev = chan->dev;
88
struct drm_nouveau_private *dev_priv = dev->dev_private;
89
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
90
struct nouveau_ramht_entry *entry;
91
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
95
if (nouveau_ramht_find(chan, handle))
98
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
101
entry->channel = chan;
102
entry->gpuobj = NULL;
103
entry->handle = handle;
104
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
106
if (dev_priv->card_type < NV_40) {
107
ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
108
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
111
if (dev_priv->card_type < NV_50) {
112
ctx = (gpuobj->pinst >> 4) |
113
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
116
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117
ctx = (gpuobj->cinst << 10) |
119
chan->id; /* HASH_TAG */
121
ctx = (gpuobj->cinst >> 4) |
123
NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
127
spin_lock_irqsave(&chan->ramht->lock, flags);
128
list_add(&entry->head, &chan->ramht->entries);
130
co = ho = nouveau_ramht_hash_handle(chan, handle);
132
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
134
"insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
135
chan->id, co, handle, ctx);
136
nv_wo32(ramht, co + 0, handle);
137
nv_wo32(ramht, co + 4, ctx);
139
spin_unlock_irqrestore(&chan->ramht->lock, flags);
143
NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
144
chan->id, co, nv_ro32(ramht, co));
147
if (co >= ramht->size)
151
NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
152
list_del(&entry->head);
153
spin_unlock_irqrestore(&chan->ramht->lock, flags);
158
static struct nouveau_ramht_entry *
159
nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
161
struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
162
struct nouveau_ramht_entry *entry;
168
spin_lock_irqsave(&ramht->lock, flags);
169
list_for_each_entry(entry, &ramht->entries, head) {
170
if (entry->channel == chan &&
171
(!handle || entry->handle == handle)) {
172
list_del(&entry->head);
173
spin_unlock_irqrestore(&ramht->lock, flags);
178
spin_unlock_irqrestore(&ramht->lock, flags);
184
nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
186
struct drm_device *dev = chan->dev;
187
struct drm_nouveau_private *dev_priv = dev->dev_private;
188
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
189
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
193
spin_lock_irqsave(&chan->ramht->lock, flags);
194
co = ho = nouveau_ramht_hash_handle(chan, handle);
196
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
197
nouveau_ramht_entry_same_channel(chan, ramht, co) &&
198
(handle == nv_ro32(ramht, co))) {
200
"remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
201
chan->id, co, handle, nv_ro32(ramht, co + 4));
202
nv_wo32(ramht, co + 0, 0x00000000);
203
nv_wo32(ramht, co + 4, 0x00000000);
209
if (co >= ramht->size)
213
NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
216
spin_unlock_irqrestore(&chan->ramht->lock, flags);
220
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
222
struct nouveau_ramht_entry *entry;
224
entry = nouveau_ramht_remove_entry(chan, handle);
228
nouveau_ramht_remove_hash(chan, entry->handle);
229
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
234
struct nouveau_gpuobj *
235
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
237
struct nouveau_ramht *ramht = chan->ramht;
238
struct nouveau_ramht_entry *entry;
239
struct nouveau_gpuobj *gpuobj = NULL;
242
if (unlikely(!chan->ramht))
245
spin_lock_irqsave(&ramht->lock, flags);
246
list_for_each_entry(entry, &chan->ramht->entries, head) {
247
if (entry->channel == chan && entry->handle == handle) {
248
gpuobj = entry->gpuobj;
252
spin_unlock_irqrestore(&ramht->lock, flags);
258
nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
259
struct nouveau_ramht **pramht)
261
struct nouveau_ramht *ramht;
263
ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
268
kref_init(&ramht->refcount);
269
ramht->bits = drm_order(gpuobj->size / 8);
270
INIT_LIST_HEAD(&ramht->entries);
271
spin_lock_init(&ramht->lock);
272
nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
279
nouveau_ramht_del(struct kref *ref)
281
struct nouveau_ramht *ramht =
282
container_of(ref, struct nouveau_ramht, refcount);
284
nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
289
nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
290
struct nouveau_channel *chan)
292
struct nouveau_ramht_entry *entry;
293
struct nouveau_ramht *ramht;
296
kref_get(&ref->refcount);
300
while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
301
nouveau_ramht_remove_hash(chan, entry->handle);
302
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
306
kref_put(&ramht->refcount, nouveau_ramht_del);