1
/* drm_drv.h -- Generic driver template -*- linux-c -*-
2
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
4
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8
* Permission is hereby granted, free of charge, to any person obtaining a
9
* copy of this software and associated documentation files (the "Software"),
10
* to deal in the Software without restriction, including without limitation
11
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
12
* and/or sell copies of the Software, and to permit persons to whom the
13
* Software is furnished to do so, subject to the following conditions:
15
* The above copyright notice and this permission notice (including the next
16
* paragraph) shall be included in all copies or substantial portions of the
19
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25
* OTHER DEALINGS IN THE SOFTWARE.
28
* Rickard E. (Rik) Faith <faith@valinux.com>
29
* Gareth Hughes <gareth@valinux.com>
33
* To use this template, you must at least define the following (samples
34
* given for the MGA driver):
36
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
38
* #define DRIVER_NAME "mga"
39
* #define DRIVER_DESC "Matrox G200/G400"
40
* #define DRIVER_DATE "20001127"
42
* #define DRIVER_MAJOR 2
43
* #define DRIVER_MINOR 0
44
* #define DRIVER_PATCHLEVEL 2
46
* #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
48
* #define DRM(x) mga_##x
51
#ifndef __MUST_HAVE_AGP
52
#define __MUST_HAVE_AGP 0
54
#ifndef __HAVE_CTX_BITMAP
55
#define __HAVE_CTX_BITMAP 0
57
#ifndef __HAVE_DMA_IRQ
58
#define __HAVE_DMA_IRQ 0
60
#ifndef __HAVE_DMA_QUEUE
61
#define __HAVE_DMA_QUEUE 0
63
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
64
#define __HAVE_MULTIPLE_DMA_QUEUES 0
66
#ifndef __HAVE_DMA_SCHEDULE
67
#define __HAVE_DMA_SCHEDULE 0
69
#ifndef __HAVE_DMA_FLUSH
70
#define __HAVE_DMA_FLUSH 0
72
#ifndef __HAVE_DMA_READY
73
#define __HAVE_DMA_READY 0
75
#ifndef __HAVE_DMA_QUIESCENT
76
#define __HAVE_DMA_QUIESCENT 0
78
#ifndef __HAVE_RELEASE
79
#define __HAVE_RELEASE 0
81
#ifndef __HAVE_COUNTERS
82
#define __HAVE_COUNTERS 0
87
#ifndef __HAVE_KERNEL_CTX_SWITCH
88
#define __HAVE_KERNEL_CTX_SWITCH 0
94
#ifndef DRIVER_PREINIT
95
#define DRIVER_PREINIT()
97
#ifndef DRIVER_POSTINIT
98
#define DRIVER_POSTINIT()
100
#ifndef DRIVER_PRERELEASE
101
#define DRIVER_PRERELEASE()
103
#ifndef DRIVER_PRETAKEDOWN
104
#define DRIVER_PRETAKEDOWN()
106
#ifndef DRIVER_POSTCLEANUP
107
#define DRIVER_POSTCLEANUP()
109
#ifndef DRIVER_PRESETUP
110
#define DRIVER_PRESETUP()
112
#ifndef DRIVER_POSTSETUP
113
#define DRIVER_POSTSETUP()
115
#ifndef DRIVER_IOCTLS
116
#define DRIVER_IOCTLS
122
* The default number of instances (minor numbers) to initialize.
124
#ifndef DRIVER_NUM_CARDS
125
#define DRIVER_NUM_CARDS 1
129
static int DRM(init)(device_t nbdev);
130
static void DRM(cleanup)(device_t nbdev);
131
#elif defined(__NetBSD__)
132
static int DRM(init)(drm_device_t *);
133
static void DRM(cleanup)(drm_device_t *);
137
#define CDEV_MAJOR 145
138
#define DRIVER_SOFTC(unit) \
139
((drm_device_t *) devclass_get_softc(DRM(devclass), unit))
141
#if __REALLY_HAVE_AGP
142
MODULE_DEPEND(DRIVER_NAME, agp, 1, 1, 1);
145
MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
147
#endif /* __FreeBSD__ */
150
#define CDEV_MAJOR 90
151
#define DRIVER_SOFTC(unit) \
152
((drm_device_t *) device_lookup(&DRM(_cd), unit))
153
#endif /* __NetBSD__ */
155
static drm_ioctl_desc_t DRM(ioctls)[] = {
156
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
157
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
158
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
159
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
160
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
161
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
162
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
164
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
165
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
166
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
167
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
169
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
170
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
172
#if __HAVE_CTX_BITMAP
173
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
174
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
177
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
178
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
179
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
180
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
181
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
182
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
183
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
185
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
186
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
188
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
189
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
190
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
193
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
194
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
195
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
196
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
197
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
199
/* The DRM_IOCTL_DMA ioctl should be defined by the driver.
201
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
204
#if __REALLY_HAVE_AGP
205
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
206
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
207
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
208
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
209
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
210
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
211
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
212
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
216
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
217
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
221
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
227
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
229
const char *DRM(find_description)(int vendor, int device);
232
static int DRM(probe)(device_t dev)
234
const char *s = NULL;
236
int pciid=pci_get_devid(dev);
237
int vendor = (pciid & 0x0000ffff);
238
int device = (pciid & 0xffff0000) >> 16;
240
s = DRM(find_description)(vendor, device);
242
device_set_desc(dev, s);
249
static int DRM(attach)(device_t dev)
251
return DRM(init)(dev);
254
static int DRM(detach)(device_t dev)
259
static device_method_t DRM(methods)[] = {
260
/* Device interface */
261
DEVMETHOD(device_probe, DRM( probe)),
262
DEVMETHOD(device_attach, DRM( attach)),
263
DEVMETHOD(device_detach, DRM( detach)),
268
static driver_t DRM(driver) = {
271
sizeof(drm_device_t),
274
static devclass_t DRM( devclass);
276
static struct cdevsw DRM( cdevsw) = {
277
/* open */ DRM( open ),
278
/* close */ DRM( close ),
279
/* read */ DRM( read ),
280
/* write */ DRM( write ),
281
/* ioctl */ DRM( ioctl ),
282
/* poll */ DRM( poll ),
283
/* mmap */ DRM( mmap ),
284
/* strategy */ nostrategy,
285
/* name */ DRIVER_NAME,
286
/* maj */ CDEV_MAJOR,
289
/* flags */ D_TTY | D_TRACKCLOSE,
290
#if __FreeBSD_version >= 500000
297
#elif defined(__NetBSD__)
298
int DRM(probe)(struct device *parent, struct cfdata *match, void *aux);
299
void DRM(attach)(struct device *parent, struct device *self, void *aux);
300
int DRM(detach)(struct device *self, int flags);
301
int DRM(activate)(struct device *self, enum devact act);
303
struct cfattach DRM(_ca) = {
304
sizeof(drm_device_t), DRM(probe),
305
DRM(attach), DRM(detach), DRM(activate) };
307
int DRM(probe)(struct device *parent, struct cfdata *match, void *aux)
309
struct pci_attach_args *pa = aux;
312
desc = DRM(find_description)(PCI_VENDOR(pa->pa_id), PCI_PRODUCT(pa->pa_id));
318
void DRM(attach)(struct device *parent, struct device *self, void *aux)
320
struct pci_attach_args *pa = aux;
321
drm_device_t *dev = (drm_device_t *)self;
323
memcpy(&dev->pa, aux, sizeof(dev->pa));
325
DRM_INFO("%s", DRM(find_description)(PCI_VENDOR(pa->pa_id), PCI_PRODUCT(pa->pa_id)));
329
int DRM(detach)(struct device *self, int flags)
331
DRM(cleanup)((drm_device_t *)self);
335
int DRM(activate)(struct device *self, enum devact act)
342
case DVACT_DEACTIVATE:
351
const char *DRM(find_description)(int vendor, int device) {
352
const char *s = NULL;
355
while ( !done && (DRM(devicelist)[i].vendor != 0 ) ) {
356
if ( (DRM(devicelist)[i].vendor == vendor) &&
357
(DRM(devicelist)[i].device == device) ) {
359
if ( DRM(devicelist)[i].supported )
360
s = DRM(devicelist)[i].name;
362
DRM_INFO("%s not supported\n", DRM(devicelist)[i].name);
369
static int DRM(setup)( drm_device_t *dev )
374
atomic_set( &dev->ioctl_count, 0 );
375
atomic_set( &dev->vma_count, 0 );
377
atomic_set( &dev->buf_alloc, 0 );
380
i = DRM(dma_setup)( dev );
385
dev->counters = 6 + __HAVE_COUNTERS;
386
dev->types[0] = _DRM_STAT_LOCK;
387
dev->types[1] = _DRM_STAT_OPENS;
388
dev->types[2] = _DRM_STAT_CLOSES;
389
dev->types[3] = _DRM_STAT_IOCTLS;
390
dev->types[4] = _DRM_STAT_LOCKS;
391
dev->types[5] = _DRM_STAT_UNLOCKS;
392
#ifdef __HAVE_COUNTER6
393
dev->types[6] = __HAVE_COUNTER6;
395
#ifdef __HAVE_COUNTER7
396
dev->types[7] = __HAVE_COUNTER7;
398
#ifdef __HAVE_COUNTER8
399
dev->types[8] = __HAVE_COUNTER8;
401
#ifdef __HAVE_COUNTER9
402
dev->types[9] = __HAVE_COUNTER9;
404
#ifdef __HAVE_COUNTER10
405
dev->types[10] = __HAVE_COUNTER10;
407
#ifdef __HAVE_COUNTER11
408
dev->types[11] = __HAVE_COUNTER11;
410
#ifdef __HAVE_COUNTER12
411
dev->types[12] = __HAVE_COUNTER12;
413
#ifdef __HAVE_COUNTER13
414
dev->types[13] = __HAVE_COUNTER13;
416
#ifdef __HAVE_COUNTER14
417
dev->types[14] = __HAVE_COUNTER14;
419
#ifdef __HAVE_COUNTER15
420
dev->types[15] = __HAVE_COUNTER15;	/* fix copy-paste bug: this branch is #ifdef __HAVE_COUNTER15, but assigned types[14]/COUNTER14, clobbering the COUNTER14 slot and leaving COUNTER15 uninitialized */
423
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
424
atomic_set( &dev->counts[i], 0 );
426
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
427
dev->magiclist[i].head = NULL;
428
dev->magiclist[i].tail = NULL;
431
dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
433
if(dev->maplist == NULL) return DRM_ERR(ENOMEM);
434
memset(dev->maplist, 0, sizeof(*dev->maplist));
435
TAILQ_INIT(dev->maplist);
439
dev->lock.hw_lock = NULL;
440
dev->lock.lock_queue = 0;
441
dev->queue_count = 0;
442
dev->queue_reserved = 0;
443
dev->queue_slots = 0;
444
dev->queuelist = NULL;
446
dev->context_flag = 0;
447
dev->interrupt_flag = 0;
449
dev->last_context = 0;
450
dev->last_switch = 0;
451
dev->last_checked = 0;
452
#if __FreeBSD_version >= 500000
453
callout_init( &dev->timer, 1 );
455
callout_init( &dev->timer );
457
dev->context_wait = 0;
462
dev->buf_rp = dev->buf;
463
dev->buf_wp = dev->buf;
464
dev->buf_end = dev->buf + DRM_BSZ;
466
dev->buf_sigio = NULL;
467
#elif defined(__NetBSD__)
470
dev->buf_readers = 0;
471
dev->buf_writers = 0;
472
dev->buf_selecting = 0;
476
/* The kernel's context could be created here, but is now created
477
* in drm_dma_enqueue. This is more resource-efficient for
478
* hardware that does not do DMA, but may mean that
479
* drm_select_queue fails between the time the interrupt is
480
* initialized and the time the queues are initialized.
487
static int DRM(takedown)( drm_device_t *dev )
489
drm_magic_entry_t *pt, *next;
491
drm_map_list_entry_t *list;
492
drm_vma_entry_t *vma, *vma_next;
497
DRIVER_PRETAKEDOWN();
499
if ( dev->irq ) DRM(irq_uninstall)( dev );
503
callout_stop( &dev->timer );
505
if ( dev->devname ) {
506
DRM(free)( dev->devname, strlen( dev->devname ) + 1,
512
DRM(free)( dev->unique, strlen( dev->unique ) + 1,
518
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
519
for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
521
DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
523
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
526
#if __REALLY_HAVE_AGP
527
/* Clear AGP information */
529
drm_agp_mem_t *entry;
530
drm_agp_mem_t *nexte;
532
/* Remove AGP resources, but leave dev->agp
533
intact until drv_cleanup is called. */
534
for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
536
if ( entry->bound ) DRM(unbind_agp)( entry->handle );
537
DRM(free_agp)( entry->handle, entry->pages );
538
DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
540
dev->agp->memory = NULL;
542
if ( dev->agp->acquired ) DRM(agp_do_release)();
544
dev->agp->acquired = 0;
545
dev->agp->enabled = 0;
549
/* Clear vma list (only built for debugging) */
550
if ( dev->vmalist ) {
551
for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
552
vma_next = vma->next;
553
DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
559
while ((list=TAILQ_FIRST(dev->maplist))) {
561
switch ( map->type ) {
563
case _DRM_FRAME_BUFFER:
564
#if __REALLY_HAVE_MTRR
565
if ( map->mtrr >= 0 ) {
569
struct mem_range_desc mrdesc;
570
mrdesc.mr_base = map->offset;
571
mrdesc.mr_len = map->size;
572
mrdesc.mr_flags = MDF_WRITECOMBINE;
573
act = MEMRANGE_SET_UPDATE;
574
bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
575
retcode = mem_range_attr_set(&mrdesc, &act);
577
#elif defined __NetBSD__
580
mtrrmap.base = map->offset;
581
mtrrmap.len = map->size;
582
mtrrmap.type = MTRR_TYPE_WC;
584
/*mtrrmap.owner = p->p_pid;*/
585
/* XXX: Use curproc here? */
586
retcode = mtrr_set( &mtrrmap, &one,
587
DRM_CURPROC, MTRR_GETSET_KERNEL);
589
DRM_DEBUG( "mtrr_del=%d\n", retcode );
592
DRM(ioremapfree)( map->handle, map->size );
595
DRM(free)(map->handle,
601
/* Do nothing here, because this is all
602
* handled in the AGP/GART driver.
605
case _DRM_SCATTER_GATHER:
606
/* Handle it, but do nothing, if REALLY_HAVE_SG
611
DRM(sg_cleanup)(dev->sg);
617
TAILQ_REMOVE(dev->maplist, list, link);
618
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
619
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
621
DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
625
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
626
if ( dev->queuelist ) {
627
for ( i = 0 ; i < dev->queue_count ; i++ ) {
628
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
629
if ( dev->queuelist[i] ) {
630
DRM(free)( dev->queuelist[i],
631
sizeof(*dev->queuelist[0]),
633
dev->queuelist[i] = NULL;
636
DRM(free)( dev->queuelist,
637
dev->queue_slots * sizeof(*dev->queuelist),
639
dev->queuelist = NULL;
641
dev->queue_count = 0;
645
DRM(dma_takedown)( dev );
647
if ( dev->lock.hw_lock ) {
648
dev->lock.hw_lock = NULL; /* SHM removed */
650
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
657
/* linux: drm_init is called via init_module at module load time, or via
658
* linux/init/main.c (this is not currently supported).
659
* bsd: drm_init is called via the attach function per device.
662
static int DRM(init)( device_t nbdev )
663
#elif defined(__NetBSD__)
664
static int DRM(init)( drm_device_t *dev )
671
#if __HAVE_CTX_BITMAP
678
unit = device_get_unit(nbdev);
679
dev = device_get_softc(nbdev);
680
memset( (void *)dev, 0, sizeof(*dev) );
682
dev->devnode = make_dev( &DRM(cdevsw),
687
"dri/card%d", unit );
688
#elif defined(__NetBSD__)
689
unit = minor(dev->device.dv_unit);
691
DRM_SPININIT(dev->count_lock, "drm device");
692
lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
693
dev->name = DRIVER_NAME;
695
DRM(sysctl_init)(dev);
696
TAILQ_INIT(&dev->files);
698
#if __REALLY_HAVE_AGP
699
dev->agp = DRM(agp_init)();
701
if ( dev->agp == NULL ) {
702
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
703
DRM(sysctl_cleanup)( dev );
705
destroy_dev(dev->devnode);
707
DRM(takedown)( dev );
708
return DRM_ERR(ENOMEM);
710
#endif /* __MUST_HAVE_AGP */
711
#if __REALLY_HAVE_MTRR
714
int retcode = 0, act;
715
struct mem_range_desc mrdesc;
716
mrdesc.mr_base = dev->agp->info.ai_aperture_base;
717
mrdesc.mr_len = dev->agp->info.ai_aperture_size;
718
mrdesc.mr_flags = MDF_WRITECOMBINE;
719
act = MEMRANGE_SET_UPDATE;
720
bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
721
retcode = mem_range_attr_set(&mrdesc, &act);
722
dev->agp->agp_mtrr=1;
723
#elif defined __NetBSD__
726
mtrrmap.base = dev->agp->info.ai_aperture_base;
727
/* Might need a multiplier here XXX */
728
mtrrmap.len = dev->agp->info.ai_aperture_size;
729
mtrrmap.type = MTRR_TYPE_WC;
730
mtrrmap.flags = MTRR_VALID;
731
dev->agp->agp_mtrr = mtrr_set( &mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
732
#endif /* __NetBSD__ */
734
#endif /* __REALLY_HAVE_MTRR */
735
#endif /* __REALLY_HAVE_AGP */
737
#if __HAVE_CTX_BITMAP
738
retcode = DRM(ctxbitmap_init)( dev );
740
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
741
DRM(sysctl_cleanup)( dev );
743
destroy_dev(dev->devnode);
745
DRM(takedown)( dev );
749
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
762
/* linux: drm_cleanup is called via cleanup_module at module unload time.
763
* bsd: drm_cleanup is called per device at module unload time.
767
static void DRM(cleanup)(device_t nbdev)
768
#elif defined(__NetBSD__)
769
static void DRM(cleanup)(drm_device_t *dev)
775
#if __REALLY_HAVE_MTRR
779
#endif /* __NetBSD__ */
780
#endif /* __REALLY_HAVE_MTRR */
785
dev = device_get_softc(nbdev);
787
DRM(sysctl_cleanup)( dev );
789
destroy_dev(dev->devnode);
791
#if __HAVE_CTX_BITMAP
792
DRM(ctxbitmap_cleanup)( dev );
795
#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
796
if ( dev->agp && dev->agp->agp_mtrr >= 0) {
797
#if defined(__NetBSD__)
798
mtrrmap.base = dev->agp->info.ai_aperture_base;
799
mtrrmap.len = dev->agp->info.ai_aperture_size;
802
retval = mtrr_set( &mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
807
DRM(takedown)( dev );
809
#if __REALLY_HAVE_AGP
812
DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
816
DRIVER_POSTCLEANUP();
818
DRM_SPINUNINIT(dev->count_lock);
822
int DRM(version)( DRM_IOCTL_ARGS )
824
drm_version_t version;
827
DRM_COPY_FROM_USER_IOCTL( version, (drm_version_t *)data, sizeof(version) );
829
#define DRM_COPY( name, value ) \
830
len = strlen( value ); \
831
if ( len > name##_len ) len = name##_len; \
832
name##_len = strlen( value ); \
833
if ( len && name ) { \
834
if ( DRM_COPY_TO_USER( name, value, len ) ) \
835
return DRM_ERR(EFAULT); \
838
version.version_major = DRIVER_MAJOR;
839
version.version_minor = DRIVER_MINOR;
840
version.version_patchlevel = DRIVER_PATCHLEVEL;
842
DRM_COPY( version.name, DRIVER_NAME );
843
DRM_COPY( version.date, DRIVER_DATE );
844
DRM_COPY( version.desc, DRIVER_DESC );
846
DRM_COPY_TO_USER_IOCTL( (drm_version_t *)data, version, sizeof(version) );
851
int DRM(open)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
853
drm_device_t *dev = NULL;
856
dev = DRIVER_SOFTC(minor(kdev));
858
DRM_DEBUG( "open_count = %d\n", dev->open_count );
860
retcode = DRM(open_helper)(kdev, flags, fmt, p, dev);
863
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
864
DRM_SPINLOCK( &dev->count_lock );
866
device_busy(dev->device);
868
if ( !dev->open_count++ )
869
retcode = DRM(setup)( dev );
870
DRM_SPINUNLOCK( &dev->count_lock );
876
int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
882
DRM_DEBUG( "open_count = %d\n", dev->open_count );
883
priv = DRM(find_file_by_proc)(dev, p);
885
DRM_DEBUG("can't find authenticator\n");
891
/* ========================================================
892
* Begin inline drm_release
896
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
897
DRM_CURRENTPID, (long)dev->device, dev->open_count );
898
#elif defined(__NetBSD__)
899
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
900
DRM_CURRENTPID, (long)&dev->device, dev->open_count);
903
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
904
&& dev->lock.pid == DRM_CURRENTPID) {
905
DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
907
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
908
#if HAVE_DRIVER_RELEASE
912
&dev->lock.hw_lock->lock,
913
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
915
/* FIXME: may require heavy-handed reset of
916
hardware at this point, possibly
917
processed via a callback to the X
921
else if ( dev->lock.hw_lock ) {
922
/* The lock is required to reclaim buffers */
924
if ( !dev->lock.hw_lock ) {
925
/* Device has been unregistered */
926
retcode = DRM_ERR(EINTR);
929
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
930
DRM_KERNEL_CONTEXT ) ) {
931
dev->lock.pid = p->p_pid;
932
dev->lock.lock_time = jiffies;
933
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
934
break; /* Got lock */
938
atomic_inc( &dev->total_sleeps );
940
retcode = tsleep(&dev->lock.lock_queue,
949
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
950
DRM_KERNEL_CONTEXT );
954
DRM(reclaim_buffers)( dev, priv->pid );
957
#if defined (__FreeBSD__) && (__FreeBSD_version >= 500000)
958
funsetown(&dev->buf_sigio);
959
#elif defined(__FreeBSD__)
960
funsetown(dev->buf_sigio);
961
#elif defined(__NetBSD__)
963
#endif /* __NetBSD__ */
966
priv = DRM(find_file_by_proc)(dev, p);
970
TAILQ_REMOVE(&dev->files, priv, link);
971
DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
977
/* ========================================================
978
* End inline drm_release
981
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
982
DRM_SPINLOCK( &dev->count_lock );
984
device_unbusy(dev->device);
986
if ( !--dev->open_count ) {
987
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
988
DRM_ERROR( "Device busy: %ld %d\n",
989
(unsigned long)atomic_read( &dev->ioctl_count ),
991
DRM_SPINUNLOCK( &dev->count_lock );
992
return DRM_ERR(EBUSY);
994
DRM_SPINUNLOCK( &dev->count_lock );
995
return DRM(takedown)( dev );
997
DRM_SPINUNLOCK( &dev->count_lock );
1002
/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
1004
int DRM(ioctl)( DRM_IOCTL_ARGS )
1008
drm_ioctl_desc_t *ioctl;
1010
int nr = DRM_IOCTL_NR(cmd);
1013
atomic_inc( &dev->ioctl_count );
1014
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
1015
++priv->ioctl_count;
1018
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
1019
DRM_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated );
1020
#elif defined(__NetBSD__)
1021
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
1022
DRM_CURRENTPID, cmd, nr, (long)&dev->device, priv->authenticated );
1027
atomic_dec(&dev->ioctl_count);
1031
atomic_dec(&dev->ioctl_count);
1032
dev->flags |= FASYNC;
1037
atomic_dec(&dev->ioctl_count);
1038
return fsetown(*(int *)data, &dev->buf_sigio);
1041
atomic_dec(&dev->ioctl_count);
1042
#if (__FreeBSD_version >= 500000)
1043
*(int *) data = fgetown(&dev->buf_sigio);
1045
*(int *) data = fgetown(dev->buf_sigio);
1049
#endif /* __FreeBSD__ */
1052
atomic_dec(&dev->ioctl_count);
1053
dev->buf_pgid = *(int *)data;
1057
atomic_dec(&dev->ioctl_count);
1058
*(int *)data = dev->buf_pgid;
1060
#endif /* __NetBSD__ */
1062
if ( nr >= DRIVER_IOCTL_COUNT ) {
1065
ioctl = &DRM(ioctls)[nr];
1069
DRM_DEBUG( "no function\n" );
1071
} else if ( ( ioctl->root_only && DRM_SUSER(p) )
1072
|| ( ioctl->auth_needed && !priv->authenticated ) ) {
1075
retcode = func( kdev, cmd, data, flags, p );
1079
atomic_dec( &dev->ioctl_count );
1080
return DRM_ERR(retcode);
1083
int DRM(lock)( DRM_IOCTL_ARGS )
1088
#if __HAVE_MULTIPLE_DMA_QUEUES
1091
#if __HAVE_DMA_HISTOGRAM
1094
dev->lck_start = start = get_cycles();
1097
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
1099
if ( lock.context == DRM_KERNEL_CONTEXT ) {
1100
DRM_ERROR( "Process %d using kernel context %d\n",
1101
DRM_CURRENTPID, lock.context );
1102
return DRM_ERR(EINVAL);
1105
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1106
lock.context, DRM_CURRENTPID,
1107
dev->lock.hw_lock->lock, lock.flags );
1109
#if __HAVE_DMA_QUEUE
1110
if ( lock.context < 0 )
1111
return DRM_ERR(EINVAL);
1112
#elif __HAVE_MULTIPLE_DMA_QUEUES
1113
if ( lock.context < 0 || lock.context >= dev->queue_count )
1114
return DRM_ERR(EINVAL);
1115
q = dev->queuelist[lock.context];
1118
#if __HAVE_DMA_FLUSH
1119
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
1123
if ( !dev->lock.hw_lock ) {
1124
/* Device has been unregistered */
1128
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
1130
dev->lock.pid = DRM_CURRENTPID;
1131
dev->lock.lock_time = jiffies;
1132
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
1133
break; /* Got lock */
1137
ret = tsleep((void *)&dev->lock.lock_queue,
1146
#if __HAVE_DMA_FLUSH
1147
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
1152
#if __HAVE_DMA_READY
1153
if ( lock.flags & _DRM_LOCK_READY ) {
1157
#if __HAVE_DMA_QUIESCENT
1158
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1159
DRIVER_DMA_QUIESCENT();
1162
#if __HAVE_KERNEL_CTX_SWITCH
1163
if ( dev->last_context != lock.context ) {
1164
DRM(context_switch)(dev, dev->last_context,
1170
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1172
#if __HAVE_DMA_HISTOGRAM
1173
atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
1176
return DRM_ERR(ret);
1180
int DRM(unlock)( DRM_IOCTL_ARGS )
1185
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) ) ;
1187
if ( lock.context == DRM_KERNEL_CONTEXT ) {
1188
DRM_ERROR( "Process %d using kernel context %d\n",
1189
DRM_CURRENTPID, lock.context );
1190
return DRM_ERR(EINVAL);
1193
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
1195
#if __HAVE_KERNEL_CTX_SWITCH
1196
/* We no longer really hold it, but if we are the next
1197
* agent to request it then we should just be able to
1198
* take it immediately and not eat the ioctl.
1202
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
1203
unsigned int old, new, prev, ctx;
1209
prev = cmpxchg(plock, old, new);
1210
} while (prev != old);
1212
wake_up_interruptible(&dev->lock.lock_queue);
1214
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1215
DRM_KERNEL_CONTEXT );
1216
#if __HAVE_DMA_SCHEDULE
1217
DRM(dma_schedule)( dev, 1 );
1220
/* FIXME: Do we ever really need to check this?
1222
if ( 1 /* !dev->context_flag */ ) {
1223
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1224
DRM_KERNEL_CONTEXT ) ) {
1228
#endif /* !__HAVE_KERNEL_CTX_SWITCH */
1234
#define LINUX_IOCTL_DRM_MIN 0x6400
1235
#define LINUX_IOCTL_DRM_MAX 0x64ff
1237
static linux_ioctl_function_t DRM( linux_ioctl);
1238
static struct linux_ioctl_handler DRM( handler) = {DRM( linux_ioctl), LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
1239
SYSINIT (DRM( register), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &DRM( handler));
1240
SYSUNINIT(DRM( unregister), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &DRM( handler));
1242
#define LINUX_IOC_VOID IOC_VOID
1243
#define LINUX_IOC_IN IOC_OUT /* Linux has the values the other way around */
1244
#define LINUX_IOC_OUT IOC_IN
1247
* Linux emulation IOCTL
1250
DRM(linux_ioctl)(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1252
u_long cmd = args->cmd;
1253
#define STK_PARAMS 128
1255
char stkbuf[STK_PARAMS];
1258
caddr_t data=NULL, memp=NULL;
1259
u_int size = IOCPARM_LEN(cmd);
1261
#if (__FreeBSD_version >= 500000)
1264
struct file *fp = p->p_fd->fd_ofiles[args->fd];
1266
if ( size > STK_PARAMS ) {
1267
if ( size > IOCPARM_MAX )
1269
memp = malloc( (u_long)size, DRM(M_DRM), M_WAITOK );
1275
if ( cmd & LINUX_IOC_IN ) {
1277
error = copyin( (caddr_t)args->arg, data, (u_int)size );
1280
free( data, DRM(M_DRM) );
1284
data = (caddr_t)args->arg;
1286
} else if ( (cmd & LINUX_IOC_OUT) && size ) {
1288
* Zero the buffer so the user always
1289
* gets back something deterministic.
1291
bzero( data, size );
1292
} else if ( cmd & LINUX_IOC_VOID ) {
1293
*(caddr_t *)data = (caddr_t)args->arg;
1296
#if (__FreeBSD_version >= 500000)
1297
if ( (error = fget( p, args->fd, &fp )) != 0 ) {
1299
free( memp, DRM(M_DRM) );
1302
error = fo_ioctl( fp, cmd, data, p->td_ucred, p );
1305
error = fo_ioctl( fp, cmd, data, p );
1307
if ( error == 0 && (cmd & LINUX_IOC_OUT) && size )
1308
error = copyout( data, (caddr_t)args->arg, (u_int)size );
1310
free( memp, DRM(M_DRM) );
1313
#endif /* DRM_LINUX */