3 * Generic driver template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
8 * To use this template, you must at least define the following (samples
9 * given for the MGA driver):
12 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
14 * #define DRIVER_NAME "mga"
15 * #define DRIVER_DESC "Matrox G200/G400"
16 * #define DRIVER_DATE "20001127"
18 * #define DRIVER_MAJOR 2
19 * #define DRIVER_MINOR 0
20 * #define DRIVER_PATCHLEVEL 2
22 * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
24 * #define DRM(x) mga_##x
29 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
31 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33 * All Rights Reserved.
35 * Permission is hereby granted, free of charge, to any person obtaining a
36 * copy of this software and associated documentation files (the "Software"),
37 * to deal in the Software without restriction, including without limitation
38 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39 * and/or sell copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following conditions:
42 * The above copyright notice and this permission notice (including the next
43 * paragraph) shall be included in all copies or substantial portions of the
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
49 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52 * OTHER DEALINGS IN THE SOFTWARE.
/*
 * Fallback definitions: a driver that does not define one of these
 * feature/hook macros before including this template gets the
 * conservative default below (feature disabled / empty hook).
 *
 * FIX: this revision was missing the #endif terminator for every
 * #ifndef/#define pair; they are restored here.
 */
#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP 0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP 0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE 0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES 0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE 0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH 0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY 0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT 0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE 0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS 0
#endif

/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm modules in
 * the DRI cvs tree, but it is required by the kernel tree's sparc
 * driver.
 */
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH 0
#endif
#ifndef __HAVE_DRIVER_FOPS_READ
#define __HAVE_DRIVER_FOPS_READ 0
#endif
#ifndef __HAVE_DRIVER_FOPS_POLL
#define __HAVE_DRIVER_FOPS_POLL 0
#endif

/* Driver customization hooks: default to no-ops. */
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_OPEN_HELPER
#define DRIVER_OPEN_HELPER( priv, dev )
#endif
/* Default file_operations for the DRM device node: wires flush, release,
 * ioctl and fasync to the core DRM(x) entry points.  (Comments cannot be
 * added inside the backslash-continued macro body; additional members and
 * the closing brace are not visible in this revision.) */
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
	.owner = THIS_MODULE, \
	.flush = DRM(flush), \
	.release = DRM(release), \
	.ioctl = DRM(ioctl), \
	.fasync = DRM(fasync), \
/** Use an additional macro to avoid preprocessor troubles */
#define DRM_OPTIONS_FUNC DRM(options)

/*
 * Called by the kernel to parse command-line options passed via the
 * boot-loader (e.g., LILO).  It calls the insmod option routine,
 * DRM(parse_options)().
 *
 * NOTE(review): the function's braces and return statement are not
 * visible in this revision.
 */
static int __init DRM(options)( char *str )
	DRM(parse_options)( str );

/* Register "<driver-name>=" as a kernel boot parameter. */
__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
#undef DRM_OPTIONS_FUNC
/* Static table of minor devices managed by this module. */
#define MAX_DEVICES 4
static drm_device_t DRM(device)[MAX_DEVICES];	/* one slot per probed device */
static int DRM(numdevs) = 0;			/* number of slots in use */
/* Ioctl dispatch table, indexed by ioctl number.  The two integer fields
 * appear to be { auth_needed, root_only } -- DRM(ioctl)() checks
 * ioctl->root_only and ioctl->auth_needed before dispatching; confirm
 * against drm_ioctl_desc_t.  Several #endif lines for the conditional
 * groups below are not visible in this revision. */
static drm_ioctl_desc_t DRM(ioctls)[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_by_busid), 0, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { DRM(setversion), 0, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
#if __HAVE_CTX_BITMAP
	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
	/* NOTE(review): matching #endif for __HAVE_CTX_BITMAP not visible */
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
	/* Gamma only, really */
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
	/* NOTE(review): duplicate FINISH initializer -- the two entries were
	 * presumably on opposite sides of an #if/#else that is not visible
	 * here; as written, the later designated initializer silently wins. */
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
#if __HAVE_IRQ || __HAVE_DMA
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
#if __REALLY_HAVE_AGP
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
#ifdef __HAVE_VBL_IRQ
	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },

/* Total number of slots in the table above (driver entries included). */
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
/* Module option string ("drm_opts=..."), parsed in drm_init(). */
static char *drm_opts = NULL;

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
MODULE_LICENSE("GPL and additional rights");
264 static int DRM(setup)( drm_device_t *dev )
269 atomic_set( &dev->ioctl_count, 0 );
270 atomic_set( &dev->vma_count, 0 );
272 atomic_set( &dev->buf_alloc, 0 );
275 i = DRM(dma_setup)( dev );
280 dev->counters = 6 + __HAVE_COUNTERS;
281 dev->types[0] = _DRM_STAT_LOCK;
282 dev->types[1] = _DRM_STAT_OPENS;
283 dev->types[2] = _DRM_STAT_CLOSES;
284 dev->types[3] = _DRM_STAT_IOCTLS;
285 dev->types[4] = _DRM_STAT_LOCKS;
286 dev->types[5] = _DRM_STAT_UNLOCKS;
287 #ifdef __HAVE_COUNTER6
288 dev->types[6] = __HAVE_COUNTER6;
290 #ifdef __HAVE_COUNTER7
291 dev->types[7] = __HAVE_COUNTER7;
293 #ifdef __HAVE_COUNTER8
294 dev->types[8] = __HAVE_COUNTER8;
296 #ifdef __HAVE_COUNTER9
297 dev->types[9] = __HAVE_COUNTER9;
299 #ifdef __HAVE_COUNTER10
300 dev->types[10] = __HAVE_COUNTER10;
302 #ifdef __HAVE_COUNTER11
303 dev->types[11] = __HAVE_COUNTER11;
305 #ifdef __HAVE_COUNTER12
306 dev->types[12] = __HAVE_COUNTER12;
308 #ifdef __HAVE_COUNTER13
309 dev->types[13] = __HAVE_COUNTER13;
311 #ifdef __HAVE_COUNTER14
312 dev->types[14] = __HAVE_COUNTER14;
314 #ifdef __HAVE_COUNTER15
315 dev->types[14] = __HAVE_COUNTER14;
318 for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
319 atomic_set( &dev->counts[i], 0 );
321 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
322 dev->magiclist[i].head = NULL;
323 dev->magiclist[i].tail = NULL;
326 dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
328 if(dev->maplist == NULL) return -ENOMEM;
329 memset(dev->maplist, 0, sizeof(*dev->maplist));
330 INIT_LIST_HEAD(&dev->maplist->head);
332 dev->ctxlist = DRM(alloc)(sizeof(*dev->ctxlist),
334 if(dev->ctxlist == NULL) return -ENOMEM;
335 memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
336 INIT_LIST_HEAD(&dev->ctxlist->head);
339 dev->sigdata.lock = dev->lock.hw_lock = NULL;
340 init_waitqueue_head( &dev->lock.lock_queue );
341 dev->queue_count = 0;
342 dev->queue_reserved = 0;
343 dev->queue_slots = 0;
344 dev->queuelist = NULL;
345 dev->irq_enabled = 0;
346 dev->context_flag = 0;
347 dev->interrupt_flag = 0;
349 dev->last_context = 0;
350 dev->last_switch = 0;
351 dev->last_checked = 0;
352 init_waitqueue_head( &dev->context_wait );
358 dev->buf_rp = dev->buf;
359 dev->buf_wp = dev->buf;
360 dev->buf_end = dev->buf + DRM_BSZ;
361 dev->buf_async = NULL;
362 init_waitqueue_head( &dev->buf_readers );
363 init_waitqueue_head( &dev->buf_writers );
368 * The kernel's context could be created here, but is now created
369 * in drm_dma_enqueue. This is more resource-efficient for
370 * hardware that does not do DMA, but may mean that
371 * drm_select_queue fails between the time the interrupt is
372 * initialized and the time the queues are initialized.
380 * Take down the DRM device.
382 * \param dev DRM device structure.
384 * Frees every resource in \p dev.
386 * \sa drm_device and setup().
/*
 * Free every software resource attached to \p dev: the magic hash, AGP
 * memory list, vma list, map list, DMA queues and the hardware lock.
 * Runs under dev->struct_sem.  (Several structural lines -- the opening
 * brace, some closing braces and #endif terminators, and call-argument
 * continuations -- are not visible in this revision.)
 */
static int DRM(takedown)( drm_device_t *dev )
	drm_magic_entry_t *pt, *next;
	drm_map_list_t *r_list;
	struct list_head *list, *list_next;
	drm_vma_entry_t *vma, *vma_next;

	DRIVER_PRETAKEDOWN();

	if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );

	down( &dev->struct_sem );
	del_timer( &dev->timer );

	if ( dev->devname ) {
		DRM(free)( dev->devname, strlen( dev->devname ) + 1,

	DRM(free)( dev->unique, strlen( dev->unique ) + 1,

	/* Drop every authentication-magic entry in the hash. */
	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
		for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
			DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;

#if __REALLY_HAVE_AGP
	/* Clear AGP information */
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
			if ( entry->bound ) DRM(unbind_agp)( entry->memory );
			DRM(free_agp)( entry->memory, entry->pages );
			DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
		dev->agp->memory = NULL;

		if ( dev->agp->acquired ) DRM(agp_do_release)();

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;

	/* Clear vma list (only built for debugging) */
	if ( dev->vmalist ) {
		for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
			vma_next = vma->next;
			DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );

	/* Tear down every mapping on the map list. */
	list_for_each_safe( list, list_next, &dev->maplist->head ) {
		r_list = (drm_map_list_t *)list;

		if ( ( map = r_list->map ) ) {
			switch ( map->type ) {
			case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
				if ( map->mtrr >= 0 ) {
					retcode = mtrr_del( map->mtrr,
					DRM_DEBUG( "mtrr_del=%d\n", retcode );
				DRM(ioremapfree)( map->handle, map->size, dev );

				/* Do nothing here, because this is all
				 * handled in the AGP/GART driver. */
			case _DRM_SCATTER_GATHER:
				/* Handle it, but do nothing, if HAVE_SG */
				DRM(sg_cleanup)(dev->sg);
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
	DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);

#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
	/* Destroy the per-context DMA queues. */
	if ( dev->queuelist ) {
		for ( i = 0 ; i < dev->queue_count ; i++ ) {
#if __HAVE_DMA_WAITLIST
			DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
			if ( dev->queuelist[i] ) {
				DRM(free)( dev->queuelist[i],
					   sizeof(*dev->queuelist[0]),
				dev->queuelist[i] = NULL;
		DRM(free)( dev->queuelist,
			   dev->queue_slots * sizeof(*dev->queuelist),
		dev->queuelist = NULL;
	dev->queue_count = 0;

	DRM(dma_takedown)( dev );

	/* The SHM segment backing the hardware lock is gone; wake any
	 * sleepers so they notice. */
	if ( dev->lock.hw_lock ) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.filp = NULL;
		wake_up_interruptible( &dev->lock.lock_queue );
	up( &dev->struct_sem );
#include "drm_pciids.h"

/* PCI id match table for this driver; the entries come from drm_pciids.h.
 * NOTE(review): the initializer body and terminator are not visible in
 * this revision. */
static struct pci_device_id DRM(pciidlist)[] = {
/*
 * Register one PCI device with the DRM core: match it against the id
 * table, claim the next drm_device_t slot, register a stub minor, record
 * the device's PCI coordinates, then initialize AGP/MTRR and the context
 * bitmap where configured.  (Opening brace, local declarations, several
 * closing braces/#endifs and error-return lines are not visible in this
 * revision.)
 */
static int DRM(probe)(struct pci_dev *pdev)
#if __HAVE_CTX_BITMAP

	/* Reject devices that are not in this driver's PCI id list. */
	for (i = 0; DRM(pciidlist)[i].vendor != 0; i++) {
		if ((DRM(pciidlist)[i].vendor == pdev->vendor) &&
		    (DRM(pciidlist)[i].device == pdev->device)) {

	if (DRM(numdevs) >= MAX_DEVICES)

	dev = &(DRM(device)[DRM(numdevs)]);

	memset( (void *)dev, 0, sizeof(*dev) );
	dev->count_lock = SPIN_LOCK_UNLOCKED;
	init_timer( &dev->timer );
	sema_init( &dev->struct_sem, 1 );
	sema_init( &dev->ctxlist_sem, 1 );

	if ((dev->minor = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
	dev->device = MKDEV(DRM_MAJOR, dev->minor );
	dev->name = DRIVER_NAME;

	/* NOTE(review): hose-based domain lookup looks architecture
	 * (Alpha) specific -- the guarding #ifdef is not visible here. */
	dev->hose = pdev->sysdata;
	dev->pci_domain = dev->hose->bus->number;

	dev->pci_bus = pdev->bus->number;
	dev->pci_slot = PCI_SLOT(pdev->devfn);
	dev->pci_func = PCI_FUNC(pdev->devfn);
	dev->irq = pdev->irq;

#if __REALLY_HAVE_AGP
	dev->agp = DRM(agp_init)();
	if ( dev->agp == NULL ) {
		DRM_ERROR( "Cannot initialize the agpgart module.\n" );
		DRM(stub_unregister)(dev->minor);
		DRM(takedown)( dev );

#if __REALLY_HAVE_MTRR
	/* Mark the AGP aperture write-combining via MTRR. */
	dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
				       dev->agp->agp_info.aper_size*1024*1024,

#if __HAVE_CTX_BITMAP
	retcode = DRM(ctxbitmap_init)( dev );
		DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
		DRM(stub_unregister)(dev->minor);
		DRM(takedown)( dev );

	DRM(numdevs)++; /* no errors, mark it reserved */

	DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
		  pci_pretty_name(pdev));
644 * Module initialization. Called via init_module at module load time, or via
645 * linux/init/main.c (this is not currently supported).
647 * \return zero on success or a negative number on failure.
649 * Initializes an array of drm_device structures, and attempts to
650 * initialize all available devices, using consecutive minors, registering the
651 * stubs and initializing the AGP device.
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POSTINIT macros before and
654 * after the initialization for driver customization.
/* Module entry point: parse options, then probe every PCI device on the
 * system against this driver's id table.  (Opening brace and loop body /
 * return are not visible in this revision.) */
static int __init drm_init( void )
	struct pci_dev *pdev = NULL;

	DRM(parse_options)( drm_opts );

	while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
675 * Called via cleanup_module() at module unload time.
677 * Cleans up all DRM device, calling takedown().
/* Module exit point: unregister and tear down every device, in reverse
 * probe order, then run the driver's post-cleanup hook.  (Some closing
 * braces and #endif terminators are not visible in this revision.) */
static void __exit drm_cleanup( void )

	for (i = DRM(numdevs) - 1; i >= 0; i--) {
		dev = &(DRM(device)[i]);
		if ( DRM(stub_unregister)(dev->minor) ) {
			DRM_ERROR( "Cannot unload module\n" );
		DRM_DEBUG("minor %d unregistered\n", dev->minor);
		DRM_INFO( "Module unloaded\n" );

#if __HAVE_CTX_BITMAP
		DRM(ctxbitmap_cleanup)( dev );

#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
		/* Undo the write-combining MTRR set up in probe(). */
		if ( dev->agp && dev->agp->agp_mtrr >= 0) {
			retval = mtrr_del( dev->agp->agp_mtrr,
					   dev->agp->agp_info.aper_base,
					   dev->agp->agp_info.aper_size*1024*1024 );
			DRM_DEBUG( "mtrr_del=%d\n", retval );

		DRM(takedown)( dev );

#if __REALLY_HAVE_AGP
		/* dev->agp was kept alive through takedown(); free it now. */
		DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );

	DRIVER_POSTCLEANUP();

module_init( drm_init );
module_exit( drm_cleanup );
731 * Get version information
733 * \param inode device inode.
734 * \param filp file pointer.
735 * \param cmd command.
736 * \param arg user argument, pointing to a drm_version structure.
737 * \return zero on success or negative number on failure.
739 * Fills in the version information in \p arg.
/*
 * DRM_IOCTL_VERSION handler: copy the driver's version numbers and
 * name/date/description strings out to the drm_version structure at
 * \p arg.  (Opening brace, some locals, the macro tail and the returns
 * are not visible in this revision.)
 */
int DRM(version)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
	drm_version_t __user *argp = (void __user *)arg;
	drm_version_t version;

	if ( copy_from_user( &version, argp, sizeof(version) ) )

/* Copy one bounded string field (name/date/desc) to user space.
 * NOTE(review): after clamping the copy length to the user's buffer
 * size, the full strlen() is still written into name##_len -- apparently
 * so callers can learn the required length; confirm against libdrm. */
#define DRM_COPY( name, value ) \
	len = strlen( value ); \
	if ( len > name##_len ) len = name##_len; \
	name##_len = strlen( value ); \
	if ( len && name ) { \
		if ( copy_to_user( name, value, len ) ) \

	version.version_major = DRIVER_MAJOR;
	version.version_minor = DRIVER_MINOR;
	version.version_patchlevel = DRIVER_PATCHLEVEL;

	DRM_COPY( version.name, DRIVER_NAME );
	DRM_COPY( version.date, DRIVER_DATE );
	DRM_COPY( version.desc, DRIVER_DESC );

	if ( copy_to_user( argp, &version, sizeof(version) ) )
776 * \param inode device inode
777 * \param filp file pointer.
778 * \return zero on success or a negative number on failure.
780 * Searches the DRM device with the same minor number, calls open_helper(), and
 * increments the device open count. If the open count was previously zero,
 * i.e., it's the first time the device is opened, then calls setup().
/* Device-node open: find the drm_device_t that owns this minor, attach a
 * per-file private structure, and run first-open setup.  (Opening brace,
 * some locals, closing braces and the final return are not visible in
 * this revision.) */
int DRM(open)( struct inode *inode, struct file *filp )
	drm_device_t *dev = NULL;

	/* Match this inode's minor number against the device table. */
	for (i = 0; i < DRM(numdevs); i++) {
		if (iminor(inode) == DRM(device)[i].minor) {
			dev = &(DRM(device)[i]);

	retcode = DRM(open_helper)( inode, filp, dev );
		atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
		spin_lock( &dev->count_lock );
		if ( !dev->open_count++ ) {
			/* First opener initializes device state; the
			 * spinlock is dropped first -- setup() can sleep. */
			spin_unlock( &dev->count_lock );
			return DRM(setup)( dev );
		spin_unlock( &dev->count_lock );
817 * \param inode device inode
818 * \param filp file pointer.
819 * \return zero on success or a negative number on failure.
821 * If the hardware lock is held then free it, and take it again for the kernel
822 * context since it's necessary to reclaim buffers. Unlink the file private
823 * data from its list and free it. Decreases the open count and if it reaches
824 * zero calls takedown().
/* Device-node close: release or reclaim the hardware lock, reclaim DMA
 * buffers, destroy this fd's contexts, unlink and free the per-file
 * private data, and tear the device down on last close.  (Opening brace,
 * several closing braces and intermediate statements are not visible in
 * this revision.) */
int DRM(release)( struct inode *inode, struct file *filp )
	drm_file_t *priv = filp->private_data;

	DRM_DEBUG( "open_count = %d\n", dev->open_count );

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
		   current->pid, (long)old_encode_dev(dev->device), dev->open_count );

	/* Case 1: this fd currently holds the hardware lock -- free it. */
	if ( priv->lock_count && dev->lock.hw_lock &&
	     _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	     dev->lock.filp == filp ) {
		DRM_DEBUG( "File %p released, freeing lock for context %d\n",
			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

		DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
				_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X server. */
	/* Case 2: lock exists but is held elsewhere -- take it for the
	 * kernel context, since it is needed to reclaim buffers. */
	else if ( priv->lock_count && dev->lock.hw_lock ) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE( entry, current );

		add_wait_queue( &dev->lock.lock_queue, &entry );
			current->state = TASK_INTERRUPTIBLE;
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     DRM_KERNEL_CONTEXT ) ) {
				dev->lock.filp = filp;
				dev->lock.lock_time = jiffies;
				atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
			if ( signal_pending( current ) ) {
				retcode = -ERESTARTSYS;
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
			DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT );

	DRM(reclaim_buffers)( filp );

	/* Remove this fd from the fasync notification list. */
	DRM(fasync)( -1, filp, 0 );

	/* Destroy every non-kernel context created through this fd. */
	down( &dev->ctxlist_sem );
	if ( !list_empty( &dev->ctxlist->head ) ) {
		drm_ctx_list_t *pos, *n;

		list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
			if ( pos->tag == priv &&
			     pos->handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
				DRIVER_CTX_DTOR(pos->handle);
#if __HAVE_CTX_BITMAP
				DRM(ctxbitmap_free)( dev, pos->handle );
				list_del( &pos->head );
				DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
	up( &dev->ctxlist_sem );

	/* Unlink priv from the device's doubly-linked file list. */
	down( &dev->struct_sem );
	if ( priv->remove_auth_on_close == 1 ) {
		drm_file_t *temp = dev->file_first;
			temp->authenticated = 0;

		priv->prev->next = priv->next;
		dev->file_first = priv->next;
		priv->next->prev = priv->prev;
		dev->file_last = priv->prev;
	up( &dev->struct_sem );

	DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
	spin_lock( &dev->count_lock );
	if ( !--dev->open_count ) {
		/* Last close: refuse teardown while ioctls are in flight. */
		if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
			DRM_ERROR( "Device busy: %d %d\n",
				   atomic_read( &dev->ioctl_count ),
			spin_unlock( &dev->count_lock );
		spin_unlock( &dev->count_lock );
		return DRM(takedown)( dev );
	spin_unlock( &dev->count_lock );
973 * Called whenever a process performs an ioctl on /dev/drm.
975 * \param inode device inode.
976 * \param filp file pointer.
977 * \param cmd command.
978 * \param arg user argument.
979 * \return zero on success or negative number on failure.
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges where required, and dispatches to the respective function.
/* Central ioctl dispatcher: validate the ioctl number, enforce the
 * entry's root_only/auth_needed flags, then call the handler from
 * DRM(ioctls).  (Opening brace, some locals/returns and closing braces
 * are not visible in this revision.) */
int DRM(ioctl)( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ioctl_desc_t *ioctl;
	int nr = DRM_IOCTL_NR(cmd);

	/* ioctl_count tracks in-flight ioctls so release() can detect a
	 * busy device; counts[_DRM_STAT_IOCTLS] is the exported statistic. */
	atomic_inc( &dev->ioctl_count );
	atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );

	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
		   current->pid, cmd, nr, (long)old_encode_dev(dev->device),
		   priv->authenticated );

	if ( nr >= DRIVER_IOCTL_COUNT ) {
	ioctl = &DRM(ioctls)[nr];
		DRM_DEBUG( "no function\n" );
	} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
		    ( ioctl->auth_needed && !priv->authenticated ) ) {
	retcode = func( inode, filp, cmd, arg );

	atomic_dec( &dev->ioctl_count );
1026 * \param inode device inode.
1027 * \param filp file pointer.
1028 * \param cmd command.
1029 * \param arg user argument, pointing to a drm_lock structure.
1030 * \return zero on success or negative number on failure.
 * Add the current task to the lock wait queue, and attempt to take the lock.
/* DRM_IOCTL_LOCK handler: sleep on the lock wait queue until the hardware
 * lock is taken for the requesting context (or a signal arrives), then
 * run the optional DMA-flush/ready/quiescence steps.  (Opening brace,
 * several closing braces/#endifs and intermediate statements are not
 * visible in this revision.) */
int DRM(lock)( struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE( entry, current );
#if __HAVE_MULTIPLE_DMA_QUEUES

	if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )

	/* User space may never lock as the kernel context. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );

	DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		   lock.context, current->pid,
		   dev->lock.hw_lock->lock, lock.flags );

#if __HAVE_DMA_QUEUE
	if ( lock.context < 0 )
#elif __HAVE_MULTIPLE_DMA_QUEUES
	if ( lock.context < 0 || lock.context >= dev->queue_count )
	q = dev->queuelist[lock.context];

#if __HAVE_DMA_FLUSH
	ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );

	/* Sleep until the lock is acquired or a signal is delivered. */
	add_wait_queue( &dev->lock.lock_queue, &entry );
		current->state = TASK_INTERRUPTIBLE;
		if ( !dev->lock.hw_lock ) {
			/* Device has been unregistered */
		if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
			dev->lock.filp = filp;
			dev->lock.lock_time = jiffies;
			atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
			break;	/* Got lock */

		if ( signal_pending( current ) ) {
	current->state = TASK_RUNNING;
	remove_wait_queue( &dev->lock.lock_queue, &entry );

#if __HAVE_DMA_FLUSH
	DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */

	/* Block the job-control signals (STOP/TSTP/TTIN/TTOU) while the
	 * lock is held; DRM(notifier) is consulted on delivery. */
	sigemptyset( &dev->sigmask );
	sigaddset( &dev->sigmask, SIGSTOP );
	sigaddset( &dev->sigmask, SIGTSTP );
	sigaddset( &dev->sigmask, SIGTTIN );
	sigaddset( &dev->sigmask, SIGTTOU );
	dev->sigdata.context = lock.context;
	dev->sigdata.lock = dev->lock.hw_lock;
	block_all_signals( DRM(notifier),
			   &dev->sigdata, &dev->sigmask );

#if __HAVE_DMA_READY
	if ( lock.flags & _DRM_LOCK_READY ) {

#if __HAVE_DMA_QUIESCENT
	if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
		DRIVER_DMA_QUIESCENT();

	/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the
	 * drm modules in the DRI cvs tree, but it is required
	 * by the Sparc driver.
	 */
#if __HAVE_KERNEL_CTX_SWITCH
	if ( dev->last_context != lock.context ) {
		DRM(context_switch)(dev, dev->last_context,

	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1146 * \param inode device inode.
1147 * \param filp file pointer.
1148 * \param cmd command.
1149 * \param arg user argument, pointing to a drm_lock structure.
1150 * \return zero on success or negative number on failure.
1152 * Transfer and free the lock.
/* DRM_IOCTL_UNLOCK handler: transfer the hardware lock to the kernel
 * context (or free it outright under __HAVE_KERNEL_CTX_SWITCH) and
 * unblock the signals masked in DRM(lock).  (Opening brace, some
 * closing braces and intermediate statements are not visible in this
 * revision.) */
int DRM(unlock)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;

	if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )

	/* The kernel context is never unlocked from user space. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );

	atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );

	/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm
	 * modules in the DRI cvs tree, but it is required by the
	 * Sparc driver. */
#if __HAVE_KERNEL_CTX_SWITCH
	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
	dev->lock.filp = NULL;
		/* Release the lock word with a compare-and-swap retry loop. */
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
		unsigned int old, new, prev, ctx;

			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	wake_up_interruptible(&dev->lock.lock_queue);
	DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
			    DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE
	DRM(dma_schedule)( dev, 1 );

	if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
			     DRM_KERNEL_CONTEXT ) ) {

#endif /* !__HAVE_KERNEL_CTX_SWITCH */

	/* Undo the signal blocking installed by DRM(lock). */
	unblock_all_signals();