3 * Generic driver template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
8 * To use this template, you must at least define the following (samples
9 * given for the MGA driver):
12 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
14 * #define DRIVER_NAME "mga"
15 * #define DRIVER_DESC "Matrox G200/G400"
16 * #define DRIVER_DATE "20001127"
18 * #define DRIVER_MAJOR 2
19 * #define DRIVER_MINOR 0
20 * #define DRIVER_PATCHLEVEL 2
22 * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
24 * #define DRM(x) mga_##x
29 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
31 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33 * All Rights Reserved.
35 * Permission is hereby granted, free of charge, to any person obtaining a
36 * copy of this software and associated documentation files (the "Software"),
37 * to deal in the Software without restriction, including without limitation
38 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39 * and/or sell copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following conditions:
42 * The above copyright notice and this permission notice (including the next
43 * paragraph) shall be included in all copies or substantial portions of the
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
49 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52 * OTHER DEALINGS IN THE SOFTWARE.
60 /** Use an additional macro to avoid preprocessor troubles */
61 #define DRM_OPTIONS_FUNC DRM(options)
/* Boot-command-line hook: "<DRIVER_NAME>=<opts>" is forwarded to the
 * module's option parser.  NOTE(review): the extraction has dropped
 * lines here (comment terminator and function braces not visible). */
63 * Called by the kernel to parse command-line options passed via the
64 * boot-loader (e.g., LILO). It calls the insmod option routine,
67 static int __init DRM(options)( char *str )
69 DRM(parse_options)( str );
/* Register the handler for the "<DRIVER_NAME>=" boot parameter, then
 * drop the helper macro so it cannot leak into later code. */
73 __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
74 #undef DRM_OPTIONS_FUNC
/* Static table of DRM devices managed by this driver and the count of
 * slots currently in use (advanced by DRM(probe), walked backwards by
 * drm_cleanup and DRM(open)). */
78 static drm_device_t DRM(device)[MAX_DEVICES];
79 static int DRM(numdevs) = 0;
/* File operations for the /dev/drm minors.  Only some members survive
 * the extraction; .release and .fasync dispatch into the DRM core. */
81 struct file_operations DRM(fops) = {
85 .release = DRM(release),
88 .fasync = DRM(fasync),
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  Each entry is
 * { handler, flag, flag }; judging by the checks in DRM(ioctl) below,
 * the two flags appear to be auth_needed and root_only respectively —
 * TODO confirm against the drm_ioctl_desc_t definition. */
94 drm_ioctl_desc_t DRM(ioctls)[] = {
/* Query/identification ioctls — no authentication required. */
95 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
96 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
97 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
98 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_by_busid), 0, 1 },
99 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
100 [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
101 [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
102 [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { DRM(setversion), 0, 1 },
/* Authentication / master-only administrative ioctls. */
104 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
105 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
106 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
107 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
/* Memory-map management. */
109 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
110 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
112 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
113 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
/* Hardware context management. */
115 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
116 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
117 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
118 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
119 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
120 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
121 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
/* Drawables. */
123 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
124 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
/* Hardware lock. */
126 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
127 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
129 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
/* DMA buffer management. */
131 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
132 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
133 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
134 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
135 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
136 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
138 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
/* AGP management. */
141 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
142 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
143 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
144 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
145 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
146 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
147 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
148 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
/* Scatter/gather memory and vblank wait. */
151 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
152 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
154 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
/* Upper bound used by DRM(ioctl) to reject out-of-range ioctl numbers. */
159 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
/* Module-load option string ("drm_opts=..."); handed to
 * DRM(parse_options) from drm_init. */
162 static char *drm_opts = NULL;
165 MODULE_AUTHOR( DRIVER_AUTHOR );
166 MODULE_DESCRIPTION( DRIVER_DESC );
167 MODULE_PARM( drm_opts, "s" );
168 MODULE_LICENSE("GPL and additional rights");
/* Initialize a DRM device for use: runs the driver presetup hook,
 * zeroes the bookkeeping counters and lists, allocates the map and
 * context lists, resets the lock state and message buffer, and runs
 * the driver postsetup hook.  Called from DRM(open) on the first open
 * of a device.  NOTE(review): several lines (locals, braces, error
 * checks) were lost in extraction. */
170 static int DRM(setup)( drm_device_t *dev )
/* Driver-specific pre-initialization, if provided. */
175 if (dev->fn_tbl.presetup)
177 ret=dev->fn_tbl.presetup(dev);
/* Reset usage counters. */
182 atomic_set( &dev->ioctl_count, 0 );
183 atomic_set( &dev->vma_count, 0 );
185 atomic_set( &dev->buf_alloc, 0 );
/* Set up DMA bookkeeping for drivers that do DMA. */
187 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
189 i = DRM(dma_setup)( dev );
/* Zero the per-device statistics counters and the magic-number hash. */
194 for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
195 atomic_set( &dev->counts[i], 0 );
197 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
198 dev->magiclist[i].head = NULL;
199 dev->magiclist[i].tail = NULL;
/* Allocate and zero the memory-map list head.  NOTE(review): on the
 * -ENOMEM paths below, earlier allocations are not released here —
 * presumably takedown handles it; confirm against the full source. */
202 dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
204 if(dev->maplist == NULL) return -ENOMEM;
205 memset(dev->maplist, 0, sizeof(*dev->maplist));
206 INIT_LIST_HEAD(&dev->maplist->head);
/* Allocate and zero the context list head. */
208 dev->ctxlist = DRM(alloc)(sizeof(*dev->ctxlist),
210 if(dev->ctxlist == NULL) return -ENOMEM;
211 memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
212 INIT_LIST_HEAD(&dev->ctxlist->head);
/* Reset hardware-lock and DMA-queue state. */
215 dev->sigdata.lock = dev->lock.hw_lock = NULL;
216 init_waitqueue_head( &dev->lock.lock_queue );
217 dev->queue_count = 0;
218 dev->queue_reserved = 0;
219 dev->queue_slots = 0;
220 dev->queuelist = NULL;
221 dev->irq_enabled = 0;
222 dev->context_flag = 0;
223 dev->interrupt_flag = 0;
225 dev->last_context = 0;
226 dev->last_switch = 0;
227 dev->last_checked = 0;
228 init_waitqueue_head( &dev->context_wait );
/* Reset the in-kernel message ring buffer (read by /dev/drm readers). */
234 dev->buf_rp = dev->buf;
235 dev->buf_wp = dev->buf;
236 dev->buf_end = dev->buf + DRM_BSZ;
237 dev->buf_async = NULL;
238 init_waitqueue_head( &dev->buf_readers );
239 init_waitqueue_head( &dev->buf_writers );
244 * The kernel's context could be created here, but is now created
245 * in drm_dma_enqueue. This is more resource-efficient for
246 * hardware that does not do DMA, but may mean that
247 * drm_select_queue fails between the time the interrupt is
248 * initialized and the time the queues are initialized.
/* Driver-specific post-initialization, if provided. */
250 if (dev->fn_tbl.postsetup)
251 dev->fn_tbl.postsetup(dev);
258 * Take down the DRM device.
260 * \param dev DRM device structure.
262 * Frees every resource in \p dev.
264 * \sa drm_device and setup().
/* Tear down everything DRM(setup) built, in reverse dependency order:
 * driver hook, IRQ, timer, names, magic hash, AGP memory, vma list,
 * memory maps, DMA queues, and finally the hardware lock.  Runs under
 * dev->struct_sem.  NOTE(review): locals and several closing braces
 * are missing from this extraction. */
266 static int DRM(takedown)( drm_device_t *dev )
268 drm_magic_entry_t *pt, *next;
270 drm_map_list_t *r_list;
271 struct list_head *list, *list_next;
272 drm_vma_entry_t *vma, *vma_next;
/* Give the driver first chance to quiesce its hardware. */
277 if (dev->fn_tbl.pretakedown)
278 dev->fn_tbl.pretakedown(dev);
280 if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );
282 down( &dev->struct_sem );
283 del_timer( &dev->timer );
/* Free the device and unique name strings (sized by strlen + NUL). */
285 if ( dev->devname ) {
286 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
292 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
/* Drain every bucket of the authentication-magic hash table. */
298 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
299 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
301 DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
303 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
306 /* Clear AGP information */
307 if (drm_core_has_AGP(dev) && dev->agp) {
308 drm_agp_mem_t *entry;
309 drm_agp_mem_t *nexte;
311 /* Remove AGP resources, but leave dev->agp
312 intact until drv_cleanup is called. */
313 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
/* Unbind before freeing: bound AGP memory is still mapped by the GART. */
315 if ( entry->bound ) DRM(unbind_agp)( entry->memory );
316 DRM(free_agp)( entry->memory, entry->pages );
317 DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
319 dev->agp->memory = NULL;
321 if ( dev->agp->acquired ) DRM(agp_do_release)();
323 dev->agp->acquired = 0;
324 dev->agp->enabled = 0;
327 /* Clear vma list (only built for debugging) */
328 if ( dev->vmalist ) {
329 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
330 vma_next = vma->next;
331 DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
/* Destroy every memory map still registered; handling depends on the
 * map type (frame buffer / registers need MTRR + ioremap teardown,
 * AGP is owned by the GART driver, SG has its own cleanup). */
337 list_for_each_safe( list, list_next, &dev->maplist->head ) {
338 r_list = (drm_map_list_t *)list;
340 if ( ( map = r_list->map ) ) {
341 switch ( map->type ) {
343 case _DRM_FRAME_BUFFER:
344 if (drm_core_has_MTRR(dev)) {
345 if ( map->mtrr >= 0 ) {
347 retcode = mtrr_del( map->mtrr,
350 DRM_DEBUG( "mtrr_del=%d\n", retcode );
353 DRM(ioremapfree)( map->handle, map->size, dev );
360 /* Do nothing here, because this is all
361 * handled in the AGP/GART driver.
364 case _DRM_SCATTER_GATHER:
366 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
367 DRM(sg_cleanup)(dev->sg);
372 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
375 DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
377 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
/* Free per-queue structures, then the queue list itself. */
381 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
382 for ( i = 0 ; i < dev->queue_count ; i++ ) {
383 if ( dev->queuelist[i] ) {
384 DRM(free)( dev->queuelist[i],
385 sizeof(*dev->queuelist[0]),
387 dev->queuelist[i] = NULL;
390 DRM(free)( dev->queuelist,
391 dev->queue_slots * sizeof(*dev->queuelist),
393 dev->queuelist = NULL;
395 dev->queue_count = 0;
397 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
398 DRM(dma_takedown)( dev );
/* Drop the hardware lock (its backing SHM map was freed above) and
 * wake any sleepers so they can notice the device is gone. */
400 if ( dev->lock.hw_lock ) {
401 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
402 dev->lock.filp = NULL;
403 wake_up_interruptible( &dev->lock.lock_queue );
405 up( &dev->struct_sem );
/* Install the DRM core's default callbacks into the device function
 * table; the driver's register_fns hook (called later in DRM(probe))
 * may override any of them. */
410 static void DRM(init_fn_table)(struct drm_device *dev)
412 dev->fn_tbl.reclaim_buffers = DRM(core_reclaim_buffers);
413 dev->fn_tbl.get_map_ofs = DRM(core_get_map_ofs);
414 dev->fn_tbl.get_reg_ofs = DRM(core_get_reg_ofs);
417 #include "drm_pciids.h"
/* PCI vendor/device IDs this driver binds to.  The table body comes
 * from drm_pciids.h (not visible here); the probe loop below stops at
 * an entry with vendor == 0, so the table is presumably
 * zero-terminated — confirm against drm_pciids.h. */
419 static struct pci_device_id DRM(pciidlist)[] = {
/* Probe one PCI device: match it against DRM(pciidlist), claim the
 * next slot in DRM(device)[], register a minor with the stub layer,
 * record PCI topology, install core + driver callbacks, bring up AGP
 * and the context bitmap, and announce the device.  NOTE(review):
 * locals, braces and several error-path lines are missing from this
 * extraction. */
423 static int DRM(probe)(struct pci_dev *pdev)
/* Reject devices whose vendor/device pair is not in the ID table. */
432 for (i = 0; DRM(pciidlist)[i].vendor != 0; i++) {
433 if ((DRM(pciidlist)[i].vendor == pdev->vendor) &&
434 (DRM(pciidlist)[i].device == pdev->device)) {
/* All static device slots in use — cannot take another card. */
441 if (DRM(numdevs) >= MAX_DEVICES)
444 if ((retcode=pci_enable_device(pdev)))
/* Claim (but do not yet commit) the next device slot. */
447 dev = &(DRM(device)[DRM(numdevs)]);
449 memset( (void *)dev, 0, sizeof(*dev) );
450 dev->count_lock = SPIN_LOCK_UNLOCKED;
451 init_timer( &dev->timer );
452 sema_init( &dev->struct_sem, 1 );
453 sema_init( &dev->ctxlist_sem, 1 );
/* Obtain a minor number from the stub registration layer. */
455 if ((dev->minor = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
457 dev->device = MKDEV(DRM_MAJOR, dev->minor );
458 dev->name = DRIVER_NAME;
/* Record PCI location (the hose/domain lines are presumably inside a
 * per-arch #ifdef in the full source — confirm). */
462 dev->hose = pdev->sysdata;
463 dev->pci_domain = dev->hose->bus->number;
467 dev->pci_bus = pdev->bus->number;
468 dev->pci_slot = PCI_SLOT(pdev->devfn);
469 dev->pci_func = PCI_FUNC(pdev->devfn);
470 dev->irq = pdev->irq;
472 /* dev_priv_size can be changed by a driver in driver_register_fns */
473 dev->dev_priv_size = sizeof(u32);
475 /* the DRM has 6 basic counters - drivers add theirs in register_fns */
477 dev->types[0] = _DRM_STAT_LOCK;
478 dev->types[1] = _DRM_STAT_OPENS;
479 dev->types[2] = _DRM_STAT_CLOSES;
480 dev->types[3] = _DRM_STAT_IOCTLS;
481 dev->types[4] = _DRM_STAT_LOCKS;
482 dev->types[5] = _DRM_STAT_UNLOCKS;
/* Core defaults first, then let the driver override and pre-init. */
484 DRM(init_fn_table)(dev);
486 DRM(driver_register_fns)(dev);
488 if (dev->fn_tbl.preinit)
489 dev->fn_tbl.preinit(dev);
/* AGP bring-up; drivers that require AGP fail the probe without it. */
491 if (drm_core_has_AGP(dev))
493 dev->agp = DRM(agp_init)();
494 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
495 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
496 DRM(stub_unregister)(dev->minor);
497 DRM(takedown)( dev );
/* Cover the AGP aperture with a write-combining MTRR. */
500 if (drm_core_has_MTRR(dev)) {
502 dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
503 dev->agp->agp_info.aper_size*1024*1024,
509 retcode = DRM(ctxbitmap_init)( dev );
511 DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
512 DRM(stub_unregister)(dev->minor);
513 DRM(takedown)( dev );
517 DRM(numdevs)++; /* no errors, mark it reserved */
519 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
526 pci_pretty_name(pdev));
528 if (dev->fn_tbl.postinit)
529 dev->fn_tbl.postinit(dev);
535 * Module initialization. Called via init_module at module load time, or via
536 * linux/init/main.c (this is not currently supported).
538 * \return zero on success or a negative number on failure.
540 * Initializes an array of drm_device structures, and attempts to
541 * initialize all available devices, using consecutive minors, registering the
542 * stubs and initializing the AGP device.
544 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
545 * after the initialization for driver customization.
/* Module entry point: parse the drm_opts module parameter, then walk
 * every PCI device and offer it to DRM(probe).  NOTE(review): most of
 * this function's body (locals, return, probe call) is missing from
 * the extraction. */
547 static int __init drm_init( void )
549 struct pci_dev *pdev = NULL;
554 DRM(parse_options)( drm_opts );
559 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
566 * Called via cleanup_module() at module unload time.
568 * Cleans up all DRM device, calling takedown().
/* Module exit: unregister and tear down every initialized device in
 * reverse probe order — stub minor, context bitmap, AGP MTRR, full
 * takedown, AGP structure, driver postcleanup hook.  NOTE(review):
 * locals and braces are missing from this extraction. */
572 static void __exit drm_cleanup( void )
579 for (i = DRM(numdevs) - 1; i >= 0; i--) {
580 dev = &(DRM(device)[i]);
581 if ( DRM(stub_unregister)(dev->minor) ) {
582 DRM_ERROR( "Cannot unload module\n" );
584 DRM_DEBUG("minor %d unregistered\n", dev->minor);
586 DRM_INFO( "Module unloaded\n" );
590 DRM(ctxbitmap_cleanup)( dev );
/* Remove the write-combining MTRR added over the AGP aperture. */
592 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
593 dev->agp && dev->agp->agp_mtrr >= 0) {
595 retval = mtrr_del( dev->agp->agp_mtrr,
596 dev->agp->agp_info.aper_base,
597 dev->agp->agp_info.aper_size*1024*1024 );
598 DRM_DEBUG( "mtrr_del=%d\n", retval );
601 DRM(takedown)( dev );
/* dev->agp was deliberately kept alive through takedown; free it now. */
603 if (drm_core_has_AGP(dev) && dev->agp ) {
605 DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
609 if (dev->fn_tbl.postcleanup)
610 dev->fn_tbl.postcleanup(dev);
616 module_init( drm_init );
617 module_exit( drm_cleanup );
621 * Get version information
623 * \param inode device inode.
624 * \param filp file pointer.
625 * \param cmd command.
626 * \param arg user argument, pointing to a drm_version structure.
627 * \return zero on success or negative number on failure.
629 * Fills in the version information in \p arg.
/* DRM_IOCTL_VERSION handler: copy the user's drm_version_t in, fill
 * in the driver's version numbers and copy the name/date/desc strings
 * into the user-supplied buffers, then copy the struct back out. */
631 int DRM(version)( struct inode *inode, struct file *filp,
632 unsigned int cmd, unsigned long arg )
634 drm_version_t __user *argp = (void __user *)arg;
635 drm_version_t version;
638 if ( copy_from_user( &version, argp, sizeof(version) ) )
/* Copy a string into a user buffer of name_len bytes, then report the
 * string's full length back so userspace can detect truncation.
 * NOTE(review): multi-statement macro without do{}while(0), and it
 * evaluates `value` via strlen twice — fine for the string-literal
 * arguments used below, but fragile for anything else. */
641 #define DRM_COPY( name, value ) \
642 len = strlen( value ); \
643 if ( len > name##_len ) len = name##_len; \
644 name##_len = strlen( value ); \
645 if ( len && name ) { \
646 if ( copy_to_user( name, value, len ) ) \
650 version.version_major = DRIVER_MAJOR;
651 version.version_minor = DRIVER_MINOR;
652 version.version_patchlevel = DRIVER_PATCHLEVEL;
654 DRM_COPY( version.name, DRIVER_NAME );
655 DRM_COPY( version.date, DRIVER_DATE );
656 DRM_COPY( version.desc, DRIVER_DESC );
658 if ( copy_to_user( argp, &version, sizeof(version) ) )
666 * \param inode device inode
667 * \param filp file pointer.
668 * \return zero on success or a negative number on failure.
670 * Searches the DRM device with the same minor number, calls open_helper(), and
671 * increments the device open count. If the open count was previous at zero,
672 * i.e., it's the first that the device is open, then calls setup().
/* Open a DRM minor: find the device whose minor matches the inode,
 * run open_helper, bump the OPENS counter, and if this is the first
 * open of the device run DRM(setup).  NOTE(review): locals and the
 * no-device error path are missing from this extraction. */
674 int DRM(open)( struct inode *inode, struct file *filp )
676 drm_device_t *dev = NULL;
/* Linear search of the static device table by minor number. */
680 for (i = 0; i < DRM(numdevs); i++) {
681 if (iminor(inode) == DRM(device)[i].minor) {
682 dev = &(DRM(device)[i]);
690 retcode = DRM(open_helper)( inode, filp, dev );
692 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
/* count_lock guards open_count; first opener performs device setup. */
693 spin_lock( &dev->count_lock );
694 if ( !dev->open_count++ ) {
695 spin_unlock( &dev->count_lock );
696 return DRM(setup)( dev );
698 spin_unlock( &dev->count_lock );
707 * \param inode device inode
708 * \param filp file pointer.
709 * \return zero on success or a negative number on failure.
711 * If the hardware lock is held then free it, and take it again for the kernel
712 * context since it's necessary to reclaim buffers. Unlink the file private
713 * data from its list and free it. Decreases the open count and if it reaches
714 * zero calls takedown().
/* Release a DRM file handle: free (or reclaim via the kernel context)
 * the hardware lock if this client holds it, reclaim DMA buffers,
 * destroy the client's hardware contexts, unlink its drm_file_t, and
 * if this was the last open call DRM(takedown).  NOTE(review): locals
 * and a number of braces/lines are missing from this extraction. */
716 int DRM(release)( struct inode *inode, struct file *filp )
718 drm_file_t *priv = filp->private_data;
725 DRM_DEBUG( "open_count = %d\n", dev->open_count );
727 if (dev->fn_tbl.prerelease)
728 dev->fn_tbl.prerelease(dev, filp);
730 /* ========================================================
731 * Begin inline drm_release
734 DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
735 current->pid, (long)old_encode_dev(dev->device), dev->open_count );
/* Case 1: the dying client itself holds the hardware lock — run the
 * driver release hook and free the lock directly. */
737 if ( priv->lock_count && dev->lock.hw_lock &&
738 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
739 dev->lock.filp == filp ) {
740 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
742 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
744 if (dev->fn_tbl.release)
745 dev->fn_tbl.release(dev, filp);
747 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
748 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
750 /* FIXME: may require heavy-handed reset of
751 hardware at this point, possibly
752 processed via a callback to the X
/* Case 2: someone else holds the lock but the driver needs it to
 * reclaim buffers — sleep on the lock queue until we can take it as
 * DRM_KERNEL_CONTEXT, then release and free. */
755 else if ( dev->fn_tbl.release && priv->lock_count && dev->lock.hw_lock ) {
756 /* The lock is required to reclaim buffers */
757 DECLARE_WAITQUEUE( entry, current );
759 add_wait_queue( &dev->lock.lock_queue, &entry );
761 __set_current_state(TASK_INTERRUPTIBLE);
762 if ( !dev->lock.hw_lock ) {
763 /* Device has been unregistered */
767 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
768 DRM_KERNEL_CONTEXT ) ) {
769 dev->lock.filp = filp;
770 dev->lock.lock_time = jiffies;
771 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
772 break; /* Got lock */
/* A signal aborts the wait; the release continues regardless. */
776 if ( signal_pending( current ) ) {
777 retcode = -ERESTARTSYS;
781 __set_current_state(TASK_RUNNING);
782 remove_wait_queue( &dev->lock.lock_queue, &entry );
784 if (dev->fn_tbl.release)
785 dev->fn_tbl.release(dev, filp);
786 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
787 DRM_KERNEL_CONTEXT );
/* Hand the client's DMA buffers back to the free pool. */
791 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
793 dev->fn_tbl.reclaim_buffers(filp);
796 DRM(fasync)( -1, filp, 0 );
/* Destroy every hardware context owned by this file (except the
 * kernel context), under ctxlist_sem. */
798 down( &dev->ctxlist_sem );
799 if ( !list_empty( &dev->ctxlist->head ) ) {
800 drm_ctx_list_t *pos, *n;
802 list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
803 if ( pos->tag == priv &&
804 pos->handle != DRM_KERNEL_CONTEXT ) {
805 if (dev->fn_tbl.context_dtor)
806 dev->fn_tbl.context_dtor(dev, pos->handle);
808 DRM(ctxbitmap_free)( dev, pos->handle );
810 list_del( &pos->head );
811 DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
816 up( &dev->ctxlist_sem );
/* Unlink priv from the device's doubly-linked file list (struct_sem);
 * a master closing with remove_auth_on_close revokes everyone's
 * authentication first. */
818 down( &dev->struct_sem );
819 if ( priv->remove_auth_on_close == 1 ) {
820 drm_file_t *temp = dev->file_first;
822 temp->authenticated = 0;
827 priv->prev->next = priv->next;
829 dev->file_first = priv->next;
832 priv->next->prev = priv->prev;
834 dev->file_last = priv->prev;
836 up( &dev->struct_sem );
838 if (dev->fn_tbl.free_filp_priv)
839 dev->fn_tbl.free_filp_priv(dev, priv);
841 DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
843 /* ========================================================
844 * End inline drm_release
/* Last close tears the device down — unless ioctls are still in
 * flight, which would be a bug worth shouting about. */
847 atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
848 spin_lock( &dev->count_lock );
849 if ( !--dev->open_count ) {
850 if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
851 DRM_ERROR( "Device busy: %d %d\n",
852 atomic_read( &dev->ioctl_count ),
854 spin_unlock( &dev->count_lock );
858 spin_unlock( &dev->count_lock );
860 return DRM(takedown)( dev );
862 spin_unlock( &dev->count_lock );
870 * Called whenever a process performs an ioctl on /dev/drm.
872 * \param inode device inode.
873 * \param filp file pointer.
874 * \param cmd command.
875 * \param arg user argument.
876 * \return zero on success or negative number on failure.
878 * Looks up the ioctl function in the ::ioctls table, checking for root
879 * previleges if so required, and dispatches to the respective function.
/* Central ioctl dispatcher: validate the ioctl number against
 * DRIVER_IOCTL_COUNT, check the entry's root/authentication flags,
 * and call the handler.  ioctl_count tracks in-flight ioctls so
 * DRM(release) can detect a busy device.  NOTE(review): the func
 * local, error returns and braces are missing from this extraction. */
881 int DRM(ioctl)( struct inode *inode, struct file *filp,
882 unsigned int cmd, unsigned long arg )
884 drm_file_t *priv = filp->private_data;
885 drm_device_t *dev = priv->dev;
886 drm_ioctl_desc_t *ioctl;
888 int nr = DRM_IOCTL_NR(cmd);
891 atomic_inc( &dev->ioctl_count );
892 atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
895 DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
896 current->pid, cmd, nr, (long)old_encode_dev(dev->device),
897 priv->authenticated );
/* Reject ioctl numbers beyond the dispatch table. */
899 if ( nr >= DRIVER_IOCTL_COUNT ) {
902 ioctl = &DRM(ioctls)[nr];
906 DRM_DEBUG( "no function\n" );
/* Permission checks: root_only needs CAP_SYS_ADMIN, auth_needed needs
 * a client authenticated via the magic handshake. */
908 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
909 ( ioctl->auth_needed && !priv->authenticated ) ) {
912 retcode = func( inode, filp, cmd, arg );
916 atomic_dec( &dev->ioctl_count );
923 * \param inode device inode.
924 * \param filp file pointer.
925 * \param cmd command.
926 * \param arg user argument, pointing to a drm_lock structure.
927 * \return zero on success or negative number on failure.
929 * Add the current task to the lock wait queue, and attempt to take to lock.
/* DRM_IOCTL_LOCK handler: sleep on the lock queue until the hardware
 * lock is taken for the requested context, then block job-control
 * signals while the lock is held and run the driver's dma_ready /
 * dma_quiescent hooks as requested by lock.flags.  NOTE(review):
 * locals, braces and some error paths are missing here. */
931 int DRM(lock)( struct inode *inode, struct file *filp,
932 unsigned int cmd, unsigned long arg )
934 drm_file_t *priv = filp->private_data;
935 drm_device_t *dev = priv->dev;
936 DECLARE_WAITQUEUE( entry, current );
942 if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
/* Userspace must never claim the kernel's reserved context. */
945 if ( lock.context == DRM_KERNEL_CONTEXT ) {
946 DRM_ERROR( "Process %d using kernel context %d\n",
947 current->pid, lock.context );
951 DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
952 lock.context, current->pid,
953 dev->lock.hw_lock->lock, lock.flags );
955 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
956 if ( lock.context < 0 )
/* Classic sleep loop: set INTERRUPTIBLE, try to take the lock,
 * otherwise schedule until woken or signalled. */
959 add_wait_queue( &dev->lock.lock_queue, &entry );
961 __set_current_state(TASK_INTERRUPTIBLE);
962 if ( !dev->lock.hw_lock ) {
963 /* Device has been unregistered */
967 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
969 dev->lock.filp = filp;
970 dev->lock.lock_time = jiffies;
971 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
972 break; /* Got lock */
977 if ( signal_pending( current ) ) {
982 __set_current_state(TASK_RUNNING);
983 remove_wait_queue( &dev->lock.lock_queue, &entry );
/* Block job-control signals while the lock is held so the holder
 * cannot be stopped mid-critical-section; DRM(notifier) filters them. */
985 sigemptyset( &dev->sigmask );
986 sigaddset( &dev->sigmask, SIGSTOP );
987 sigaddset( &dev->sigmask, SIGTSTP );
988 sigaddset( &dev->sigmask, SIGTTIN );
989 sigaddset( &dev->sigmask, SIGTTOU );
990 dev->sigdata.context = lock.context;
991 dev->sigdata.lock = dev->lock.hw_lock;
992 block_all_signals( DRM(notifier),
993 &dev->sigdata, &dev->sigmask );
995 if (dev->fn_tbl.dma_ready && (lock.flags & _DRM_LOCK_READY))
996 dev->fn_tbl.dma_ready(dev);
/* NOTE(review): this returns while signals are still blocked
 * (unblock_all_signals is never reached on this path) — looks
 * suspicious; verify against the DRM core history. */
998 if ( dev->fn_tbl.dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
999 return dev->fn_tbl.dma_quiescent(dev);
1001 /* dev->fn_tbl.kernel_context_switch isn't used by any of the x86
1002 * drivers but is used by the Sparc driver.
1005 if (dev->fn_tbl.kernel_context_switch &&
1006 dev->last_context != lock.context) {
1007 dev->fn_tbl.kernel_context_switch(dev, dev->last_context,
1010 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1018 * \param inode device inode.
1019 * \param filp file pointer.
1020 * \param cmd command.
1021 * \param arg user argument, pointing to a drm_lock structure.
1022 * \return zero on success or negative number on failure.
1024 * Transfer and free the lock.
/* DRM_IOCTL_UNLOCK handler: transfer the hardware lock to the kernel
 * context, free it, and re-enable the signals blocked by DRM(lock).
 * NOTE(review): the function continues past the end of this view;
 * the tail (final return, braces) is not visible. */
1026 int DRM(unlock)( struct inode *inode, struct file *filp,
1027 unsigned int cmd, unsigned long arg )
1029 drm_file_t *priv = filp->private_data;
1030 drm_device_t *dev = priv->dev;
1033 if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
/* Userspace must never manipulate the kernel's reserved context. */
1036 if ( lock.context == DRM_KERNEL_CONTEXT ) {
1037 DRM_ERROR( "Process %d using kernel context %d\n",
1038 current->pid, lock.context );
1042 atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
1044 /* kernel_context_switch isn't used by any of the x86 drm
1045 * modules but is required by the Sparc driver.
1047 if (dev->fn_tbl.kernel_context_switch_unlock)
1048 dev->fn_tbl.kernel_context_switch_unlock(dev, &lock);
/* Hand the lock to DRM_KERNEL_CONTEXT before freeing, so waiters see
 * a consistent owner during the transition. */
1050 DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1051 DRM_KERNEL_CONTEXT );
1053 if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1054 DRM_KERNEL_CONTEXT ) ) {
/* Undo the block_all_signals() done when the lock was taken. */
1059 unblock_all_signals();