drivers/char/drm/drm_drv.h from linux-2.6.6 (ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
1 /**
2  * \file drm_drv.h 
3  * Generic driver template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  *
8  * To use this template, you must at least define the following (samples
9  * given for the MGA driver):
10  *
11  * \code
12  * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
13  *
14  * #define DRIVER_NAME          "mga"
15  * #define DRIVER_DESC          "Matrox G200/G400"
16  * #define DRIVER_DATE          "20001127"
17  *
18  * #define DRIVER_MAJOR         2
19  * #define DRIVER_MINOR         0
20  * #define DRIVER_PATCHLEVEL    2
21  *
22  * #define DRIVER_IOCTL_COUNT   DRM_ARRAY_SIZE( mga_ioctls )
23  *
24  * #define DRM(x)               mga_##x
25  * \endcode
26  */
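
/**
 * For illustration: a minimal, hypothetical sketch of how a driver pulls this
 * template into a single compilation unit, loosely modeled on the MGA driver
 * (the file list is illustrative, not exhaustive):
 *
 * \code
 * // mga_drv.c (sketch)
 * #include "drmP.h"
 * #include "mga_drm.h"
 * #include "mga_drv.h"      // defines DRIVER_NAME, DRM(x), DRIVER_IOCTLS, ...
 *
 * #include "drm_auth.h"
 * #include "drm_bufs.h"
 * #include "drm_context.h"
 * #include "drm_dma.h"
 * #include "drm_drv.h"      // this template: expands drm_init(), DRM(open), ...
 * #include "drm_fops.h"
 * #include "drm_ioctl.h"
 * \endcode
 */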
27
28 /*
29  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
30  *
31  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33  * All Rights Reserved.
34  *
35  * Permission is hereby granted, free of charge, to any person obtaining a
36  * copy of this software and associated documentation files (the "Software"),
37  * to deal in the Software without restriction, including without limitation
38  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39  * and/or sell copies of the Software, and to permit persons to whom the
40  * Software is furnished to do so, subject to the following conditions:
41  *
42  * The above copyright notice and this permission notice (including the next
43  * paragraph) shall be included in all copies or substantial portions of the
44  * Software.
45  *
46  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
49  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52  * OTHER DEALINGS IN THE SOFTWARE.
53  */
54
55 #ifndef __MUST_HAVE_AGP
56 #define __MUST_HAVE_AGP                 0
57 #endif
58 #ifndef __HAVE_CTX_BITMAP
59 #define __HAVE_CTX_BITMAP               0
60 #endif
61 #ifndef __HAVE_DMA_IRQ
62 #define __HAVE_DMA_IRQ                  0
63 #endif
64 #ifndef __HAVE_DMA_QUEUE
65 #define __HAVE_DMA_QUEUE                0
66 #endif
67 #ifndef __HAVE_MULTIPLE_DMA_QUEUES
68 #define __HAVE_MULTIPLE_DMA_QUEUES      0
69 #endif
70 #ifndef __HAVE_DMA_SCHEDULE
71 #define __HAVE_DMA_SCHEDULE             0
72 #endif
73 #ifndef __HAVE_DMA_FLUSH
74 #define __HAVE_DMA_FLUSH                0
75 #endif
76 #ifndef __HAVE_DMA_READY
77 #define __HAVE_DMA_READY                0
78 #endif
79 #ifndef __HAVE_DMA_QUIESCENT
80 #define __HAVE_DMA_QUIESCENT            0
81 #endif
82 #ifndef __HAVE_RELEASE
83 #define __HAVE_RELEASE                  0
84 #endif
85 #ifndef __HAVE_COUNTERS
86 #define __HAVE_COUNTERS                 0
87 #endif
88 #ifndef __HAVE_SG
89 #define __HAVE_SG                       0
90 #endif
91 /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm modules in
92  * the DRI cvs tree, but it is required by the kernel tree's sparc
93  * driver.
94  */
95 #ifndef __HAVE_KERNEL_CTX_SWITCH
96 #define __HAVE_KERNEL_CTX_SWITCH        0
97 #endif
98 #ifndef __HAVE_DRIVER_FOPS_READ
99 #define __HAVE_DRIVER_FOPS_READ         0
100 #endif
101 #ifndef __HAVE_DRIVER_FOPS_POLL
102 #define __HAVE_DRIVER_FOPS_POLL         0
103 #endif
104
105 #ifndef DRIVER_PREINIT
106 #define DRIVER_PREINIT()
107 #endif
108 #ifndef DRIVER_POSTINIT
109 #define DRIVER_POSTINIT()
110 #endif
111 #ifndef DRIVER_PRERELEASE
112 #define DRIVER_PRERELEASE()
113 #endif
114 #ifndef DRIVER_PRETAKEDOWN
115 #define DRIVER_PRETAKEDOWN()
116 #endif
117 #ifndef DRIVER_POSTCLEANUP
118 #define DRIVER_POSTCLEANUP()
119 #endif
120 #ifndef DRIVER_PRESETUP
121 #define DRIVER_PRESETUP()
122 #endif
123 #ifndef DRIVER_POSTSETUP
124 #define DRIVER_POSTSETUP()
125 #endif
126 #ifndef DRIVER_IOCTLS
127 #define DRIVER_IOCTLS
128 #endif
129 #ifndef DRIVER_FOPS
130 #define DRIVER_FOPS                             \
131 static struct file_operations   DRM(fops) = {   \
132         .owner   = THIS_MODULE,                 \
133         .open    = DRM(open),                   \
134         .flush   = DRM(flush),                  \
135         .release = DRM(release),                \
136         .ioctl   = DRM(ioctl),                  \
137         .mmap    = DRM(mmap),                   \
138         .fasync  = DRM(fasync),                 \
139         .poll    = DRM(poll),                   \
140         .read    = DRM(read),                   \
141 }
142 #endif
143
144 #ifndef MODULE
145 /** Use an additional macro to avoid preprocessor troubles */
146 #define DRM_OPTIONS_FUNC DRM(options)
147 /**
148  * Called by the kernel to parse command-line options passed via the
149  * boot-loader (e.g., LILO).  It calls the insmod option routine,
150  * parse_options().
151  */
152 static int __init DRM(options)( char *str )
153 {
154         DRM(parse_options)( str );
155         return 1;
156 }
157
158 __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
159 #undef DRM_OPTIONS_FUNC
160 #endif
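
/*
 * For illustration: with the __setup() registration above, a driver built
 * into the kernel takes its option string from the boot-loader command line
 * under its own name, while a modular build passes the same string through
 * the drm_opts parameter declared below.  Hypothetical examples, assuming
 * DRM(parse_options) recognizes a "debug" keyword:
 *
 *     built-in:  append "mga=debug" to the kernel command line
 *     module:    insmod mga.ko drm_opts=debug
 */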
161
162 /**
163  * The default number of instances (minor numbers) to initialize.
164  */
165 #ifndef DRIVER_NUM_CARDS
166 #define DRIVER_NUM_CARDS 1
167 #endif
168
169 static drm_device_t     *DRM(device);
170 static int              *DRM(minor);
171 static int              DRM(numdevs) = 0;
172
173 DRIVER_FOPS;
174
175 /** Ioctl table */
176 static drm_ioctl_desc_t           DRM(ioctls)[] = {
177         [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
178         [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
179         [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
180         [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_busid),   0, 1 },
181         [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
182         [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
183         [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },
184
185         [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
186         [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(noop),        1, 1 },
187         [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(noop),        1, 1 },
188         [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },
189
190         [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
191         [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },
192
193 #if __HAVE_CTX_BITMAP
194         [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
195         [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
196 #endif
197
198         [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
199         [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
200         [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
201         [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
202         [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
203         [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
204         [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },
205
206         [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
207         [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },
208
209         [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]          = { DRM(lock),        1, 0 },
210         [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },
211
212 #if __HAVE_DMA_FLUSH
213         /* Gamma only, really */
214         [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(finish),      1, 0 },
215 #else
216         [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(noop),        1, 0 },
217 #endif
218
219 #if __HAVE_DMA
220         [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
221         [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
222         [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
223         [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
224         [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },
225
226         /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
227          */
228         [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },
229 #endif
230
231 #if __REALLY_HAVE_AGP
232         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
233         [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
234         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
235         [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
236         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
237         [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
238         [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
239         [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
240 #endif
241
242 #if __HAVE_SG
243         [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
244         [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },
245 #endif
246
247 #if __HAVE_VBL_IRQ
248         [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)]   = { DRM(wait_vblank), 0, 0 },
249 #endif
250
251         DRIVER_IOCTLS
252 };
253
254 #define DRIVER_IOCTL_COUNT      DRM_ARRAY_SIZE( DRM(ioctls) )
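
/*
 * For illustration: a driver appends its own entries to the table above
 * through the DRIVER_IOCTLS macro, using the same { handler, auth_needed,
 * root_only } layout.  A hypothetical definition with illustrative ioctl
 * numbers and handler names:
 *
 * \code
 * #define DRIVER_IOCTLS                                                      \
 *      [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)]    = { mga_dma_flush,    1, 0 },  \
 *      [DRM_IOCTL_NR(DRM_IOCTL_MGA_GETPARAM)] = { mga_getparam,     1, 0 },
 * \endcode
 */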
255
256 #ifdef MODULE
257 static char *drm_opts = NULL;
258 #endif
259
260 MODULE_AUTHOR( DRIVER_AUTHOR );
261 MODULE_DESCRIPTION( DRIVER_DESC );
262 MODULE_PARM( drm_opts, "s" );
263 MODULE_LICENSE("GPL and additional rights");
264
265 static int DRM(setup)( drm_device_t *dev )
266 {
267         int i;
268
269         DRIVER_PRESETUP();
270         atomic_set( &dev->ioctl_count, 0 );
271         atomic_set( &dev->vma_count, 0 );
272         dev->buf_use = 0;
273         atomic_set( &dev->buf_alloc, 0 );
274
275 #if __HAVE_DMA
276         i = DRM(dma_setup)( dev );
277         if ( i < 0 )
278                 return i;
279 #endif
280
281         dev->counters  = 6 + __HAVE_COUNTERS;
282         dev->types[0]  = _DRM_STAT_LOCK;
283         dev->types[1]  = _DRM_STAT_OPENS;
284         dev->types[2]  = _DRM_STAT_CLOSES;
285         dev->types[3]  = _DRM_STAT_IOCTLS;
286         dev->types[4]  = _DRM_STAT_LOCKS;
287         dev->types[5]  = _DRM_STAT_UNLOCKS;
288 #ifdef __HAVE_COUNTER6
289         dev->types[6]  = __HAVE_COUNTER6;
290 #endif
291 #ifdef __HAVE_COUNTER7
292         dev->types[7]  = __HAVE_COUNTER7;
293 #endif
294 #ifdef __HAVE_COUNTER8
295         dev->types[8]  = __HAVE_COUNTER8;
296 #endif
297 #ifdef __HAVE_COUNTER9
298         dev->types[9]  = __HAVE_COUNTER9;
299 #endif
300 #ifdef __HAVE_COUNTER10
301         dev->types[10] = __HAVE_COUNTER10;
302 #endif
303 #ifdef __HAVE_COUNTER11
304         dev->types[11] = __HAVE_COUNTER11;
305 #endif
306 #ifdef __HAVE_COUNTER12
307         dev->types[12] = __HAVE_COUNTER12;
308 #endif
309 #ifdef __HAVE_COUNTER13
310         dev->types[13] = __HAVE_COUNTER13;
311 #endif
312 #ifdef __HAVE_COUNTER14
313         dev->types[14] = __HAVE_COUNTER14;
314 #endif
315 #ifdef __HAVE_COUNTER15
316         dev->types[15] = __HAVE_COUNTER15;
317 #endif
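
        /*
         * For reference: a driver adds its own statistics slots by defining
         * __HAVE_COUNTERS and one __HAVE_COUNTERn value per extra slot before
         * including this template; the MGA driver, for instance, defines
         * something along the lines of:
         *
         *      #define __HAVE_COUNTERS   3
         *      #define __HAVE_COUNTER6   _DRM_STAT_IRQ
         *      #define __HAVE_COUNTER7   _DRM_STAT_PRIMARY
         *      #define __HAVE_COUNTER8   _DRM_STAT_SECONDARY
         */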
318
319         for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
320                 atomic_set( &dev->counts[i], 0 );
321
322         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
323                 dev->magiclist[i].head = NULL;
324                 dev->magiclist[i].tail = NULL;
325         }
326
327         dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
328                                   DRM_MEM_MAPS);
329         if(dev->maplist == NULL) return -ENOMEM;
330         memset(dev->maplist, 0, sizeof(*dev->maplist));
331         INIT_LIST_HEAD(&dev->maplist->head);
332
333         dev->vmalist = NULL;
334         dev->sigdata.lock = dev->lock.hw_lock = NULL;
335         init_waitqueue_head( &dev->lock.lock_queue );
336         dev->queue_count = 0;
337         dev->queue_reserved = 0;
338         dev->queue_slots = 0;
339         dev->queuelist = NULL;
340         dev->irq = 0;
341         dev->context_flag = 0;
342         dev->interrupt_flag = 0;
343         dev->dma_flag = 0;
344         dev->last_context = 0;
345         dev->last_switch = 0;
346         dev->last_checked = 0;
347         init_waitqueue_head( &dev->context_wait );
348
349         dev->ctx_start = 0;
350         dev->lck_start = 0;
351
352         dev->buf_rp = dev->buf;
353         dev->buf_wp = dev->buf;
354         dev->buf_end = dev->buf + DRM_BSZ;
355         dev->buf_async = NULL;
356         init_waitqueue_head( &dev->buf_readers );
357         init_waitqueue_head( &dev->buf_writers );
358
359         DRM_DEBUG( "\n" );
360
361         /*
362          * The kernel's context could be created here, but is now created
363          * in drm_dma_enqueue.  This is more resource-efficient for
364          * hardware that does not do DMA, but may mean that
365          * drm_select_queue fails between the time the interrupt is
366          * initialized and the time the queues are initialized.
367          */
368         DRIVER_POSTSETUP();
369         return 0;
370 }
371
372
373 /**
374  * Take down the DRM device.
375  *
376  * \param dev DRM device structure.
377  *
378  * Frees every resource in \p dev.
379  *
380  * \sa drm_device and setup().
381  */
382 static int DRM(takedown)( drm_device_t *dev )
383 {
384         drm_magic_entry_t *pt, *next;
385         drm_map_t *map;
386         drm_map_list_t *r_list;
387         struct list_head *list, *list_next;
388         drm_vma_entry_t *vma, *vma_next;
389         int i;
390
391         DRM_DEBUG( "\n" );
392
393         DRIVER_PRETAKEDOWN();
394 #if __HAVE_DMA_IRQ
395         if ( dev->irq ) DRM(irq_uninstall)( dev );
396 #endif
397
398         down( &dev->struct_sem );
399         del_timer( &dev->timer );
400
401         if ( dev->devname ) {
402                 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
403                            DRM_MEM_DRIVER );
404                 dev->devname = NULL;
405         }
406
407         if ( dev->unique ) {
408                 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
409                            DRM_MEM_DRIVER );
410                 dev->unique = NULL;
411                 dev->unique_len = 0;
412         }
413                                 /* Clear pid list */
414         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
415                 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
416                         next = pt->next;
417                         DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
418                 }
419                 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
420         }
421
422 #if __REALLY_HAVE_AGP
423                                 /* Clear AGP information */
424         if ( dev->agp ) {
425                 drm_agp_mem_t *entry;
426                 drm_agp_mem_t *nexte;
427
428                                 /* Remove AGP resources, but leave dev->agp
429                                    intact until drm_cleanup() is called. */
430                 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
431                         nexte = entry->next;
432                         if ( entry->bound ) DRM(unbind_agp)( entry->memory );
433                         DRM(free_agp)( entry->memory, entry->pages );
434                         DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
435                 }
436                 dev->agp->memory = NULL;
437
438                 if ( dev->agp->acquired ) DRM(agp_do_release)();
439
440                 dev->agp->acquired = 0;
441                 dev->agp->enabled  = 0;
442         }
443 #endif
444
445                                 /* Clear vma list (only built for debugging) */
446         if ( dev->vmalist ) {
447                 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
448                         vma_next = vma->next;
449                         DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
450                 }
451                 dev->vmalist = NULL;
452         }
453
454         if( dev->maplist ) {
455                 list_for_each_safe( list, list_next, &dev->maplist->head ) {
456                         r_list = (drm_map_list_t *)list;
457
458                         if ( ( map = r_list->map ) ) {
459                                 switch ( map->type ) {
460                                 case _DRM_REGISTERS:
461                                 case _DRM_FRAME_BUFFER:
462 #if __REALLY_HAVE_MTRR
463                                         if ( map->mtrr >= 0 ) {
464                                                 int retcode;
465                                                 retcode = mtrr_del( map->mtrr,
466                                                                     map->offset,
467                                                                     map->size );
468                                                 DRM_DEBUG( "mtrr_del=%d\n", retcode );
469                                         }
470 #endif
471                                         DRM(ioremapfree)( map->handle, map->size, dev );
472                                         break;
473                                 case _DRM_SHM:
474                                         vfree(map->handle);
475                                         break;
476
477                                 case _DRM_AGP:
478                                         /* Do nothing here, because this is all
479                                          * handled in the AGP/GART driver.
480                                          */
481                                         break;
482                                 case _DRM_SCATTER_GATHER:
483                                         /* Handle the case, but do nothing
484                                          * unless __HAVE_SG is defined.
485                                          */
486 #if __HAVE_SG
487                                         if(dev->sg) {
488                                                 DRM(sg_cleanup)(dev->sg);
489                                                 dev->sg = NULL;
490                                         }
491 #endif
492                                         break;
493                                 }
494                                 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
495                         }
496                         list_del( list );
497                         DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
498                 }
499                 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
500                 dev->maplist = NULL;
501         }
502
503 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
504         if ( dev->queuelist ) {
505                 for ( i = 0 ; i < dev->queue_count ; i++ ) {
506 #if __HAVE_DMA_WAITLIST
507                         DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
508 #endif
509                         if ( dev->queuelist[i] ) {
510                                 DRM(free)( dev->queuelist[i],
511                                           sizeof(*dev->queuelist[0]),
512                                           DRM_MEM_QUEUES );
513                                 dev->queuelist[i] = NULL;
514                         }
515                 }
516                 DRM(free)( dev->queuelist,
517                           dev->queue_slots * sizeof(*dev->queuelist),
518                           DRM_MEM_QUEUES );
519                 dev->queuelist = NULL;
520         }
521         dev->queue_count = 0;
522 #endif
523
524 #if __HAVE_DMA
525         DRM(dma_takedown)( dev );
526 #endif
527         if ( dev->lock.hw_lock ) {
528                 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
529                 dev->lock.filp = 0;
530                 wake_up_interruptible( &dev->lock.lock_queue );
531         }
532         up( &dev->struct_sem );
533
534         return 0;
535 }
536
537 /**
538  * Figure out how many instances to initialize.
539  *
540  * \return number of cards found.
541  *
542  * Searches for every PCI card in \c DRIVER_CARD_LIST with matching vendor and device ids.
543  */
544 static int drm_count_cards(void)
545 {
546         int num = 0;
547 #if defined(DRIVER_CARD_LIST)
548         int i;
549         drm_pci_list_t *l;
550         u16 device, vendor;
551         struct pci_dev *pdev = NULL;
552 #endif
553
554         DRM_DEBUG( "\n" );
555
556 #if defined(DRIVER_COUNT_CARDS)
557         num = DRIVER_COUNT_CARDS();
558 #elif defined(DRIVER_CARD_LIST)
559         for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
560                 pdev = NULL;
561                 vendor = l[i].vendor;
562                 device = l[i].device;
563                 if(device == 0xffff) device = PCI_ANY_ID;
564                 if(vendor == 0xffff) vendor = PCI_ANY_ID;
565                 while ((pdev = pci_find_device(vendor, device, pdev))) {
566                         num++;
567                 }
568         }
569 #else
570         num = DRIVER_NUM_CARDS;
571 #endif
572         DRM_DEBUG("numdevs = %d\n", num);
573         return num;
574 }
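
/*
 * For illustration: when a driver provides DRIVER_CARD_LIST rather than
 * DRIVER_COUNT_CARDS, the loop above expects an array of drm_pci_list_t
 * terminated by a zero vendor id, with 0xffff acting as a wildcard that is
 * mapped to PCI_ANY_ID.  A hypothetical list with illustrative PCI ids:
 *
 * \code
 * static drm_pci_list_t DRM(card_list)[] = {
 *         { .vendor = 0x102b, .device = 0x0521 },   // one vendor/device pair
 *         { .vendor = 0x102b, .device = 0xffff },   // any device from vendor
 *         { 0, 0 }                                  // terminator
 * };
 * #define DRIVER_CARD_LIST DRM(card_list)
 * \endcode
 */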
575
576 /**
577  * Module initialization. Called via init_module at module load time, or via
578  * linux/init/main.c (this is not currently supported).
579  *
580  * \return zero on success or a negative number on failure.
581  *
582  * Allocates and initializes an array of drm_device structures, and attempts to
583  * initialize all available devices, using consecutive minors, registering the
584  * stubs and initializing the AGP device.
585  * 
586  * Expands the \c DRIVER_PREINIT and \c DRIVER_POSTINIT macros before and
587  * after the initialization for driver customization.
588  */
589 static int __init drm_init( void )
590 {
591
592         drm_device_t *dev;
593         int i;
594 #if __HAVE_CTX_BITMAP
595         int retcode;
596 #endif
597         DRM_DEBUG( "\n" );
598
599 #ifdef MODULE
600         DRM(parse_options)( drm_opts );
601 #endif
602
603         DRM(numdevs) = drm_count_cards();
604         /* Force at least one instance. */
605         if (DRM(numdevs) <= 0)
606                 DRM(numdevs) = 1;
607
608         DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
609         if (!DRM(device)) {
610                 return -ENOMEM;
611         }
612         DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
613         if (!DRM(minor)) {
614                 kfree(DRM(device));
615                 return -ENOMEM;
616         }
617
618         DRIVER_PREINIT();
619
620         DRM(mem_init)();
621
622         for (i = 0; i < DRM(numdevs); i++) {
623                 dev = &(DRM(device)[i]);
624                 memset( (void *)dev, 0, sizeof(*dev) );
625                 dev->count_lock = SPIN_LOCK_UNLOCKED;
626                 init_timer( &dev->timer );
627                 sema_init( &dev->struct_sem, 1 );
628
629                 if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
630                         return -EPERM;
631                 dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
632                 dev->name   = DRIVER_NAME;
633
634 #if __REALLY_HAVE_AGP
635                 dev->agp = DRM(agp_init)();
636 #if __MUST_HAVE_AGP
637                 if ( dev->agp == NULL ) {
638                         DRM_ERROR( "Cannot initialize the agpgart module.\n" );
639                         DRM(stub_unregister)(DRM(minor)[i]);
640                         DRM(takedown)( dev );
641                         return -EINVAL;
642                 }
643 #endif
644 #if __REALLY_HAVE_MTRR
645                 if (dev->agp)
646                         dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
647                                        dev->agp->agp_info.aper_size*1024*1024,
648                                        MTRR_TYPE_WRCOMB,
649                                        1 );
650 #endif
651 #endif
652
653 #if __HAVE_CTX_BITMAP
654                 retcode = DRM(ctxbitmap_init)( dev );
655                 if( retcode ) {
656                         DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
657                         DRM(stub_unregister)(DRM(minor)[i]);
658                         DRM(takedown)( dev );
659                         return retcode;
660                 }
661 #endif
662                 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
663                         DRIVER_NAME,
664                         DRIVER_MAJOR,
665                         DRIVER_MINOR,
666                         DRIVER_PATCHLEVEL,
667                         DRIVER_DATE,
668                         DRM(minor)[i] );
669         }
670
671         DRIVER_POSTINIT();
672
673         return 0;
674 }
675
676 /**
677  * Called via cleanup_module() at module unload time.
678  *
679  * Cleans up all DRM devices, calling takedown() on each.
680  * 
681  * \sa drm_init().
682  */
683 static void __exit drm_cleanup( void )
684 {
685         drm_device_t *dev;
686         int i;
687
688         DRM_DEBUG( "\n" );
689
690         for (i = DRM(numdevs) - 1; i >= 0; i--) {
691                 dev = &(DRM(device)[i]);
692                 if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
693                         DRM_ERROR( "Cannot unload module\n" );
694                 } else {
695                         DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
696                         if (i == 0) {
697                                 DRM_INFO( "Module unloaded\n" );
698                         }
699                 }
700 #if __HAVE_CTX_BITMAP
701                 DRM(ctxbitmap_cleanup)( dev );
702 #endif
703
704 #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
705                 if ( dev->agp && dev->agp->agp_mtrr >= 0) {
706                         int retval;
707                         retval = mtrr_del( dev->agp->agp_mtrr,
708                                    dev->agp->agp_info.aper_base,
709                                    dev->agp->agp_info.aper_size*1024*1024 );
710                         DRM_DEBUG( "mtrr_del=%d\n", retval );
711                 }
712 #endif
713
714                 DRM(takedown)( dev );
715
716 #if __REALLY_HAVE_AGP
717                 if ( dev->agp ) {
718                         DRM(agp_uninit)();
719                         DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
720                         dev->agp = NULL;
721                 }
722 #endif
723         }
724         DRIVER_POSTCLEANUP();
725         kfree(DRM(minor));
726         kfree(DRM(device));
727         DRM(numdevs) = 0;
728 }
729
730 module_init( drm_init );
731 module_exit( drm_cleanup );
732
733
734 /**
735  * Get version information
736  *
737  * \param inode device inode.
738  * \param filp file pointer.
739  * \param cmd command.
740  * \param arg user argument, pointing to a drm_version structure.
741  * \return zero on success or negative number on failure.
742  *
743  * Fills in the version information in \p arg.
744  */
745 int DRM(version)( struct inode *inode, struct file *filp,
746                   unsigned int cmd, unsigned long arg )
747 {
748         drm_version_t version;
749         int len;
750
751         if ( copy_from_user( &version,
752                              (drm_version_t *)arg,
753                              sizeof(version) ) )
754                 return -EFAULT;
755
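/*
 * Copy one version string back to user space: the copy is clamped to the
 * buffer length the caller supplied, while the full string length is
 * reported back so user space can retry with a larger buffer.
 */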
756 #define DRM_COPY( name, value )                                         \
757         len = strlen( value );                                          \
758         if ( len > name##_len ) len = name##_len;                       \
759         name##_len = strlen( value );                                   \
760         if ( len && name ) {                                            \
761                 if ( copy_to_user( name, value, len ) )                 \
762                         return -EFAULT;                                 \
763         }
764
765         version.version_major = DRIVER_MAJOR;
766         version.version_minor = DRIVER_MINOR;
767         version.version_patchlevel = DRIVER_PATCHLEVEL;
768
769         DRM_COPY( version.name, DRIVER_NAME );
770         DRM_COPY( version.date, DRIVER_DATE );
771         DRM_COPY( version.desc, DRIVER_DESC );
772
773         if ( copy_to_user( (drm_version_t *)arg,
774                            &version,
775                            sizeof(version) ) )
776                 return -EFAULT;
777         return 0;
778 }
779
780 /**
781  * Open file.
782  * 
783  * \param inode device inode
784  * \param filp file pointer.
785  * \return zero on success or a negative number on failure.
786  *
787  * Searches for the DRM device with the same minor number, calls open_helper(),
788  * and increments the device open count. If the open count was previously zero,
789  * i.e., this is the first time the device is opened, then calls setup().
790  */
791 int DRM(open)( struct inode *inode, struct file *filp )
792 {
793         drm_device_t *dev = NULL;
794         int retcode = 0;
795         int i;
796
797         for (i = 0; i < DRM(numdevs); i++) {
798                 if (iminor(inode) == DRM(minor)[i]) {
799                         dev = &(DRM(device)[i]);
800                         break;
801                 }
802         }
803         if (!dev) {
804                 return -ENODEV;
805         }
806
807         retcode = DRM(open_helper)( inode, filp, dev );
808         if ( !retcode ) {
809                 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
810                 spin_lock( &dev->count_lock );
811                 if ( !dev->open_count++ ) {
812                         spin_unlock( &dev->count_lock );
813                         return DRM(setup)( dev );
814                 }
815                 spin_unlock( &dev->count_lock );
816         }
817
818         return retcode;
819 }
820
821 /**
822  * Release file.
823  *
824  * \param inode device inode
825  * \param filp file pointer.
826  * \return zero on success or a negative number on failure.
827  *
828  * If the hardware lock is held then free it, and take it again for the kernel
829  * context since it's necessary to reclaim buffers. Unlink the file private
830  * data from its list and free it. Decreases the open count and if it reaches
831  * data from its list and free it. Decreases the open count and, if it reaches
832  * zero, calls takedown().
833 int DRM(release)( struct inode *inode, struct file *filp )
834 {
835         drm_file_t *priv = filp->private_data;
836         drm_device_t *dev;
837         int retcode = 0;
838
839         lock_kernel();
840         dev = priv->dev;
841
842         DRM_DEBUG( "open_count = %d\n", dev->open_count );
843
844         DRIVER_PRERELEASE();
845
846         /* ========================================================
847          * Begin inline drm_release
848          */
849
850         DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
851                    current->pid, (long)old_encode_dev(dev->device), dev->open_count );
852
853         if ( priv->lock_count && dev->lock.hw_lock &&
854              _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
855              dev->lock.filp == filp ) {
856                 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
857                         filp,
858                         _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
859 #if __HAVE_RELEASE
860                 DRIVER_RELEASE();
861 #endif
862                 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
863                                 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
864
865                                 /* FIXME: may require heavy-handed reset of
866                                    hardware at this point, possibly
867                                    processed via a callback to the X
868                                    server. */
869         }
870 #if __HAVE_RELEASE
871         else if ( priv->lock_count && dev->lock.hw_lock ) {
872                 /* The lock is required to reclaim buffers */
873                 DECLARE_WAITQUEUE( entry, current );
874
875                 add_wait_queue( &dev->lock.lock_queue, &entry );
876                 for (;;) {
877                         current->state = TASK_INTERRUPTIBLE;
878                         if ( !dev->lock.hw_lock ) {
879                                 /* Device has been unregistered */
880                                 retcode = -EINTR;
881                                 break;
882                         }
883                         if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
884                                              DRM_KERNEL_CONTEXT ) ) {
885                                 dev->lock.filp      = filp;
886                                 dev->lock.lock_time = jiffies;
887                                 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
888                                 break;  /* Got lock */
889                         }
890                                 /* Contention */
891                         schedule();
892                         if ( signal_pending( current ) ) {
893                                 retcode = -ERESTARTSYS;
894                                 break;
895                         }
896                 }
897                 current->state = TASK_RUNNING;
898                 remove_wait_queue( &dev->lock.lock_queue, &entry );
899                 if( !retcode ) {
900                         DRIVER_RELEASE();
901                         DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
902                                         DRM_KERNEL_CONTEXT );
903                 }
904         }
905 #elif __HAVE_DMA
906         DRM(reclaim_buffers)( filp );
907 #endif
908
909         DRM(fasync)( -1, filp, 0 );
910
911         down( &dev->struct_sem );
912         if ( priv->remove_auth_on_close == 1 ) {
913                 drm_file_t *temp = dev->file_first;
914                 while ( temp ) {
915                         temp->authenticated = 0;
916                         temp = temp->next;
917                 }
918         }
919         if ( priv->prev ) {
920                 priv->prev->next = priv->next;
921         } else {
922                 dev->file_first  = priv->next;
923         }
924         if ( priv->next ) {
925                 priv->next->prev = priv->prev;
926         } else {
927                 dev->file_last   = priv->prev;
928         }
929         up( &dev->struct_sem );
930         
931         DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
932
933         /* ========================================================
934          * End inline drm_release
935          */
936
937         atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
938         spin_lock( &dev->count_lock );
939         if ( !--dev->open_count ) {
940                 if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
941                         DRM_ERROR( "Device busy: %d %d\n",
942                                    atomic_read( &dev->ioctl_count ),
943                                    dev->blocked );
944                         spin_unlock( &dev->count_lock );
945                         unlock_kernel();
946                         return -EBUSY;
947                 }
948                 spin_unlock( &dev->count_lock );
949                 unlock_kernel();
950                 return DRM(takedown)( dev );
951         }
952         spin_unlock( &dev->count_lock );
953
954         unlock_kernel();
955
956         return retcode;
957 }
958
959 /** 
960  * Called whenever a process performs an ioctl on /dev/drm.
961  *
962  * \param inode device inode.
963  * \param filp file pointer.
964  * \param cmd command.
965  * \param arg user argument.
966  * \return zero on success or negative number on failure.
967  *
968  * Looks up the ioctl function in the ::ioctls table, checking for root
969  * privileges where required, and dispatches to the respective function.
970  */
971 int DRM(ioctl)( struct inode *inode, struct file *filp,
972                 unsigned int cmd, unsigned long arg )
973 {
974         drm_file_t *priv = filp->private_data;
975         drm_device_t *dev = priv->dev;
976         drm_ioctl_desc_t *ioctl;
977         drm_ioctl_t *func;
978         int nr = DRM_IOCTL_NR(cmd);
979         int retcode = 0;
980
981         atomic_inc( &dev->ioctl_count );
982         atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
983         ++priv->ioctl_count;
984
985         DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
986                    current->pid, cmd, nr, (long)old_encode_dev(dev->device), 
987                    priv->authenticated );
988
989         if ( nr >= DRIVER_IOCTL_COUNT ) {
990                 retcode = -EINVAL;
991         } else {
992                 ioctl = &DRM(ioctls)[nr];
993                 func = ioctl->func;
994
995                 if ( !func ) {
996                         DRM_DEBUG( "no function\n" );
997                         retcode = -EINVAL;
998                 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
999                             ( ioctl->auth_needed && !priv->authenticated ) ) {
1000                         retcode = -EACCES;
1001                 } else {
1002                         retcode = func( inode, filp, cmd, arg );
1003                 }
1004         }
1005
1006         atomic_dec( &dev->ioctl_count );
1007         return retcode;
1008 }
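
/*
 * For illustration: every DRM request reaches this dispatcher via ioctl(2) on
 * the device node (typically /dev/dri/card0 on 2.6 kernels).  A hypothetical
 * user-space query of the driver version, matching the DRM_IOCTL_VERSION
 * entry in the table above:
 *
 * \code
 * #include <fcntl.h>
 * #include <stdio.h>
 * #include <string.h>
 * #include <sys/ioctl.h>
 * #include "drm.h"                     // drm_version_t, DRM_IOCTL_VERSION
 *
 * int main(void)
 * {
 *         drm_version_t v;
 *         char name[64] = "", date[64] = "", desc[64] = "";
 *         int fd = open("/dev/dri/card0", O_RDWR);
 *
 *         memset(&v, 0, sizeof(v));
 *         v.name = name;  v.name_len = sizeof(name) - 1;
 *         v.date = date;  v.date_len = sizeof(date) - 1;
 *         v.desc = desc;  v.desc_len = sizeof(desc) - 1;
 *         if (fd >= 0 && ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
 *                 printf("%s %d.%d.%d\n", name, v.version_major,
 *                        v.version_minor, v.version_patchlevel);
 *         return 0;
 * }
 * \endcode
 */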
1009
1010 /** 
1011  * Lock ioctl.
1012  *
1013  * \param inode device inode.
1014  * \param filp file pointer.
1015  * \param cmd command.
1016  * \param arg user argument, pointing to a drm_lock structure.
1017  * \return zero on success or negative number on failure.
1018  *
1019  * Add the current task to the lock wait queue, and attempt to take the lock.
1020  */
1021 int DRM(lock)( struct inode *inode, struct file *filp,
1022                unsigned int cmd, unsigned long arg )
1023 {
1024         drm_file_t *priv = filp->private_data;
1025         drm_device_t *dev = priv->dev;
1026         DECLARE_WAITQUEUE( entry, current );
1027         drm_lock_t lock;
1028         int ret = 0;
1029 #if __HAVE_MULTIPLE_DMA_QUEUES
1030         drm_queue_t *q;
1031 #endif
1032
1033         ++priv->lock_count;
1034
1035         if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1036                 return -EFAULT;
1037
1038         if ( lock.context == DRM_KERNEL_CONTEXT ) {
1039                 DRM_ERROR( "Process %d using kernel context %d\n",
1040                            current->pid, lock.context );
1041                 return -EINVAL;
1042         }
1043
1044         DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1045                    lock.context, current->pid,
1046                    dev->lock.hw_lock->lock, lock.flags );
1047
1048 #if __HAVE_DMA_QUEUE
1049         if ( lock.context < 0 )
1050                 return -EINVAL;
1051 #elif __HAVE_MULTIPLE_DMA_QUEUES
1052         if ( lock.context < 0 || lock.context >= dev->queue_count )
1053                 return -EINVAL;
1054         q = dev->queuelist[lock.context];
1055 #endif
1056
1057 #if __HAVE_DMA_FLUSH
1058         ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
1059 #endif
1060         if ( !ret ) {
1061                 add_wait_queue( &dev->lock.lock_queue, &entry );
1062                 for (;;) {
1063                         current->state = TASK_INTERRUPTIBLE;
1064                         if ( !dev->lock.hw_lock ) {
1065                                 /* Device has been unregistered */
1066                                 ret = -EINTR;
1067                                 break;
1068                         }
1069                         if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
1070                                              lock.context ) ) {
1071                                 dev->lock.filp      = filp;
1072                                 dev->lock.lock_time = jiffies;
1073                                 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
1074                                 break;  /* Got lock */
1075                         }
1076
1077                                 /* Contention */
1078                         schedule();
1079                         if ( signal_pending( current ) ) {
1080                                 ret = -ERESTARTSYS;
1081                                 break;
1082                         }
1083                 }
1084                 current->state = TASK_RUNNING;
1085                 remove_wait_queue( &dev->lock.lock_queue, &entry );
1086         }
1087
1088 #if __HAVE_DMA_FLUSH
1089         DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
1090 #endif
1091
1092         if ( !ret ) {
1093                 sigemptyset( &dev->sigmask );
1094                 sigaddset( &dev->sigmask, SIGSTOP );
1095                 sigaddset( &dev->sigmask, SIGTSTP );
1096                 sigaddset( &dev->sigmask, SIGTTIN );
1097                 sigaddset( &dev->sigmask, SIGTTOU );
1098                 dev->sigdata.context = lock.context;
1099                 dev->sigdata.lock    = dev->lock.hw_lock;
1100                 block_all_signals( DRM(notifier),
1101                                    &dev->sigdata, &dev->sigmask );
1102
1103 #if __HAVE_DMA_READY
1104                 if ( lock.flags & _DRM_LOCK_READY ) {
1105                         DRIVER_DMA_READY();
1106                 }
1107 #endif
1108 #if __HAVE_DMA_QUIESCENT
1109                 if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1110                         DRIVER_DMA_QUIESCENT();
1111                 }
1112 #endif
1113                 /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the
1114                  * drm modules in the DRI cvs tree, but it is required
1115                  * by the Sparc driver.
1116                  */
1117 #if __HAVE_KERNEL_CTX_SWITCH
1118                 if ( dev->last_context != lock.context ) {
1119                         DRM(context_switch)(dev, dev->last_context,
1120                                             lock.context);
1121                 }
1122 #endif
1123         }
1124
1125         DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1126
1127         return ret;
1128 }
1129
1130 /** 
1131  * Unlock ioctl.
1132  *
1133  * \param inode device inode.
1134  * \param filp file pointer.
1135  * \param cmd command.
1136  * \param arg user argument, pointing to a drm_lock structure.
1137  * \return zero on success or negative number on failure.
1138  *
1139  * Transfer and free the lock.
1140  */
1141 int DRM(unlock)( struct inode *inode, struct file *filp,
1142                  unsigned int cmd, unsigned long arg )
1143 {
1144         drm_file_t *priv = filp->private_data;
1145         drm_device_t *dev = priv->dev;
1146         drm_lock_t lock;
1147
1148         if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1149                 return -EFAULT;
1150
1151         if ( lock.context == DRM_KERNEL_CONTEXT ) {
1152                 DRM_ERROR( "Process %d using kernel context %d\n",
1153                            current->pid, lock.context );
1154                 return -EINVAL;
1155         }
1156
1157         atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
1158
1159         /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm
1160          * modules in the DRI cvs tree, but it is required by the
1161          * Sparc driver.
1162          */
1163 #if __HAVE_KERNEL_CTX_SWITCH
1164         /* We no longer really hold it, but if we are the next
1165          * agent to request it then we should just be able to
1166          * take it immediately and not eat the ioctl.
1167          */
1168         dev->lock.filp = 0;
1169         {
1170                 __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
1171                 unsigned int old, new, prev, ctx;
1172
1173                 ctx = lock.context;
1174                 do {
1175                         old  = *plock;
1176                         new  = ctx;
1177                         prev = cmpxchg(plock, old, new);
1178                 } while (prev != old);
1179         }
1180         wake_up_interruptible(&dev->lock.lock_queue);
1181 #else
1182         DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1183                             DRM_KERNEL_CONTEXT );
1184 #if __HAVE_DMA_SCHEDULE
1185         DRM(dma_schedule)( dev, 1 );
1186 #endif
1187
1188         if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1189                              DRM_KERNEL_CONTEXT ) ) {
1190                 DRM_ERROR( "\n" );
1191         }
1192 #endif /* !__HAVE_KERNEL_CTX_SWITCH */
1193
1194         unblock_all_signals();
1195         return 0;
1196 }
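
/*
 * For illustration: user space takes and releases the heavyweight hardware
 * lock through the two ioctls above.  A hypothetical sketch, where ctx is a
 * context handle previously obtained with DRM_IOCTL_ADD_CTX and fd is an
 * authenticated DRM file descriptor:
 *
 * \code
 * drm_lock_t l;
 *
 * memset(&l, 0, sizeof(l));
 * l.context = ctx;
 * l.flags   = _DRM_LOCK_READY;          // optional flags, see drm.h
 * ioctl(fd, DRM_IOCTL_LOCK, &l);        // may sleep until the lock is free
 * // ... program the hardware ...
 * ioctl(fd, DRM_IOCTL_UNLOCK, &l);      // hand the lock back
 * \endcode
 */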