/**
 * \file drm_drv.h
 * Generic driver template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * \code
 * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME          "mga"
 * #define DRIVER_DESC          "Matrox G200/G400"
 * #define DRIVER_DATE          "20001127"
 *
 * #define DRIVER_MAJOR         2
 * #define DRIVER_MINOR         0
 * #define DRIVER_PATCHLEVEL    2
 *
 * #define DRIVER_IOCTL_COUNT   DRM_ARRAY_SIZE( mga_ioctls )
 *
 * #define DRM(x)               mga_##x
 * \endcode
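 *
 * A driver can also hook its own ioctls into the common table below by
 * defining \c DRIVER_IOCTLS.  A hypothetical MGA entry might look like this
 * (the fields of each entry are the handler, the auth_needed flag and the
 * root_only flag):
 *
 * \code
 * #define DRIVER_IOCTLS \
 *     [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 },
 * \endcode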
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

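/** If the driver does not provide any ioctls of its own, DRIVER_IOCTLS
 * defaults to empty so that the common ioctl table below still compiles. */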
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif

#ifndef MODULE
/** Use an additional macro to avoid preprocessor troubles */
#define DRM_OPTIONS_FUNC DRM(options)
/**
 * Called by the kernel to parse command-line options passed via the
 * boot-loader (e.g., LILO).  It calls the insmod option routine,
 * parse_options().
 */
static int __init DRM(options)( char *str )
{
        DRM(parse_options)( str );
        return 1;
}

__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
#undef DRM_OPTIONS_FUNC
#endif

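/** Static device array; DRM(numdevs) counts how many slots have been
 * claimed by DRM(probe). */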
#define MAX_DEVICES 4
static drm_device_t     DRM(device)[MAX_DEVICES];
static int              DRM(numdevs) = 0;

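/** File operations passed to DRM(stub_register) for every minor handled by
 * this driver. */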
struct file_operations  DRM(fops) = {
        .owner   = THIS_MODULE,
        .open    = DRM(open),
        .flush   = DRM(flush),
        .release = DRM(release),
        .ioctl   = DRM(ioctl),
        .mmap    = DRM(mmap),
        .fasync  = DRM(fasync),
        .poll    = DRM(poll),
        .read    = DRM(read),
};

/** Ioctl table */
drm_ioctl_desc_t                  DRM(ioctls)[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_by_busid), 0, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)]   = { DRM(setversion),  0, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(noop),        1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(noop),        1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]          = { DRM(lock),        1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(noop),        1, 0 },

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },
        /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */

        [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },

#if __OS_HAS_AGP
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
#endif

        [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
        [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },

        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)]   = { DRM(wait_vblank), 0, 0 },

        DRIVER_IOCTLS
};

#define DRIVER_IOCTL_COUNT      DRM_ARRAY_SIZE( DRM(ioctls) )

#ifdef MODULE
static char *drm_opts = NULL;
#endif

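/* Module metadata; when built as a module, drm_opts holds the option string
 * handed to DRM(parse_options) from drm_init(). */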
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
MODULE_LICENSE("GPL and additional rights");

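/**
 * Set up the DRM device.
 *
 * \param dev DRM device structure.
 * \return zero on success or a negative number on failure.
 *
 * Runs the driver's presetup hook, resets the per-open bookkeeping (counters,
 * magic hash, map and context lists, lock wait queue, message buffer), sets
 * up DMA for drivers that have it, and finishes with the driver's postsetup
 * hook.  Called by open() whenever the open count goes from zero to one.
 *
 * \sa takedown().
 */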
static int DRM(setup)( drm_device_t *dev )
{
        int i;
        int ret;

        if (dev->fn_tbl.presetup)
        {
                ret = dev->fn_tbl.presetup(dev);
                if (ret != 0)
                        return ret;
        }

        atomic_set( &dev->ioctl_count, 0 );
        atomic_set( &dev->vma_count, 0 );
        dev->buf_use = 0;
        atomic_set( &dev->buf_alloc, 0 );

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
        {
                i = DRM(dma_setup)( dev );
                if ( i < 0 )
                        return i;
        }

        for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
                atomic_set( &dev->counts[i], 0 );

        for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
                                  DRM_MEM_MAPS);
        if(dev->maplist == NULL) return -ENOMEM;
        memset(dev->maplist, 0, sizeof(*dev->maplist));
        INIT_LIST_HEAD(&dev->maplist->head);

        dev->ctxlist = DRM(alloc)(sizeof(*dev->ctxlist),
                                  DRM_MEM_CTXLIST);
        if(dev->ctxlist == NULL) return -ENOMEM;
        memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
        INIT_LIST_HEAD(&dev->ctxlist->head);

        dev->vmalist = NULL;
        dev->sigdata.lock = dev->lock.hw_lock = NULL;
        init_waitqueue_head( &dev->lock.lock_queue );
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head( &dev->context_wait );
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_rp = dev->buf;
        dev->buf_wp = dev->buf;
        dev->buf_end = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head( &dev->buf_readers );
        init_waitqueue_head( &dev->buf_writers );

        DRM_DEBUG( "\n" );

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */
        if (dev->fn_tbl.postsetup)
                dev->fn_tbl.postsetup(dev);

        return 0;
}


/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device and setup().
 */
static int DRM(takedown)( drm_device_t *dev )
{
        drm_magic_entry_t *pt, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list, *list_next;
        drm_vma_entry_t *vma, *vma_next;
        int i;

        DRM_DEBUG( "\n" );

        if (dev->fn_tbl.pretakedown)
                dev->fn_tbl.pretakedown(dev);

        if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );

        down( &dev->struct_sem );
        del_timer( &dev->timer );

        if ( dev->devname ) {
                DRM(free)( dev->devname, strlen( dev->devname ) + 1,
                           DRM_MEM_DRIVER );
                dev->devname = NULL;
        }

        if ( dev->unique ) {
                DRM(free)( dev->unique, strlen( dev->unique ) + 1,
                           DRM_MEM_DRIVER );
                dev->unique = NULL;
                dev->unique_len = 0;
        }
                                /* Clear pid list */
        for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
                for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
                        next = pt->next;
                        DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

                                /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                                /* Remove AGP resources, but leave dev->agp
                                   intact until drv_cleanup is called. */
                for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
                        nexte = entry->next;
                        if ( entry->bound ) DRM(unbind_agp)( entry->memory );
                        DRM(free_agp)( entry->memory, entry->pages );
                        DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
                }
                dev->agp->memory = NULL;

                if ( dev->agp->acquired ) DRM(agp_do_release)();

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }

                                /* Clear vma list (only built for debugging) */
        if ( dev->vmalist ) {
                for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
                        vma_next = vma->next;
                        DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
                }
                dev->vmalist = NULL;
        }

        if( dev->maplist ) {
                list_for_each_safe( list, list_next, &dev->maplist->head ) {
                        r_list = (drm_map_list_t *)list;

                        if ( ( map = r_list->map ) ) {
                                switch ( map->type ) {
                                case _DRM_REGISTERS:
                                case _DRM_FRAME_BUFFER:
                                        if (drm_core_has_MTRR(dev)) {
                                                if ( map->mtrr >= 0 ) {
                                                        int retcode;
                                                        retcode = mtrr_del( map->mtrr,
                                                                            map->offset,
                                                                            map->size );
                                                        DRM_DEBUG( "mtrr_del=%d\n", retcode );
                                                }
                                        }
                                        DRM(ioremapfree)( map->handle, map->size, dev );
                                        break;
                                case _DRM_SHM:
                                        vfree(map->handle);
                                        break;

                                case _DRM_AGP:
                                        /* Do nothing here, because this is all
                                         * handled in the AGP/GART driver.
                                         */
                                        break;
                                case _DRM_SCATTER_GATHER:
                                        /* Handle it */
                                        if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
                                                DRM(sg_cleanup)(dev->sg);
                                                dev->sg = NULL;
                                        }
                                        break;
                                }
                                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        }
                        list_del( list );
                        DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
                }
                DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
                dev->maplist = NULL;
        }

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
                for ( i = 0 ; i < dev->queue_count ; i++ ) {
                        if ( dev->queuelist[i] ) {
                                DRM(free)( dev->queuelist[i],
                                          sizeof(*dev->queuelist[0]),
                                          DRM_MEM_QUEUES );
                                dev->queuelist[i] = NULL;
                        }
                }
                DRM(free)( dev->queuelist,
                          dev->queue_slots * sizeof(*dev->queuelist),
                          DRM_MEM_QUEUES );
                dev->queuelist = NULL;
        }
        dev->queue_count = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                DRM(dma_takedown)( dev );

        if ( dev->lock.hw_lock ) {
                dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.filp = NULL;
                wake_up_interruptible( &dev->lock.lock_queue );
        }
        up( &dev->struct_sem );

        return 0;
}

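/** Install the core's default callbacks; a driver may override any of them
 * in its DRM(driver_register_fns) implementation. */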
static void DRM(init_fn_table)(struct drm_device *dev)
{
        dev->fn_tbl.reclaim_buffers = DRM(core_reclaim_buffers);
        dev->fn_tbl.get_map_ofs = DRM(core_get_map_ofs);
        dev->fn_tbl.get_reg_ofs = DRM(core_get_reg_ofs);
}

#include "drm_pciids.h"

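/** PCI IDs this driver binds to, expanded from drm_pciids.h */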
static struct pci_device_id DRM(pciidlist)[] = {
        DRM(PCI_IDS)
};

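/**
 * Probe and initialize a PCI device.
 *
 * \param pdev PCI device to probe.
 * \return zero on success or a negative number on failure.
 *
 * Matches \p pdev against the PCI ID list, enables it, claims the next slot
 * in the static device array and registers a stub minor, then fills in the
 * PCI and statistics bookkeeping.  The driver's register and preinit hooks
 * are run, AGP is initialized (with an MTRR covering its aperture where
 * available), the context bitmap is allocated, and the driver's postinit
 * hook finishes the job.
 */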
static int DRM(probe)(struct pci_dev *pdev)
{
        drm_device_t *dev;
        int retcode;
        int i;
        int is_compat = 0;

        DRM_DEBUG( "\n" );

        for (i = 0; DRM(pciidlist)[i].vendor != 0; i++) {
                if ((DRM(pciidlist)[i].vendor == pdev->vendor) &&
                    (DRM(pciidlist)[i].device == pdev->device)) {
                        is_compat = 1;
                }
        }
        if (is_compat == 0)
                return -ENODEV;

        if (DRM(numdevs) >= MAX_DEVICES)
                return -ENODEV;

        if ((retcode = pci_enable_device(pdev)))
                return retcode;

        dev = &(DRM(device)[DRM(numdevs)]);

        memset( (void *)dev, 0, sizeof(*dev) );
        dev->count_lock = SPIN_LOCK_UNLOCKED;
        init_timer( &dev->timer );
        sema_init( &dev->struct_sem, 1 );
        sema_init( &dev->ctxlist_sem, 1 );

        if ((dev->minor = DRM(stub_register)(DRIVER_NAME, &DRM(fops), dev)) < 0)
                return -EPERM;
        dev->device = MKDEV(DRM_MAJOR, dev->minor );
        dev->name   = DRIVER_NAME;

        dev->pdev   = pdev;
#ifdef __alpha__
        dev->hose   = pdev->sysdata;
        dev->pci_domain = dev->hose->bus->number;
#else
        dev->pci_domain = 0;
#endif
        dev->pci_bus = pdev->bus->number;
        dev->pci_slot = PCI_SLOT(pdev->devfn);
        dev->pci_func = PCI_FUNC(pdev->devfn);
        dev->irq = pdev->irq;

        /* dev_priv_size can be changed by a driver in driver_register_fns */
        dev->dev_priv_size = sizeof(u32);

        /* the DRM has 6 basic counters - drivers add theirs in register_fns */
        dev->counters = 6;
        dev->types[0]  = _DRM_STAT_LOCK;
        dev->types[1]  = _DRM_STAT_OPENS;
        dev->types[2]  = _DRM_STAT_CLOSES;
        dev->types[3]  = _DRM_STAT_IOCTLS;
        dev->types[4]  = _DRM_STAT_LOCKS;
        dev->types[5]  = _DRM_STAT_UNLOCKS;

        DRM(init_fn_table)(dev);

        DRM(driver_register_fns)(dev);

        if (dev->fn_tbl.preinit)
                dev->fn_tbl.preinit(dev);

        if (drm_core_has_AGP(dev))
        {
                dev->agp = DRM(agp_init)();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
                        DRM_ERROR( "Cannot initialize the agpgart module.\n" );
                        DRM(stub_unregister)(dev->minor);
                        DRM(takedown)( dev );
                        return -EINVAL;
                }
                if (drm_core_has_MTRR(dev)) {
                        if (dev->agp)
                                dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
                                                               dev->agp->agp_info.aper_size*1024*1024,
                                                               MTRR_TYPE_WRCOMB,
                                                               1 );
                }
        }

        retcode = DRM(ctxbitmap_init)( dev );
        if( retcode ) {
                DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
                DRM(stub_unregister)(dev->minor);
                DRM(takedown)( dev );
                return retcode;
        }

        DRM(numdevs)++; /* no errors, mark it reserved */

        DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
                DRIVER_NAME,
                DRIVER_MAJOR,
                DRIVER_MINOR,
                DRIVER_PATCHLEVEL,
                DRIVER_DATE,
                dev->minor,
                pci_pretty_name(pdev));

        if (dev->fn_tbl.postinit)
                dev->fn_tbl.postinit(dev);

        return 0;
}

/**
 * Module initialization. Called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes an array of drm_device structures and attempts to initialize
 * every PCI device that matches the driver's ID list, using consecutive
 * minors, registering the stubs and initializing the AGP device.
 *
 * The driver's preinit and postinit hooks are run before and after each
 * device's initialization for driver customization.
 */
static int __init drm_init( void )
{
        struct pci_dev *pdev = NULL;

        DRM_DEBUG( "\n" );

#ifdef MODULE
        DRM(parse_options)( drm_opts );
#endif

        DRM(mem_init)();

        while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
                DRM(probe)(pdev);
        }
        return 0;
}

/**
 * Called via cleanup_module() at module unload time.
 *
 * Cleans up all DRM devices, calling takedown().
 *
 * \sa drm_init().
 */
static void __exit drm_cleanup( void )
{
        drm_device_t *dev;
        int i;

        DRM_DEBUG( "\n" );

        for (i = DRM(numdevs) - 1; i >= 0; i--) {
                dev = &(DRM(device)[i]);
                if ( DRM(stub_unregister)(dev->minor) ) {
                        DRM_ERROR( "Cannot unload module\n" );
                } else {
                        DRM_DEBUG("minor %d unregistered\n", dev->minor);
                        if (i == 0) {
                                DRM_INFO( "Module unloaded\n" );
                        }
                }

                DRM(ctxbitmap_cleanup)( dev );

                if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
                    dev->agp && dev->agp->agp_mtrr >= 0) {
                        int retval;
                        retval = mtrr_del( dev->agp->agp_mtrr,
                                   dev->agp->agp_info.aper_base,
                                   dev->agp->agp_info.aper_size*1024*1024 );
                        DRM_DEBUG( "mtrr_del=%d\n", retval );
                }

                DRM(takedown)( dev );

                if (drm_core_has_AGP(dev) && dev->agp ) {
                        DRM(agp_uninit)();
                        DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
                        dev->agp = NULL;
                }

                if (dev->fn_tbl.postcleanup)
                        dev->fn_tbl.postcleanup(dev);

        }
        DRM(numdevs) = 0;
}

module_init( drm_init );
module_exit( drm_cleanup );

/**
 * Get version information
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_version structure.
 * \return zero on success or negative number on failure.
 *
 * Fills in the version information in \p arg.
 */
int DRM(version)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_version_t __user *argp = (void __user *)arg;
        drm_version_t version;
        int len;

        if ( copy_from_user( &version, argp, sizeof(version) ) )
                return -EFAULT;

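/* Copy a version string to user space: at most name##_len bytes are copied,
 * and name##_len is updated to report the string's full length. */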
#define DRM_COPY( name, value )                                         \
        len = strlen( value );                                          \
        if ( len > name##_len ) len = name##_len;                       \
        name##_len = strlen( value );                                   \
        if ( len && name ) {                                            \
                if ( copy_to_user( name, value, len ) )                 \
                        return -EFAULT;                                 \
        }

        version.version_major = DRIVER_MAJOR;
        version.version_minor = DRIVER_MINOR;
        version.version_patchlevel = DRIVER_PATCHLEVEL;

        DRM_COPY( version.name, DRIVER_NAME );
        DRM_COPY( version.date, DRIVER_DATE );
        DRM_COPY( version.desc, DRIVER_DESC );

        if ( copy_to_user( argp, &version, sizeof(version) ) )
                return -EFAULT;
        return 0;
}

/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the matching minor number, calls
 * open_helper(), and increments the device open count. If the open count was
 * previously zero, i.e., this is the first time the device is opened, setup()
 * is called.
 */
int DRM(open)( struct inode *inode, struct file *filp )
{
        drm_device_t *dev = NULL;
        int retcode = 0;
        int i;

        for (i = 0; i < DRM(numdevs); i++) {
                if (iminor(inode) == DRM(device)[i].minor) {
                        dev = &(DRM(device)[i]);
                        break;
                }
        }
        if (!dev) {
                return -ENODEV;
        }

        retcode = DRM(open_helper)( inode, filp, dev );
        if ( !retcode ) {
                atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
                spin_lock( &dev->count_lock );
                if ( !dev->open_count++ ) {
                        spin_unlock( &dev->count_lock );
                        return DRM(setup)( dev );
                }
                spin_unlock( &dev->count_lock );
        }

        return retcode;
}

/**
 * Release file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and, if it reaches
 * zero, calls takedown().
 */
int DRM(release)( struct inode *inode, struct file *filp )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->dev;

        DRM_DEBUG( "open_count = %d\n", dev->open_count );

        if (dev->fn_tbl.prerelease)
                dev->fn_tbl.prerelease(dev, filp);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
                   current->pid, (long)old_encode_dev(dev->device), dev->open_count );

        if ( priv->lock_count && dev->lock.hw_lock &&
             _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
             dev->lock.filp == filp ) {
                DRM_DEBUG( "File %p released, freeing lock for context %d\n",
                        filp,
                        _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

                if (dev->fn_tbl.release)
                        dev->fn_tbl.release(dev, filp);

                DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

                                /* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
        }
        else if ( dev->fn_tbl.release && priv->lock_count && dev->lock.hw_lock ) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE( entry, current );

                add_wait_queue( &dev->lock.lock_queue, &entry );
                for (;;) {
                        __set_current_state(TASK_INTERRUPTIBLE);
                        if ( !dev->lock.hw_lock ) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
                                             DRM_KERNEL_CONTEXT ) ) {
                                dev->lock.filp      = filp;
                                dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                                break;  /* Got lock */
                        }
                                /* Contention */
                        schedule();
                        if ( signal_pending( current ) ) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                __set_current_state(TASK_RUNNING);
                remove_wait_queue( &dev->lock.lock_queue, &entry );
                if( !retcode ) {
                        if (dev->fn_tbl.release)
                                dev->fn_tbl.release(dev, filp);
                        DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT );
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
        {
                dev->fn_tbl.reclaim_buffers(filp);
        }

        DRM(fasync)( -1, filp, 0 );

        down( &dev->ctxlist_sem );
        if ( !list_empty( &dev->ctxlist->head ) ) {
                drm_ctx_list_t *pos, *n;

                list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
                        if ( pos->tag == priv &&
                             pos->handle != DRM_KERNEL_CONTEXT ) {
                                if (dev->fn_tbl.context_dtor)
                                        dev->fn_tbl.context_dtor(dev, pos->handle);

                                DRM(ctxbitmap_free)( dev, pos->handle );

                                list_del( &pos->head );
                                DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
                                --dev->ctx_count;
                        }
                }
        }
        up( &dev->ctxlist_sem );

        down( &dev->struct_sem );
        if ( priv->remove_auth_on_close == 1 ) {
                drm_file_t *temp = dev->file_first;
                while ( temp ) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if ( priv->prev ) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first  = priv->next;
        }
        if ( priv->next ) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last   = priv->prev;
        }
        up( &dev->struct_sem );

        if (dev->fn_tbl.free_filp_priv)
                dev->fn_tbl.free_filp_priv(dev, priv);

        DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
        spin_lock( &dev->count_lock );
        if ( !--dev->open_count ) {
                if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
                        DRM_ERROR( "Device busy: %d %d\n",
                                   atomic_read( &dev->ioctl_count ),
                                   dev->blocked );
                        spin_unlock( &dev->count_lock );
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock( &dev->count_lock );
                unlock_kernel();
                return DRM(takedown)( dev );
        }
        spin_unlock( &dev->count_lock );

        unlock_kernel();

        return retcode;
}

/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 *
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges or authentication where required, and dispatches to the
 * respective function.
 */
int DRM(ioctl)( struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_ioctl_desc_t *ioctl;
        drm_ioctl_t *func;
        int nr = DRM_IOCTL_NR(cmd);
        int retcode = 0;

        atomic_inc( &dev->ioctl_count );
        atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
        ++priv->ioctl_count;

        DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
                   current->pid, cmd, nr, (long)old_encode_dev(dev->device),
                   priv->authenticated );

        if ( nr >= DRIVER_IOCTL_COUNT ) {
                retcode = -EINVAL;
        } else {
                ioctl = &DRM(ioctls)[nr];
                func = ioctl->func;

                if ( !func ) {
                        DRM_DEBUG( "no function\n" );
                        retcode = -EINVAL;
                } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) ) ||
                            ( ioctl->auth_needed && !priv->authenticated ) ) {
                        retcode = -EACCES;
                } else {
                        retcode = func( inode, filp, cmd, arg );
                }
        }

        atomic_dec( &dev->ioctl_count );
        return retcode;
}

/**
 * Lock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int DRM(lock)( struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        DECLARE_WAITQUEUE( entry, current );
        drm_lock_t lock;
        int ret = 0;

        ++priv->lock_count;

        if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
                return -EFAULT;

        if ( lock.context == DRM_KERNEL_CONTEXT ) {
                DRM_ERROR( "Process %d using kernel context %d\n",
                           current->pid, lock.context );
                return -EINVAL;
        }

        DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                   lock.context, current->pid,
                   dev->lock.hw_lock->lock, lock.flags );

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
                if ( lock.context < 0 )
                        return -EINVAL;

        add_wait_queue( &dev->lock.lock_queue, &entry );
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if ( !dev->lock.hw_lock ) {
                        /* Device has been unregistered */
                        ret = -EINTR;
                        break;
                }
                if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
                                     lock.context ) ) {
                        dev->lock.filp      = filp;
                        dev->lock.lock_time = jiffies;
                        atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                        break;  /* Got lock */
                }

                /* Contention */
                schedule();
                if ( signal_pending( current ) ) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue( &dev->lock.lock_queue, &entry );

        sigemptyset( &dev->sigmask );
        sigaddset( &dev->sigmask, SIGSTOP );
        sigaddset( &dev->sigmask, SIGTSTP );
        sigaddset( &dev->sigmask, SIGTTIN );
        sigaddset( &dev->sigmask, SIGTTOU );
        dev->sigdata.context = lock.context;
        dev->sigdata.lock    = dev->lock.hw_lock;
        block_all_signals( DRM(notifier),
                           &dev->sigdata, &dev->sigmask );

        if (dev->fn_tbl.dma_ready && (lock.flags & _DRM_LOCK_READY))
                dev->fn_tbl.dma_ready(dev);

        if ( dev->fn_tbl.dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
                return dev->fn_tbl.dma_quiescent(dev);

        /* dev->fn_tbl.kernel_context_switch isn't used by any of the x86
         * drivers but is used by the Sparc driver.
         */

        if (dev->fn_tbl.kernel_context_switch &&
            dev->last_context != lock.context) {
                dev->fn_tbl.kernel_context_switch(dev, dev->last_context,
                                                  lock.context);
        }
        DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );

        return ret;
}

/**
 * Unlock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int DRM(unlock)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_lock_t lock;

        if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
                return -EFAULT;

        if ( lock.context == DRM_KERNEL_CONTEXT ) {
                DRM_ERROR( "Process %d using kernel context %d\n",
                           current->pid, lock.context );
                return -EINVAL;
        }

        atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );

        /* kernel_context_switch isn't used by any of the x86 drm
         * modules but is required by the Sparc driver.
         */
        if (dev->fn_tbl.kernel_context_switch_unlock)
                dev->fn_tbl.kernel_context_switch_unlock(dev, &lock);
        else {
                DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT );

                if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
                                     DRM_KERNEL_CONTEXT ) ) {
                        DRM_ERROR( "\n" );
                }
        }

        unblock_all_signals();
        return 0;
}