1 /**
2  * \file drm_drv.h 
3  * Generic driver template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  *
8  * To use this template, you must at least define the following (samples
9  * given for the MGA driver):
10  *
11  * \code
12  * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
13  *
14  * #define DRIVER_NAME          "mga"
15  * #define DRIVER_DESC          "Matrox G200/G400"
16  * #define DRIVER_DATE          "20001127"
17  *
18  * #define DRIVER_MAJOR         2
19  * #define DRIVER_MINOR         0
20  * #define DRIVER_PATCHLEVEL    2
21  *
22  * #define DRIVER_IOCTL_COUNT   DRM_ARRAY_SIZE( mga_ioctls )
23  *
24  * #define DRM(x)               mga_##x
25  * \endcode
26  */
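/*
 * A hedged usage sketch (not part of the template itself): in this era the DRM
 * core is built into each driver by textual inclusion, so a driver's
 * <name>_drv.c typically defines the macros above and then pulls in the
 * generic templates, roughly along these lines:
 *
 * \code
 * #include "drmP.h"
 * #include "mga_drm.h"
 * #include "mga_drv.h"
 *
 * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
 * #define DRIVER_NAME          "mga"
 * // ... remaining DRIVER_* and __HAVE_* defines ...
 *
 * #include "drm_drv.h"
 * #include "drm_fops.h"
 * \endcode
 *
 * The exact set of defines and included templates varies per driver; the
 * existing drivers in this directory are the authoritative examples.
 */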
27
28 /*
29  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
30  *
31  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33  * All Rights Reserved.
34  *
35  * Permission is hereby granted, free of charge, to any person obtaining a
36  * copy of this software and associated documentation files (the "Software"),
37  * to deal in the Software without restriction, including without limitation
38  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39  * and/or sell copies of the Software, and to permit persons to whom the
40  * Software is furnished to do so, subject to the following conditions:
41  *
42  * The above copyright notice and this permission notice (including the next
43  * paragraph) shall be included in all copies or substantial portions of the
44  * Software.
45  *
46  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
49  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52  * OTHER DEALINGS IN THE SOFTWARE.
53  */
54
55 #ifndef __MUST_HAVE_AGP
56 #define __MUST_HAVE_AGP                 0
57 #endif
58 #ifndef __HAVE_CTX_BITMAP
59 #define __HAVE_CTX_BITMAP               0
60 #endif
61 #ifndef __HAVE_IRQ
62 #define __HAVE_IRQ                      0
63 #endif
64 #ifndef __HAVE_DMA_QUEUE
65 #define __HAVE_DMA_QUEUE                0
66 #endif
67 #ifndef __HAVE_MULTIPLE_DMA_QUEUES
68 #define __HAVE_MULTIPLE_DMA_QUEUES      0
69 #endif
70 #ifndef __HAVE_DMA_SCHEDULE
71 #define __HAVE_DMA_SCHEDULE             0
72 #endif
73 #ifndef __HAVE_DMA_FLUSH
74 #define __HAVE_DMA_FLUSH                0
75 #endif
76 #ifndef __HAVE_DMA_READY
77 #define __HAVE_DMA_READY                0
78 #endif
79 #ifndef __HAVE_DMA_QUIESCENT
80 #define __HAVE_DMA_QUIESCENT            0
81 #endif
82 #ifndef __HAVE_RELEASE
83 #define __HAVE_RELEASE                  0
84 #endif
85 #ifndef __HAVE_COUNTERS
86 #define __HAVE_COUNTERS                 0
87 #endif
88 #ifndef __HAVE_SG
89 #define __HAVE_SG                       0
90 #endif
91 /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm modules in
 92  * the DRI cvs tree, but it is required by the kernel tree's Sparc
93  * driver.
94  */
95 #ifndef __HAVE_KERNEL_CTX_SWITCH
96 #define __HAVE_KERNEL_CTX_SWITCH        0
97 #endif
98 #ifndef __HAVE_DRIVER_FOPS_READ
99 #define __HAVE_DRIVER_FOPS_READ         0
100 #endif
101 #ifndef __HAVE_DRIVER_FOPS_POLL
102 #define __HAVE_DRIVER_FOPS_POLL         0
103 #endif
104
105 #ifndef DRIVER_PREINIT
106 #define DRIVER_PREINIT()
107 #endif
108 #ifndef DRIVER_POSTINIT
109 #define DRIVER_POSTINIT()
110 #endif
111 #ifndef DRIVER_PRERELEASE
112 #define DRIVER_PRERELEASE()
113 #endif
114 #ifndef DRIVER_PRETAKEDOWN
115 #define DRIVER_PRETAKEDOWN()
116 #endif
117 #ifndef DRIVER_POSTCLEANUP
118 #define DRIVER_POSTCLEANUP()
119 #endif
120 #ifndef DRIVER_PRESETUP
121 #define DRIVER_PRESETUP()
122 #endif
123 #ifndef DRIVER_POSTSETUP
124 #define DRIVER_POSTSETUP()
125 #endif
126 #ifndef DRIVER_IOCTLS
127 #define DRIVER_IOCTLS
128 #endif
129 #ifndef DRIVER_OPEN_HELPER
130 #define DRIVER_OPEN_HELPER( priv, dev )
131 #endif
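/*
 * Hedged example (illustrative, not lifted from a real driver): the hook
 * macros above are meant to be overridden before this file is included when a
 * driver needs extra work at these points, e.g.:
 *
 * \code
 * #define DRIVER_PRERELEASE()     mga_reclaim_buffers( dev, filp )
 * #define DRIVER_PRETAKEDOWN()    mga_do_cleanup_dma( dev )
 * \endcode
 *
 * mga_reclaim_buffers() and mga_do_cleanup_dma() are placeholder names; the
 * hooks rely on the local variables (\c dev, \c filp) being in scope at the
 * expansion sites in release() and takedown() below.
 */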
132 #ifndef DRIVER_FOPS
133 #define DRIVER_FOPS                             \
134 static struct file_operations   DRM(fops) = {   \
135         .owner   = THIS_MODULE,                 \
136         .open    = DRM(open),                   \
137         .flush   = DRM(flush),                  \
138         .release = DRM(release),                \
139         .ioctl   = DRM(ioctl),                  \
140         .mmap    = DRM(mmap),                   \
141         .fasync  = DRM(fasync),                 \
142         .poll    = DRM(poll),                   \
143         .read    = DRM(read),                   \
144 }
145 #endif
146
147 #ifndef MODULE
148 /** Use an additional macro to avoid preprocessor troubles */
149 #define DRM_OPTIONS_FUNC DRM(options)
150 /**
151  * Called by the kernel to parse command-line options passed via the
152  * boot-loader (e.g., LILO).  It calls the insmod option routine,
153  * parse_options().
154  */
155 static int __init DRM(options)( char *str )
156 {
157         DRM(parse_options)( str );
158         return 1;
159 }
160
161 __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
162 #undef DRM_OPTIONS_FUNC
163 #endif
164
165 #define MAX_DEVICES 4
166 static drm_device_t     DRM(device)[MAX_DEVICES];
167 static int              DRM(numdevs) = 0;
168
169 DRIVER_FOPS;
170
171 /** Ioctl table */
172 static drm_ioctl_desc_t           DRM(ioctls)[] = {
173         [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
174         [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
175         [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
176 #if __HAVE_IRQ
177         [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_by_busid), 0, 1 },
178 #endif
179         [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
180         [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
181         [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },
182         [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)]   = { DRM(setversion),  0, 1 },
183
184         [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
185         [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(noop),        1, 1 },
186         [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(noop),        1, 1 },
187         [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },
188
189         [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
190         [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },
191
192 #if __HAVE_CTX_BITMAP
193         [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
194         [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
195 #endif
196
197         [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
198         [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
199         [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
200         [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
201         [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
202         [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
203         [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },
204
205         [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
206         [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },
207
208         [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]          = { DRM(lock),        1, 0 },
209         [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },
210
211 #if __HAVE_DMA_FLUSH
212         /* Gamma only, really */
213         [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(finish),      1, 0 },
214 #else
 215         [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(noop),        1, 0 },
216 #endif
217
218 #if __HAVE_DMA
219         [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
220         [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
221         [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
222         [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
223         [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },
224         /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
225 #endif
226 #if __HAVE_IRQ || __HAVE_DMA
227         [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },
228 #endif
229
230 #if __REALLY_HAVE_AGP
231         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
232         [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
233         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
234         [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
235         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
236         [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
237         [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
238         [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
239 #endif
240
241 #if __HAVE_SG
242         [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
243         [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },
244 #endif
245
246 #if __HAVE_VBL_IRQ
247         [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)]   = { DRM(wait_vblank), 0, 0 },
248 #endif
249
250         DRIVER_IOCTLS
251 };
252
253 #define DRIVER_IOCTL_COUNT      DRM_ARRAY_SIZE( DRM(ioctls) )
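/*
 * Each entry above is { handler, auth_needed, root_only }: the second flag
 * requires the caller to have authenticated via DRM_IOCTL_AUTH_MAGIC, the
 * third requires CAP_SYS_ADMIN (both are checked in DRM(ioctl) below).  A
 * driver appends its own entries through DRIVER_IOCTLS; a hedged sketch of
 * such a definition (ioctl and handler names are illustrative only):
 *
 * \code
 * #define DRIVER_IOCTLS \
 *      [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush,  1, 0 }, \
 *      [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset,  1, 0 },
 * \endcode
 */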
254
255 #ifdef MODULE
256 static char *drm_opts = NULL;
257 #endif
258
259 MODULE_AUTHOR( DRIVER_AUTHOR );
260 MODULE_DESCRIPTION( DRIVER_DESC );
261 MODULE_PARM( drm_opts, "s" );
262 MODULE_LICENSE("GPL and additional rights");
263
264 static int DRM(setup)( drm_device_t *dev )
265 {
266         int i;
267
268         DRIVER_PRESETUP();
269         atomic_set( &dev->ioctl_count, 0 );
270         atomic_set( &dev->vma_count, 0 );
271         dev->buf_use = 0;
272         atomic_set( &dev->buf_alloc, 0 );
273
274 #if __HAVE_DMA
275         i = DRM(dma_setup)( dev );
276         if ( i < 0 )
277                 return i;
278 #endif
279
280         dev->counters  = 6 + __HAVE_COUNTERS;
281         dev->types[0]  = _DRM_STAT_LOCK;
282         dev->types[1]  = _DRM_STAT_OPENS;
283         dev->types[2]  = _DRM_STAT_CLOSES;
284         dev->types[3]  = _DRM_STAT_IOCTLS;
285         dev->types[4]  = _DRM_STAT_LOCKS;
286         dev->types[5]  = _DRM_STAT_UNLOCKS;
287 #ifdef __HAVE_COUNTER6
288         dev->types[6]  = __HAVE_COUNTER6;
289 #endif
290 #ifdef __HAVE_COUNTER7
291         dev->types[7]  = __HAVE_COUNTER7;
292 #endif
293 #ifdef __HAVE_COUNTER8
294         dev->types[8]  = __HAVE_COUNTER8;
295 #endif
296 #ifdef __HAVE_COUNTER9
297         dev->types[9]  = __HAVE_COUNTER9;
298 #endif
299 #ifdef __HAVE_COUNTER10
300         dev->types[10] = __HAVE_COUNTER10;
301 #endif
302 #ifdef __HAVE_COUNTER11
303         dev->types[11] = __HAVE_COUNTER11;
304 #endif
305 #ifdef __HAVE_COUNTER12
306         dev->types[12] = __HAVE_COUNTER12;
307 #endif
308 #ifdef __HAVE_COUNTER13
309         dev->types[13] = __HAVE_COUNTER13;
310 #endif
311 #ifdef __HAVE_COUNTER14
312         dev->types[14] = __HAVE_COUNTER14;
313 #endif
314 #ifdef __HAVE_COUNTER15
 315         dev->types[15] = __HAVE_COUNTER15;
316 #endif
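/*
 * Hedged example of the counter hooks consumed above (the chosen _DRM_STAT_*
 * types are illustrative; a driver reports whatever it actually counts):
 *
 * \code
 * #define __HAVE_COUNTERS      3
 * #define __HAVE_COUNTER6      _DRM_STAT_IRQ
 * #define __HAVE_COUNTER7      _DRM_STAT_PRIMARY
 * #define __HAVE_COUNTER8      _DRM_STAT_SECONDARY
 * \endcode
 */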
317
318         for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
319                 atomic_set( &dev->counts[i], 0 );
320
321         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
322                 dev->magiclist[i].head = NULL;
323                 dev->magiclist[i].tail = NULL;
324         }
325
326         dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
327                                   DRM_MEM_MAPS);
328         if(dev->maplist == NULL) return -ENOMEM;
329         memset(dev->maplist, 0, sizeof(*dev->maplist));
330         INIT_LIST_HEAD(&dev->maplist->head);
331
332         dev->ctxlist = DRM(alloc)(sizeof(*dev->ctxlist),
333                                   DRM_MEM_CTXLIST);
334         if(dev->ctxlist == NULL) return -ENOMEM;
335         memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
336         INIT_LIST_HEAD(&dev->ctxlist->head);
337
338         dev->vmalist = NULL;
339         dev->sigdata.lock = dev->lock.hw_lock = NULL;
340         init_waitqueue_head( &dev->lock.lock_queue );
341         dev->queue_count = 0;
342         dev->queue_reserved = 0;
343         dev->queue_slots = 0;
344         dev->queuelist = NULL;
345         dev->irq_enabled = 0;
346         dev->context_flag = 0;
347         dev->interrupt_flag = 0;
348         dev->dma_flag = 0;
349         dev->last_context = 0;
350         dev->last_switch = 0;
351         dev->last_checked = 0;
352         init_waitqueue_head( &dev->context_wait );
353         dev->if_version = 0;
354
355         dev->ctx_start = 0;
356         dev->lck_start = 0;
357
358         dev->buf_rp = dev->buf;
359         dev->buf_wp = dev->buf;
360         dev->buf_end = dev->buf + DRM_BSZ;
361         dev->buf_async = NULL;
362         init_waitqueue_head( &dev->buf_readers );
363         init_waitqueue_head( &dev->buf_writers );
364
365         DRM_DEBUG( "\n" );
366
367         /*
368          * The kernel's context could be created here, but is now created
369          * in drm_dma_enqueue.  This is more resource-efficient for
370          * hardware that does not do DMA, but may mean that
371          * drm_select_queue fails between the time the interrupt is
372          * initialized and the time the queues are initialized.
373          */
374         DRIVER_POSTSETUP();
375         return 0;
376 }
377
378
379 /**
380  * Take down the DRM device.
381  *
382  * \param dev DRM device structure.
383  *
384  * Frees every resource in \p dev.
385  *
386  * \sa drm_device and setup().
387  */
388 static int DRM(takedown)( drm_device_t *dev )
389 {
390         drm_magic_entry_t *pt, *next;
391         drm_map_t *map;
392         drm_map_list_t *r_list;
393         struct list_head *list, *list_next;
394         drm_vma_entry_t *vma, *vma_next;
395         int i;
396
397         DRM_DEBUG( "\n" );
398
399         DRIVER_PRETAKEDOWN();
400 #if __HAVE_IRQ
401         if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );
402 #endif
403
404         down( &dev->struct_sem );
405         del_timer( &dev->timer );
406
407         if ( dev->devname ) {
408                 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
409                            DRM_MEM_DRIVER );
410                 dev->devname = NULL;
411         }
412
413         if ( dev->unique ) {
414                 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
415                            DRM_MEM_DRIVER );
416                 dev->unique = NULL;
417                 dev->unique_len = 0;
418         }
419                                 /* Clear pid list */
420         for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
421                 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
422                         next = pt->next;
423                         DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
424                 }
425                 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
426         }
427
428 #if __REALLY_HAVE_AGP
429                                 /* Clear AGP information */
430         if ( dev->agp ) {
431                 drm_agp_mem_t *entry;
432                 drm_agp_mem_t *nexte;
433
434                                 /* Remove AGP resources, but leave dev->agp
435                                    intact until drv_cleanup is called. */
436                 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
437                         nexte = entry->next;
438                         if ( entry->bound ) DRM(unbind_agp)( entry->memory );
439                         DRM(free_agp)( entry->memory, entry->pages );
440                         DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
441                 }
442                 dev->agp->memory = NULL;
443
444                 if ( dev->agp->acquired ) DRM(agp_do_release)();
445
446                 dev->agp->acquired = 0;
447                 dev->agp->enabled  = 0;
448         }
449 #endif
450
451                                 /* Clear vma list (only built for debugging) */
452         if ( dev->vmalist ) {
453                 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
454                         vma_next = vma->next;
455                         DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
456                 }
457                 dev->vmalist = NULL;
458         }
459
460         if( dev->maplist ) {
461                 list_for_each_safe( list, list_next, &dev->maplist->head ) {
462                         r_list = (drm_map_list_t *)list;
463
464                         if ( ( map = r_list->map ) ) {
465                                 switch ( map->type ) {
466                                 case _DRM_REGISTERS:
467                                 case _DRM_FRAME_BUFFER:
468 #if __REALLY_HAVE_MTRR
469                                         if ( map->mtrr >= 0 ) {
470                                                 int retcode;
471                                                 retcode = mtrr_del( map->mtrr,
472                                                                     map->offset,
473                                                                     map->size );
474                                                 DRM_DEBUG( "mtrr_del=%d\n", retcode );
475                                         }
476 #endif
477                                         DRM(ioremapfree)( map->handle, map->size, dev );
478                                         break;
479                                 case _DRM_SHM:
480                                         vfree(map->handle);
481                                         break;
482
483                                 case _DRM_AGP:
484                                         /* Do nothing here, because this is all
485                                          * handled in the AGP/GART driver.
486                                          */
487                                         break;
488                                 case _DRM_SCATTER_GATHER:
489                                         /* Handle it, but do nothing, if HAVE_SG
490                                          * isn't defined.
491                                          */
492 #if __HAVE_SG
493                                         if(dev->sg) {
494                                                 DRM(sg_cleanup)(dev->sg);
495                                                 dev->sg = NULL;
496                                         }
497 #endif
498                                         break;
499                                 }
500                                 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
501                         }
502                         list_del( list );
503                         DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
504                 }
505                 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
506                 dev->maplist = NULL;
507         }
508
509 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
510         if ( dev->queuelist ) {
511                 for ( i = 0 ; i < dev->queue_count ; i++ ) {
512 #if __HAVE_DMA_WAITLIST
513                         DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
514 #endif
515                         if ( dev->queuelist[i] ) {
516                                 DRM(free)( dev->queuelist[i],
517                                           sizeof(*dev->queuelist[0]),
518                                           DRM_MEM_QUEUES );
519                                 dev->queuelist[i] = NULL;
520                         }
521                 }
522                 DRM(free)( dev->queuelist,
523                           dev->queue_slots * sizeof(*dev->queuelist),
524                           DRM_MEM_QUEUES );
525                 dev->queuelist = NULL;
526         }
527         dev->queue_count = 0;
528 #endif
529
530 #if __HAVE_DMA
531         DRM(dma_takedown)( dev );
532 #endif
533         if ( dev->lock.hw_lock ) {
534                 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
535                 dev->lock.filp = 0;
536                 wake_up_interruptible( &dev->lock.lock_queue );
537         }
538         up( &dev->struct_sem );
539
540         return 0;
541 }
542
543 #include "drm_pciids.h"
544
545 static struct pci_device_id DRM(pciidlist)[] = {
546         DRM(PCI_IDS)
547 };
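/*
 * DRM(PCI_IDS) comes from drm_pciids.h and expands to an initializer list of
 * struct pci_device_id entries terminated by an all-zero sentinel, which is
 * what the probe loop below keys on.  A hedged sketch of such an expansion
 * (the device IDs are illustrative):
 *
 * \code
 * #define mga_PCI_IDS \
 *      {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 *      {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 *      {0, 0, 0}
 * \endcode
 */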
548
549 static int DRM(probe)(struct pci_dev *pdev)
550 {
551         drm_device_t *dev;
552 #if __HAVE_CTX_BITMAP
553         int retcode;
554 #endif
555         int i;
556         int is_compat = 0;
557
558         DRM_DEBUG( "\n" );
559
560         for (i = 0; DRM(pciidlist)[i].vendor != 0; i++) {
561                 if ((DRM(pciidlist)[i].vendor == pdev->vendor) &&
562                     (DRM(pciidlist)[i].device == pdev->device)) {
563                         is_compat = 1;
564                 }
565         }
566         if (is_compat == 0)
567                 return -ENODEV;
568
569         if (DRM(numdevs) >= MAX_DEVICES)
570                 return -ENODEV;
571
572         dev = &(DRM(device)[DRM(numdevs)]);
573
574         memset( (void *)dev, 0, sizeof(*dev) );
575         dev->count_lock = SPIN_LOCK_UNLOCKED;
576         init_timer( &dev->timer );
577         sema_init( &dev->struct_sem, 1 );
578         sema_init( &dev->ctxlist_sem, 1 );
579
580         if ((dev->minor = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
581                 return -EPERM;
582         dev->device = MKDEV(DRM_MAJOR, dev->minor );
583         dev->name   = DRIVER_NAME;
584
585         dev->pdev   = pdev;
586 #ifdef __alpha__
587         dev->hose   = pdev->sysdata;
588         dev->pci_domain = dev->hose->bus->number;
589 #else
590         dev->pci_domain = 0;
591 #endif
592         dev->pci_bus = pdev->bus->number;
593         dev->pci_slot = PCI_SLOT(pdev->devfn);
594         dev->pci_func = PCI_FUNC(pdev->devfn);
595         dev->irq = pdev->irq;
596
597         DRIVER_PREINIT();
598
599 #if __REALLY_HAVE_AGP
600         dev->agp = DRM(agp_init)();
601 #if __MUST_HAVE_AGP
602         if ( dev->agp == NULL ) {
603                 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
604                 DRM(stub_unregister)(dev->minor);
605                 DRM(takedown)( dev );
606                 return -EINVAL;
607         }
608 #endif
609 #if __REALLY_HAVE_MTRR
610         if (dev->agp)
611                 dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
612                                         dev->agp->agp_info.aper_size*1024*1024,
613                                         MTRR_TYPE_WRCOMB,
614                                         1 );
615 #endif
616 #endif
617
618 #if __HAVE_CTX_BITMAP
619         retcode = DRM(ctxbitmap_init)( dev );
620         if( retcode ) {
621                 DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
622                 DRM(stub_unregister)(dev->minor);
623                 DRM(takedown)( dev );
624                 return retcode;
625         }
626 #endif
627         DRM(numdevs)++; /* no errors, mark it reserved */
628         
629         DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
630                 DRIVER_NAME,
631                 DRIVER_MAJOR,
632                 DRIVER_MINOR,
633                 DRIVER_PATCHLEVEL,
634                 DRIVER_DATE,
635                 dev->minor,
636                 pci_pretty_name(pdev));
637
638         DRIVER_POSTINIT();
639
640         return 0;
641 }
642
643 /**
644  * Module initialization. Called via init_module at module load time, or via
645  * linux/init/main.c (this is not currently supported).
646  *
647  * \return zero on success or a negative number on failure.
648  *
649  * Initializes an array of drm_device structures, and attempts to
650  * initialize all available devices, using consecutive minors, registering the
651  * stubs and initializing the AGP device.
652  * 
 653  * Expands the \c DRIVER_PREINIT and \c DRIVER_POSTINIT macros before and
 654  * after the initialization, respectively, for driver customization.
655  */
656 static int __init drm_init( void )
657 {
658         struct pci_dev *pdev = NULL;
659
660         DRM_DEBUG( "\n" );
661
662 #ifdef MODULE
663         DRM(parse_options)( drm_opts );
664 #endif
665
666         DRM(mem_init)();
667
668         while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
669                 DRM(probe)(pdev);
670         }
671         return 0;
672 }
673
674 /**
675  * Called via cleanup_module() at module unload time.
676  *
 677  * Cleans up all DRM devices, calling takedown().
678  * 
679  * \sa drm_init().
680  */
681 static void __exit drm_cleanup( void )
682 {
683         drm_device_t *dev;
684         int i;
685
686         DRM_DEBUG( "\n" );
687
688         for (i = DRM(numdevs) - 1; i >= 0; i--) {
689                 dev = &(DRM(device)[i]);
690                 if ( DRM(stub_unregister)(dev->minor) ) {
691                         DRM_ERROR( "Cannot unload module\n" );
692                 } else {
693                         DRM_DEBUG("minor %d unregistered\n", dev->minor);
694                         if (i == 0) {
695                                 DRM_INFO( "Module unloaded\n" );
696                         }
697                 }
698 #if __HAVE_CTX_BITMAP
699                 DRM(ctxbitmap_cleanup)( dev );
700 #endif
701
702 #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
703                 if ( dev->agp && dev->agp->agp_mtrr >= 0) {
704                         int retval;
705                         retval = mtrr_del( dev->agp->agp_mtrr,
706                                    dev->agp->agp_info.aper_base,
707                                    dev->agp->agp_info.aper_size*1024*1024 );
708                         DRM_DEBUG( "mtrr_del=%d\n", retval );
709                 }
710 #endif
711
712                 DRM(takedown)( dev );
713
714 #if __REALLY_HAVE_AGP
715                 if ( dev->agp ) {
716                         DRM(agp_uninit)();
717                         DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
718                         dev->agp = NULL;
719                 }
720 #endif
721         }
722         DRIVER_POSTCLEANUP();
723         DRM(numdevs) = 0;
724 }
725
726 module_init( drm_init );
727 module_exit( drm_cleanup );
728
729
730 /**
731  * Get version information
732  *
733  * \param inode device inode.
734  * \param filp file pointer.
735  * \param cmd command.
736  * \param arg user argument, pointing to a drm_version structure.
737  * \return zero on success or negative number on failure.
738  *
739  * Fills in the version information in \p arg.
740  */
741 int DRM(version)( struct inode *inode, struct file *filp,
742                   unsigned int cmd, unsigned long arg )
743 {
744         drm_version_t version;
745         int len;
746
747         if ( copy_from_user( &version,
748                              (drm_version_t *)arg,
749                              sizeof(version) ) )
750                 return -EFAULT;
751
752 #define DRM_COPY( name, value )                                         \
753         len = strlen( value );                                          \
754         if ( len > name##_len ) len = name##_len;                       \
755         name##_len = strlen( value );                                   \
756         if ( len && name ) {                                            \
757                 if ( copy_to_user( name, value, len ) )                 \
758                         return -EFAULT;                                 \
759         }
760
761         version.version_major = DRIVER_MAJOR;
762         version.version_minor = DRIVER_MINOR;
763         version.version_patchlevel = DRIVER_PATCHLEVEL;
764
765         DRM_COPY( version.name, DRIVER_NAME );
766         DRM_COPY( version.date, DRIVER_DATE );
767         DRM_COPY( version.desc, DRIVER_DESC );
768
769         if ( copy_to_user( (drm_version_t *)arg,
770                            &version,
771                            sizeof(version) ) )
772                 return -EFAULT;
773         return 0;
774 }
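/*
 * Hedged usage note: DRM_COPY copies at most the buffer length supplied by
 * user space but always reports the full string length back, so callers are
 * expected to issue the ioctl twice -- once with zeroed lengths to learn the
 * sizes, then again with buffers allocated (and to NUL-terminate the results
 * themselves).  An illustrative user-space sketch, error handling omitted:
 *
 * \code
 * drm_version_t v;
 * memset( &v, 0, sizeof(v) );
 * ioctl( fd, DRM_IOCTL_VERSION, &v );    // fills in the *_len fields
 * v.name = malloc( v.name_len + 1 );
 * v.date = malloc( v.date_len + 1 );
 * v.desc = malloc( v.desc_len + 1 );
 * ioctl( fd, DRM_IOCTL_VERSION, &v );    // fills in the strings
 * \endcode
 */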
775
776 /**
777  * Open file.
778  * 
779  * \param inode device inode
780  * \param filp file pointer.
781  * \return zero on success or a negative number on failure.
782  *
 783  * Searches for the DRM device with the same minor number, calls open_helper(),
 784  * and increments the device open count. If the open count was previously zero,
 785  * i.e., this is the first time the device is opened, setup() is called.
786  */
787 int DRM(open)( struct inode *inode, struct file *filp )
788 {
789         drm_device_t *dev = NULL;
790         int retcode = 0;
791         int i;
792
793         for (i = 0; i < DRM(numdevs); i++) {
794                 if (iminor(inode) == DRM(device)[i].minor) {
795                         dev = &(DRM(device)[i]);
796                         break;
797                 }
798         }
799         if (!dev) {
800                 return -ENODEV;
801         }
802
803         retcode = DRM(open_helper)( inode, filp, dev );
804         if ( !retcode ) {
805                 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
806                 spin_lock( &dev->count_lock );
807                 if ( !dev->open_count++ ) {
808                         spin_unlock( &dev->count_lock );
809                         return DRM(setup)( dev );
810                 }
811                 spin_unlock( &dev->count_lock );
812         }
813
814         return retcode;
815 }
816
817 /**
818  * Release file.
819  *
820  * \param inode device inode
821  * \param filp file pointer.
822  * \return zero on success or a negative number on failure.
823  *
 824  * If this file holds the hardware lock, free it; otherwise take the lock for
 825  * the kernel context, since it is needed to reclaim buffers. Unlink the file
 826  * private data from its list and free it. Decrement the open count and, if it
 827  * reaches zero, call takedown().
828  */
829 int DRM(release)( struct inode *inode, struct file *filp )
830 {
831         drm_file_t *priv = filp->private_data;
832         drm_device_t *dev;
833         int retcode = 0;
834
835         lock_kernel();
836         dev = priv->dev;
837
838         DRM_DEBUG( "open_count = %d\n", dev->open_count );
839
840         DRIVER_PRERELEASE();
841
842         /* ========================================================
843          * Begin inline drm_release
844          */
845
846         DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
847                    current->pid, (long)old_encode_dev(dev->device), dev->open_count );
848
849         if ( priv->lock_count && dev->lock.hw_lock &&
850              _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
851              dev->lock.filp == filp ) {
852                 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
853                         filp,
854                         _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
855 #if __HAVE_RELEASE
856                 DRIVER_RELEASE();
857 #endif
858                 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
859                                 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
860
861                                 /* FIXME: may require heavy-handed reset of
862                                    hardware at this point, possibly
863                                    processed via a callback to the X
864                                    server. */
865         }
866 #if __HAVE_RELEASE
867         else if ( priv->lock_count && dev->lock.hw_lock ) {
868                 /* The lock is required to reclaim buffers */
869                 DECLARE_WAITQUEUE( entry, current );
870
871                 add_wait_queue( &dev->lock.lock_queue, &entry );
872                 for (;;) {
873                         current->state = TASK_INTERRUPTIBLE;
874                         if ( !dev->lock.hw_lock ) {
875                                 /* Device has been unregistered */
876                                 retcode = -EINTR;
877                                 break;
878                         }
879                         if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
880                                              DRM_KERNEL_CONTEXT ) ) {
881                                 dev->lock.filp      = filp;
882                                 dev->lock.lock_time = jiffies;
883                                 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
884                                 break;  /* Got lock */
885                         }
886                                 /* Contention */
887                         schedule();
888                         if ( signal_pending( current ) ) {
889                                 retcode = -ERESTARTSYS;
890                                 break;
891                         }
892                 }
893                 current->state = TASK_RUNNING;
894                 remove_wait_queue( &dev->lock.lock_queue, &entry );
895                 if( !retcode ) {
896                         DRIVER_RELEASE();
897                         DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
898                                         DRM_KERNEL_CONTEXT );
899                 }
900         }
901 #elif __HAVE_DMA
902         DRM(reclaim_buffers)( filp );
903 #endif
904
905         DRM(fasync)( -1, filp, 0 );
906
907         down( &dev->ctxlist_sem );
908         if ( !list_empty( &dev->ctxlist->head ) ) {
909                 drm_ctx_list_t *pos, *n;
910
911                 list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
912                         if ( pos->tag == priv &&
913                              pos->handle != DRM_KERNEL_CONTEXT ) {
914 #ifdef DRIVER_CTX_DTOR
915                                 DRIVER_CTX_DTOR(pos->handle);
916 #endif
917 #if __HAVE_CTX_BITMAP
918                                 DRM(ctxbitmap_free)( dev, pos->handle );
919 #endif
920                                 list_del( &pos->head );
921                                 DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
922                         }
923                 }
924         }
925         up( &dev->ctxlist_sem );
926
927         down( &dev->struct_sem );
928         if ( priv->remove_auth_on_close == 1 ) {
929                 drm_file_t *temp = dev->file_first;
930                 while ( temp ) {
931                         temp->authenticated = 0;
932                         temp = temp->next;
933                 }
934         }
935         if ( priv->prev ) {
936                 priv->prev->next = priv->next;
937         } else {
938                 dev->file_first  = priv->next;
939         }
940         if ( priv->next ) {
941                 priv->next->prev = priv->prev;
942         } else {
943                 dev->file_last   = priv->prev;
944         }
945         up( &dev->struct_sem );
946         
947         DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
948
949         /* ========================================================
950          * End inline drm_release
951          */
952
953         atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
954         spin_lock( &dev->count_lock );
955         if ( !--dev->open_count ) {
956                 if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
957                         DRM_ERROR( "Device busy: %d %d\n",
958                                    atomic_read( &dev->ioctl_count ),
959                                    dev->blocked );
960                         spin_unlock( &dev->count_lock );
961                         unlock_kernel();
962                         return -EBUSY;
963                 }
964                 spin_unlock( &dev->count_lock );
965                 unlock_kernel();
966                 return DRM(takedown)( dev );
967         }
968         spin_unlock( &dev->count_lock );
969
970         unlock_kernel();
971
972         return retcode;
973 }
974
975 /** 
976  * Called whenever a process performs an ioctl on /dev/drm.
977  *
978  * \param inode device inode.
979  * \param filp file pointer.
980  * \param cmd command.
981  * \param arg user argument.
982  * \return zero on success or negative number on failure.
983  *
984  * Looks up the ioctl function in the ::ioctls table, checking for root
 985  * privileges if required, and dispatches to the respective function.
986  */
987 int DRM(ioctl)( struct inode *inode, struct file *filp,
988                 unsigned int cmd, unsigned long arg )
989 {
990         drm_file_t *priv = filp->private_data;
991         drm_device_t *dev = priv->dev;
992         drm_ioctl_desc_t *ioctl;
993         drm_ioctl_t *func;
994         int nr = DRM_IOCTL_NR(cmd);
995         int retcode = 0;
996
997         atomic_inc( &dev->ioctl_count );
998         atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
999         ++priv->ioctl_count;
1000
1001         DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
1002                    current->pid, cmd, nr, (long)old_encode_dev(dev->device), 
1003                    priv->authenticated );
1004
1005         if ( nr >= DRIVER_IOCTL_COUNT ) {
1006                 retcode = -EINVAL;
1007         } else {
1008                 ioctl = &DRM(ioctls)[nr];
1009                 func = ioctl->func;
1010
1011                 if ( !func ) {
1012                         DRM_DEBUG( "no function\n" );
1013                         retcode = -EINVAL;
1014                 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
1015                             ( ioctl->auth_needed && !priv->authenticated ) ) {
1016                         retcode = -EACCES;
1017                 } else {
1018                         retcode = func( inode, filp, cmd, arg );
1019                 }
1020         }
1021
1022         atomic_dec( &dev->ioctl_count );
1023         return retcode;
1024 }
1025
1026 /** 
1027  * Lock ioctl.
1028  *
1029  * \param inode device inode.
1030  * \param filp file pointer.
1031  * \param cmd command.
1032  * \param arg user argument, pointing to a drm_lock structure.
1033  * \return zero on success or negative number on failure.
1034  *
 1035  * Add the current task to the lock wait queue and attempt to take the lock.
1036  */
1037 int DRM(lock)( struct inode *inode, struct file *filp,
1038                unsigned int cmd, unsigned long arg )
1039 {
1040         drm_file_t *priv = filp->private_data;
1041         drm_device_t *dev = priv->dev;
1042         DECLARE_WAITQUEUE( entry, current );
1043         drm_lock_t lock;
1044         int ret = 0;
1045 #if __HAVE_MULTIPLE_DMA_QUEUES
1046         drm_queue_t *q;
1047 #endif
1048
1049         ++priv->lock_count;
1050
1051         if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1052                 return -EFAULT;
1053
1054         if ( lock.context == DRM_KERNEL_CONTEXT ) {
1055                 DRM_ERROR( "Process %d using kernel context %d\n",
1056                            current->pid, lock.context );
1057                 return -EINVAL;
1058         }
1059
1060         DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1061                    lock.context, current->pid,
1062                    dev->lock.hw_lock->lock, lock.flags );
1063
1064 #if __HAVE_DMA_QUEUE
1065         if ( lock.context < 0 )
1066                 return -EINVAL;
1067 #elif __HAVE_MULTIPLE_DMA_QUEUES
1068         if ( lock.context < 0 || lock.context >= dev->queue_count )
1069                 return -EINVAL;
1070         q = dev->queuelist[lock.context];
1071 #endif
1072
1073 #if __HAVE_DMA_FLUSH
1074         ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
1075 #endif
1076         if ( !ret ) {
1077                 add_wait_queue( &dev->lock.lock_queue, &entry );
1078                 for (;;) {
1079                         current->state = TASK_INTERRUPTIBLE;
1080                         if ( !dev->lock.hw_lock ) {
1081                                 /* Device has been unregistered */
1082                                 ret = -EINTR;
1083                                 break;
1084                         }
1085                         if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
1086                                              lock.context ) ) {
1087                                 dev->lock.filp      = filp;
1088                                 dev->lock.lock_time = jiffies;
1089                                 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
1090                                 break;  /* Got lock */
1091                         }
1092
1093                                 /* Contention */
1094                         schedule();
1095                         if ( signal_pending( current ) ) {
1096                                 ret = -ERESTARTSYS;
1097                                 break;
1098                         }
1099                 }
1100                 current->state = TASK_RUNNING;
1101                 remove_wait_queue( &dev->lock.lock_queue, &entry );
1102         }
1103
1104 #if __HAVE_DMA_FLUSH
1105         DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
1106 #endif
1107
1108         if ( !ret ) {
1109                 sigemptyset( &dev->sigmask );
1110                 sigaddset( &dev->sigmask, SIGSTOP );
1111                 sigaddset( &dev->sigmask, SIGTSTP );
1112                 sigaddset( &dev->sigmask, SIGTTIN );
1113                 sigaddset( &dev->sigmask, SIGTTOU );
1114                 dev->sigdata.context = lock.context;
1115                 dev->sigdata.lock    = dev->lock.hw_lock;
1116                 block_all_signals( DRM(notifier),
1117                                    &dev->sigdata, &dev->sigmask );
1118
1119 #if __HAVE_DMA_READY
1120                 if ( lock.flags & _DRM_LOCK_READY ) {
1121                         DRIVER_DMA_READY();
1122                 }
1123 #endif
1124 #if __HAVE_DMA_QUIESCENT
1125                 if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1126                         DRIVER_DMA_QUIESCENT();
1127                 }
1128 #endif
1129                 /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the
1130                  * drm modules in the DRI cvs tree, but it is required
1131                  * by the Sparc driver.
1132                  */
1133 #if __HAVE_KERNEL_CTX_SWITCH
1134                 if ( dev->last_context != lock.context ) {
1135                         DRM(context_switch)(dev, dev->last_context,
1136                                             lock.context);
1137                 }
1138 #endif
1139         }
1140
1141         DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1142
1143         return ret;
1144 }
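/*
 * Hedged usage sketch for the lock/unlock pair (libdrm wraps these as
 * drmGetLock()/drmUnlock(); the raw ioctl form is shown here, error handling
 * omitted):
 *
 * \code
 * drm_lock_t l;
 * l.context = ctx;              // handle obtained via DRM_IOCTL_ADD_CTX
 * l.flags   = _DRM_LOCK_READY;  // or _DRM_LOCK_QUIESCENT, see above
 * ioctl( fd, DRM_IOCTL_LOCK, &l );
 * // ... program the hardware ...
 * ioctl( fd, DRM_IOCTL_UNLOCK, &l );
 * \endcode
 */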
1145
1146 /** 
1147  * Unlock ioctl.
1148  *
1149  * \param inode device inode.
1150  * \param filp file pointer.
1151  * \param cmd command.
1152  * \param arg user argument, pointing to a drm_lock structure.
1153  * \return zero on success or negative number on failure.
1154  *
1155  * Transfer and free the lock.
1156  */
1157 int DRM(unlock)( struct inode *inode, struct file *filp,
1158                  unsigned int cmd, unsigned long arg )
1159 {
1160         drm_file_t *priv = filp->private_data;
1161         drm_device_t *dev = priv->dev;
1162         drm_lock_t lock;
1163
1164         if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1165                 return -EFAULT;
1166
1167         if ( lock.context == DRM_KERNEL_CONTEXT ) {
1168                 DRM_ERROR( "Process %d using kernel context %d\n",
1169                            current->pid, lock.context );
1170                 return -EINVAL;
1171         }
1172
1173         atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
1174
1175         /* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm
1176          * modules in the DRI cvs tree, but it is required by the
1177          * Sparc driver.
1178          */
1179 #if __HAVE_KERNEL_CTX_SWITCH
1180         /* We no longer really hold it, but if we are the next
1181          * agent to request it then we should just be able to
1182          * take it immediately and not eat the ioctl.
1183          */
1184         dev->lock.filp = 0;
1185         {
1186                 __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
1187                 unsigned int old, new, prev, ctx;
1188
1189                 ctx = lock.context;
1190                 do {
1191                         old  = *plock;
1192                         new  = ctx;
1193                         prev = cmpxchg(plock, old, new);
1194                 } while (prev != old);
1195         }
1196         wake_up_interruptible(&dev->lock.lock_queue);
1197 #else
1198         DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1199                             DRM_KERNEL_CONTEXT );
1200 #if __HAVE_DMA_SCHEDULE
1201         DRM(dma_schedule)( dev, 1 );
1202 #endif
1203
1204         if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1205                              DRM_KERNEL_CONTEXT ) ) {
1206                 DRM_ERROR( "\n" );
1207         }
1208 #endif /* !__HAVE_KERNEL_CTX_SWITCH */
1209
1210         unblock_all_signals();
1211         return 0;
1212 }