/**
 * \file drm_bufs.h
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif


/**
 * Compute the size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}
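
/* Illustrative only, not part of the original file: DRM(order) is a
 * ceiling log2, so under these semantics one would expect, e.g.:
 *
 *      DRM(order)(4096) == 12      (4096 is already a power of two)
 *      DRM(order)(4097) == 13      (rounded up to the next power of two)
 *
 * The addbufs functions below use it to bucket buffer sizes into
 * dma->bufs[order].
 */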

/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = 0;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
                        map->mtrr = mtrr_add( map->offset, map->size,
                                              MTRR_TYPE_WRCOMB, 1 );
                }
#endif
                map->handle = DRM(ioremap)( map->offset, map->size, dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return -ENOMEM;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &((drm_map_t *)arg)->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}
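
/* Hedged usage sketch, not part of the original file: a privileged
 * userspace client might register a register aperture roughly like this
 * (field and ioctl names per the drm.h of this era; the BAR address and
 * size below are made-up placeholders):
 *
 *      drm_map_t map;
 *      memset( &map, 0, sizeof(map) );
 *      map.offset = 0xe8000000;        // hypothetical PCI BAR, page aligned
 *      map.size   = 0x10000;           // must be a multiple of PAGE_SIZE
 *      map.type   = _DRM_REGISTERS;
 *      if ( ioctl( fd, DRM_IOCTL_ADD_MAP, &map ) == 0 )
 *              ;  // map.handle is now the token to pass as the mmap offset
 */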


/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa addmap().
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
                                retcode = mtrr_del(map->mtrr,
                                                   map->offset,
                                                   map->size);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
                        }
#endif
                        DRM(ioremapfree)(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
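
/* Companion sketch to the one after addmap(), equally hypothetical: only
 * maps created with _DRM_REMOVABLE (i.e. _DRM_SHM maps) can be removed,
 * and request.handle must match the handle returned by DRM_IOCTL_ADD_MAP:
 *
 *      drm_map_t req;
 *      memset( &req, 0, sizeof(req) );
 *      req.handle = map.handle;        // from a prior DRM_IOCTL_ADD_MAP
 *      ioctl( fd, DRM_IOCTL_RM_MAP, &req );
 */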

#if __HAVE_DMA

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                DRM(free_pages)(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
                DRM(freelist_destroy)(&entry->freelist);
#endif

                entry->buf_count = 0;
        }
}

#if __REALLY_HAVE_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_AGP */

#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM(free)( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = 0;

                        buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                        buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                DRM(cleanup_buf_error)(entry);
                                DRM(free)( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM(free)( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_PCI_DMA */

#if __HAVE_SG
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_SG */

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_PCI_DMA
                return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
                return -EINVAL;
#endif
}
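
/* Hedged sketch of the dispatch above from userspace, not part of the
 * original file.  request.size is rounded up to a power of two by
 * DRM(order), so asking for 4000-byte buffers yields 4096-byte ones;
 * the count of 32 is an arbitrary example:
 *
 *      drm_buf_desc_t req;
 *      memset( &req, 0, sizeof(req) );
 *      req.count = 32;
 *      req.size  = 4000;               // rounded up to 4096 internally
 *      req.flags = _DRM_AGP_BUFFER;    // omit for PCI, _DRM_SG_BUFFER for SG
 *      req.agp_start = 0;              // offset into the AGP aperture
 *      ioctl( fd, DRM_IOCTL_ADD_BUFS, &req );
 *      // on return, req.count/req.size reflect what was actually allocated
 */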


/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request,
                             (drm_buf_info_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( (drm_buf_info_t *)arg,
                           &request,
                           sizeof(request) ) )
                return -EFAULT;

        return 0;
}
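
/* Hedged sketch for the ioctl above, not from the original file: a
 * two-call pattern is typical, first with count = 0 to learn how many
 * size buckets exist, then with a suitably sized drm_buf_desc_t array:
 *
 *      drm_buf_info_t info;
 *      memset( &info, 0, sizeof(info) );
 *      ioctl( fd, DRM_IOCTL_INFO_BUFS, &info );    // info.count = buckets
 *      info.list = calloc( info.count, sizeof(drm_buf_desc_t) );
 *      ioctl( fd, DRM_IOCTL_INFO_BUFS, &info );    // fills in the list
 */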

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and rarely used.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
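
/* Hypothetical illustration, not from the original file: a client that
 * already allocated 4096-byte buffers could set the freelist water
 * marks for that size bucket (the values 4 and 16 are arbitrary):
 *
 *      drm_buf_desc_t req;
 *      memset( &req, 0, sizeof(req) );
 *      req.size      = 4096;   // selects the order-12 bucket
 *      req.low_mark  = 4;
 *      req.high_mark = 16;
 *      ioctl( fd, DRM_IOCTL_MARK_BUFS, &req );
 */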

/**
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}
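
/* Hedged sketch, not from the original file: freeing two buffers by
 * index (the indices shown are placeholders; they must belong to
 * buffers reserved through this same file descriptor, per the
 * buf->filp check above):
 *
 *      int indices[2] = { 0, 1 };
 *      drm_buf_free_t req;
 *      memset( &req, 0, sizeof(req) );
 *      req.count = 2;
 *      req.list  = indices;
 *      ioctl( fd, DRM_IOCTL_FREE_BUFS, &req );
 */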

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies
 * information about each buffer into user space.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, (drm_buf_map_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
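
/* Hedged userspace sketch for the ioctl above, not part of the original
 * file: the caller supplies a drm_buf_pub_t array at least dma->buf_count
 * entries long (MAX_BUFS here is an arbitrary illustrative bound):
 *
 *      #define MAX_BUFS 4096
 *      drm_buf_pub_t list[MAX_BUFS];
 *      drm_buf_map_t req;
 *      memset( &req, 0, sizeof(req) );
 *      req.count = MAX_BUFS;
 *      req.list  = list;
 *      if ( ioctl( fd, DRM_IOCTL_MAP_BUFS, &req ) == 0 )
 *              ;  // list[i].address now points at buffer i in this process
 */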

#endif /* __HAVE_DMA */