/**
 * \file drm_bufs.h
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
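
/*
 * Illustrative values (not part of the original source): DRM(order)(1) == 0,
 * DRM(order)(4096) == 12, and DRM(order)(4097) == 13, since 4097 is not a
 * power of two and the next power of two above it is 2^13 = 8192.
 */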

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map;
        drm_map_t __user *argp = (void __user *)arg;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = DRM(ioremap)( map->offset, map->size,
                                                    dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( argp, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &argp->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}
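
/*
 * A minimal user-space sketch of this ioctl (illustrative only; "fd" is a
 * descriptor for the DRM device node, and fb_base/fb_size are hypothetical
 * page-aligned values):
 *
 *      drm_map_t map;
 *      memset(&map, 0, sizeof(map));
 *      map.offset = fb_base;
 *      map.size   = fb_size;
 *      map.type   = _DRM_FRAME_BUFFER;
 *      if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *              ... map.handle now holds the token user space passes as
 *                  the mmap() offset for this mapping ...
 */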

/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa addmap().
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t __user *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* The list has wrapped around to the head pointer, or it's empty
         * and we didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        DRM(ioremapfree)(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                DRM(free_pages)(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
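
        /* Worked example (illustrative, not from the original source): a
         * request.size of 12288 bytes gives order 14, since the smallest
         * power of two >= 12288 is 16384; with 4 KiB pages (PAGE_SHIFT 12),
         * page_order is then 2 and total is 16 KiB.
         */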

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = DRM(alloc)( buf->dev_priv_size,
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */

int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

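        /* Worked example (illustrative, not from the original source):
         * count = 32 buffers of order 13 (8 KiB) with 4 KiB pages gives
         * page_order = 1, so each segment spans two pages and the pagelist
         * grows by count << page_order = 64 entries.
         */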
        while ( entry->buf_count < count ) {
                page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM(free)( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->dev_priv_size;
                        buf->dev_private = DRM(alloc)( dev->dev_priv_size,
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                DRM(cleanup_buf_error)(entry);
                                DRM(free)( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM(free)( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t __user *argp = (void __user *)arg;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = DRM(alloc)( dev->dev_priv_size,
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( inode, filp, cmd, arg );
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( inode, filp, cmd, arg );
        else
                return DRM(addbufs_pci)( inode, filp, cmd, arg );
}
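
/*
 * A minimal user-space sketch of the dispatch above (illustrative only;
 * "fd" is a descriptor for the DRM device node):
 *
 *      drm_buf_desc_t req;
 *      memset(&req, 0, sizeof(req));
 *      req.count = 32;                 request 32 buffers
 *      req.size  = 65536;              of 64 KiB each
 *      req.flags = _DRM_AGP_BUFFER;    routed to DRM(addbufs_agp)
 *      req.agp_start = 0;              offset within the AGP aperture
 *      if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
 *              ... req.count and req.size report what was granted ...
 */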

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
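
/*
 * Typical two-phase use from user space (illustrative only): call once with
 * request.count set to zero to learn how many size pools are populated,
 * allocate a drm_buf_desc_t array of that length, point request.list at it,
 * and call again so that each pool's count, size and water marks are
 * filled in.
 */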

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective
 * drm_device_dma::bufs entry.
 *
 * \note This ioctl is deprecated and rarely, if ever, used.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
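
/*
 * Illustrative example (not from the original source): after 64 buffers of
 * a given size have been added, a client could set low_mark = 16 and
 * high_mark = 48 for that pool; the freelist code consults these
 * thresholds when managing buffer reuse.
 */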

/**
 * Unreserves the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
                    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
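
/*
 * A minimal user-space sketch of DRM_IOCTL_MAP_BUFS (illustrative only;
 * "fd" is a descriptor for the DRM device node and buf_count comes from a
 * prior buffer-allocation call). The caller supplies a drm_buf_pub_t array
 * with at least one entry per buffer; on success each entry holds the
 * buffer's index, size and client-virtual address:
 *
 *      drm_buf_map_t bm;
 *      memset(&bm, 0, sizeof(bm));
 *      bm.count = buf_count;
 *      bm.list  = pub_array;
 *      if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
 *              ... pub_array[i].address is valid in this process ...
 */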