/**
 * \file drm_bufs.h
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif
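
/*
 * Illustrative note (not in the original source): under the old DRM
 * "template" build scheme, a driver customizes this file by defining the
 * macros above before including it, e.g. (names hypothetical):
 *
 *      #define DRIVER_BUF_PRIV_T       drm_foo_buf_priv_t
 *      #define DRIVER_AGP_BUFFERS_MAP( dev ) \
 *              (((drm_foo_private_t *)(dev)->dev_private)->buffers)
 *      #include "drm_bufs.h"
 *
 * Macros left undefined fall back to the defaults above.
 */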


/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
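
/*
 * Worked example (illustrative, not in the original source):
 * DRM(order)(4096) returns 12, since 2^12 == 4096, while DRM(order)(4097)
 * returns 13 because a non-power-of-two size is rounded up to the next
 * power of two (2^13 == 8192).
 */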

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map;
        drm_map_t __user *argp = (void __user *)arg;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }
        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
                        map->mtrr = mtrr_add( map->offset, map->size,
                                              MTRR_TYPE_WRCOMB, 1 );
                }
#endif
                if (map->type == _DRM_REGISTERS)
                        map->handle = DRM(ioremap)( map->offset, map->size,
                                                    dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( argp, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &argp->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}
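
/*
 * Illustrative user-space sketch (not in the original source), assuming a
 * DRM fd already opened on the device node; DRM_IOCTL_ADD_MAP and drm_map_t
 * come from the drm.h of this era:
 *
 *      drm_map_t map;
 *      memset( &map, 0, sizeof(map) );
 *      map.offset = reg_base;          // physical MMIO base (hypothetical)
 *      map.size   = reg_size;          // page-aligned length (hypothetical)
 *      map.type   = _DRM_REGISTERS;
 *      ioctl( fd, DRM_IOCTL_ADD_MAP, &map );
 *
 * On return the ioctl fills in map.handle, which identifies the mapping to
 * mmap(2) and to rmmap() below.
 */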


/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not in use.
 *
 * \sa addmap().
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t __user *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* The list has wrapped around to the head pointer, or it's empty
         * and we didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
                                retcode = mtrr_del(map->mtrr,
                                                   map->offset,
                                                   map->size);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
                        }
#endif
                        DRM(ioremapfree)(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
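
/*
 * Illustrative counterpart to the addmap() sketch above (not in the
 * original source): only maps created with _DRM_REMOVABLE set can be
 * removed, and per the addmap() checks only _DRM_SHM maps may carry that
 * flag. The match is made on the handle returned by DRM_IOCTL_ADD_MAP:
 *
 *      drm_map_t map;  // previously filled in by DRM_IOCTL_ADD_MAP
 *      ioctl( fd, DRM_IOCTL_RM_MAP, &map );
 */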

#if __HAVE_DMA

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                DRM(free_pages)(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
                DRM(freelist_destroy)(&entry->freelist);
#endif

                entry->buf_count = 0;
        }
}

#if __REALLY_HAVE_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
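
        /*
         * Worked example (illustrative, not in the original source): on a
         * machine with 4 KiB pages (PAGE_SHIFT == 12), request.size == 65536
         * gives order == 16, size == 65536, page_order == 4 and
         * total == 65536; with _DRM_PAGE_ALIGN set, alignment is
         * PAGE_ALIGN(65536) == 65536 as well.
         */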

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_AGP */

#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM(free)( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                        buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                DRM(cleanup_buf_error)(entry);
                                DRM(free)( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM(free)( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;

}
#endif /* __HAVE_PCI_DMA */

#if __HAVE_SG
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t __user *argp = (void __user *)arg;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_SG */

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_PCI_DMA
                return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
                return -EINVAL;
#endif
}
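
/*
 * Illustrative user-space sketch (not in the original source): request a
 * set of AGP DMA buffers via DRM_IOCTL_ADD_BUFS; dropping _DRM_AGP_BUFFER
 * falls through to the PCI path. On return, request.count holds the number
 * of buffers actually allocated and request.size their rounded-up size:
 *
 *      drm_buf_desc_t request;
 *      memset( &request, 0, sizeof(request) );
 *      request.count     = 32;
 *      request.size      = 65536;
 *      request.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *      request.agp_start = agp_buf_start;      // hypothetical AGP offset
 *      ioctl( fd, DRM_IOCTL_ADD_BUFS, &request );
 */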


/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
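
/*
 * Illustrative two-step usage from user space (not in the original source):
 * a first call with count == 0 skips the copy loop and just reports how
 * many populated size orders exist; the second call supplies a list large
 * enough to receive them:
 *
 *      drm_buf_info_t info = { 0, NULL };
 *      ioctl( fd, DRM_IOCTL_INFO_BUFS, &info );        // sets info.count
 *      info.list = malloc( info.count * sizeof(drm_buf_desc_t) );
 *      ioctl( fd, DRM_IOCTL_INFO_BUFS, &info );        // fills the entries
 */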

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective
 * drm_device_dma::bufs entry.
 *
 * \note This ioctl is deprecated and rarely, if ever, used.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
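
/*
 * Illustrative user-space sketch (not in the original source): per the
 * check above, request.count must be at least the device's total buffer
 * count for the mapping to happen, and the caller supplies the list that
 * receives each buffer's idx/total/used/address (total_bufs hypothetical):
 *
 *      drm_buf_map_t request;
 *      request.count   = total_bufs;   // >= the device's dma->buf_count
 *      request.virtual = NULL;
 *      request.list    = malloc( total_bufs * sizeof(drm_buf_pub_t) );
 *      ioctl( fd, DRM_IOCTL_MAP_BUFS, &request );
 */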

#endif /* __HAVE_DMA */