/**
 * \file drm_bufs.h
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif


/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}
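
/*
 * Usage sketch (illustration only, not part of the original file): the
 * order is the base-2 logarithm of the allocation size, rounded up, so a
 * hypothetical request for 1200-byte buffers is promoted to 2048-byte
 * slots:
 *
 *     int order = DRM(order)( 1200 );      order == 11
 *     int size  = 1 << order;              size  == 2048
 */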

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * its removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = 0;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
                        map->mtrr = mtrr_add( map->offset, map->size,
                                              MTRR_TYPE_WRCOMB, 1 );
                }
#endif
                if (map->type == _DRM_REGISTERS)
                        map->handle = DRM(ioremap)( map->offset, map->size,
                                                    dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        up(&dev->struct_sem);

        if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
                return -EFAULT;
        if ( map->type != _DRM_SHM ) {
                if ( copy_to_user( &((drm_map_t *)arg)->handle,
                                   &map->offset,
                                   sizeof(map->offset) ) )
                        return -EFAULT;
        }
        return 0;
}
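
/*
 * Userspace sketch (an illustration under stated assumptions, not part of
 * the original file): a privileged client reaches this handler through the
 * DRM_IOCTL_ADD_MAP ioctl from drm.h, filling a drm_map_t first.  The fd
 * and base/length values below are hypothetical.
 *
 *     drm_map_t map;
 *     memset( &map, 0, sizeof(map) );
 *     map.offset = regs_base;              page-aligned bus address
 *     map.size   = regs_len;               page-aligned length
 *     map.type   = _DRM_REGISTERS;
 *     map.flags  = 0;
 *     if ( ioctl( fd, DRM_IOCTL_ADD_MAP, &map ) == 0 )
 *             the returned map.handle can then be passed to mmap(2);
 */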


/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa addmap().
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
                                retcode = mtrr_del(map->mtrr,
                                                   map->offset,
                                                   map->size);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
                        }
#endif
                        DRM(ioremapfree)(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
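
/*
 * Userspace sketch (illustration only, hypothetical values): the inverse of
 * the addmap sketch above, removing a removable (_DRM_SHM) map by the
 * handle that addmap returned, via the DRM_IOCTL_RM_MAP ioctl from drm.h.
 *
 *     drm_map_t map;
 *     memset( &map, 0, sizeof(map) );
 *     map.handle = saved_handle;           handle from DRM_IOCTL_ADD_MAP
 *     ioctl( fd, DRM_IOCTL_RM_MAP, &map );
 */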

#if __HAVE_DMA

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                DRM(free_pages)(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
                DRM(freelist_destroy)(&entry->freelist);
#endif

                entry->buf_count = 0;
        }
}

#if __REALLY_HAVE_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the device buffer list to accommodate the new buffers.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_AGP */
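
/*
 * Worked example (illustration only, not part of the original file): with
 * PAGE_SIZE == 4096 and a hypothetical request of count == 32,
 * size == 65536, and _DRM_PAGE_ALIGN set, the arithmetic above yields
 *
 *     order      == 16                 DRM(order)( 65536 )
 *     size       == 65536              1 << order
 *     alignment  == 65536              already page aligned
 *     page_order == 4                  order - PAGE_SHIFT
 *     total      == 65536              PAGE_SIZE << page_order
 *
 * so 32 buffers of 64 KiB each are carved out of the AGP aperture starting
 * at dev->agp->base + request.agp_start.
 */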

#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM(free)( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = 0;

                        buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                        buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                DRM(cleanup_buf_error)(entry);
                                DRM(free)( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM(free)( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;

}
#endif /* __HAVE_PCI_DMA */

#if __HAVE_SG
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_SG */

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to addbufs_agp(), addbufs_sg() or
 * addbufs_pci() for AGP, scatter-gather or consistent PCI memory,
 * respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;

        if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( inode, filp, cmd, arg );
        else
#endif
#if __HAVE_PCI_DMA
                return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
                return -EINVAL;
#endif
}
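
/*
 * Userspace sketch (illustration only; fd and sizes are hypothetical): the
 * dispatcher above is reached through the DRM_IOCTL_ADD_BUFS ioctl from
 * drm.h.  Requesting AGP-backed buffers would look like:
 *
 *     drm_buf_desc_t req;
 *     memset( &req, 0, sizeof(req) );
 *     req.count     = 32;
 *     req.size      = 65536;
 *     req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *     req.agp_start = 0;                   offset into the AGP aperture
 *     ioctl( fd, DRM_IOCTL_ADD_BUFS, &req );
 *     on return, req.count holds the number actually allocated
 *
 * Omitting _DRM_AGP_BUFFER and _DRM_SG_BUFFER falls through to the PCI
 * path when the driver is built with __HAVE_PCI_DMA.
 */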


/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request,
                             (drm_buf_info_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( (drm_buf_info_t *)arg,
                           &request,
                           sizeof(request) ) )
                return -EFAULT;

        return 0;
}
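
/*
 * Userspace sketch (illustration only): DRM_IOCTL_INFO_BUFS from drm.h
 * fills one drm_buf_desc_t per populated size order; the array length here
 * is a hypothetical upper bound.
 *
 *     drm_buf_desc_t list[DRM_MAX_ORDER + 1];
 *     drm_buf_info_t info;
 *     info.count = DRM_MAX_ORDER + 1;
 *     info.list  = list;
 *     if ( ioctl( fd, DRM_IOCTL_INFO_BUFS, &info ) == 0 )
 *             the first info.count entries of list describe the pools;
 */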

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is within the admissible range and updates
 * the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly unused.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}
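
/*
 * Userspace sketch (illustration only; the index values are hypothetical):
 * buffers reserved through the DMA ioctl can be handed back with
 * DRM_IOCTL_FREE_BUFS from drm.h.
 *
 *     int idx[2] = { 5, 6 };               indices from a prior reservation
 *     drm_buf_free_t req;
 *     req.count = 2;
 *     req.list  = idx;
 *     ioctl( fd, DRM_IOCTL_FREE_BUFS, &req );
 */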

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, (drm_buf_map_t *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
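
/*
 * Userspace sketch (illustration only; the list array size is
 * hypothetical): clients reach this handler through DRM_IOCTL_MAP_BUFS
 * from drm.h, supplying room for at least dma->buf_count descriptors.
 *
 *     drm_buf_pub_t list[256];
 *     drm_buf_map_t req;
 *     memset( &req, 0, sizeof(req) );
 *     req.count = 256;                     must be >= the buffer count
 *     req.list  = list;
 *     if ( ioctl( fd, DRM_IOCTL_MAP_BUFS, &req ) == 0 )
 *             list[i].address points at buffer i in client space;
 */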

#endif /* __HAVE_DMA */