3 * Generic buffer template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
36 #include <linux/vmalloc.h>
39 #ifndef __HAVE_PCI_DMA
40 #define __HAVE_PCI_DMA 0
47 #ifndef DRIVER_BUF_PRIV_T
48 #define DRIVER_BUF_PRIV_T u32
50 #ifndef DRIVER_AGP_BUFFERS_MAP
51 #if __HAVE_AGP && __HAVE_DMA
52 #error "You must define DRIVER_AGP_BUFFERS_MAP()"
54 #define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
60 * Compute size order. Returns the exponent of the smallest power of two which
61 * is greater than or equal to the given number.
66 * \todo Can be made faster.
/* Compute the log2 "order" of a size: the exponent of the smallest power of
 * two that is >= size.
 * NOTE(review): this excerpt elides several lines of the original function
 * (local declarations, the final return); comments describe only what is
 * visible here. */
68 int DRM(order)( unsigned long size )
/* Shift size right until it is exhausted: order ends up as floor(log2(size)). */
73 	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
/* If any bit below the top bit is set, size is not an exact power of two —
 * presumably the (elided) next line rounds order up. TODO confirm. */
75 	if ( size & ~(1 << order) )
82 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
84 * \param inode device inode.
85 * \param filp file pointer.
87 * \param arg pointer to a drm_map structure.
88 * \return zero on success or a negative value on error.
90 * Adjusts the memory offset to its absolute value according to the mapping
91 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
92 * applicable and if supported by the kernel.
/* Ioctl handler: register a new memory map (registers, frame buffer, shared
 * memory, AGP or scatter/gather) with the device and link it onto
 * dev->maplist.
 * NOTE(review): many lines are elided in this excerpt (error returns,
 * closing braces, some case labels); comments below describe only the
 * visible code. */
94 int DRM(addmap)( struct inode *inode, struct file *filp,
95 unsigned int cmd, unsigned long arg )
97 drm_file_t *priv = filp->private_data;
98 drm_device_t *dev = priv->dev;
100 drm_map_list_t *list;
102 if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
/* Kernel-side copy of the user's drm_map_t request. */
104 map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
108 if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
109 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
113 /* Only allow shared memory to be removable since we only keep enough
114 * book keeping information about shared memory to allow for removal
115 * when processes fork.
117 if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
118 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
121 DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
122 map->offset, map->size, map->type );
/* Offset and size must both be page aligned. */
123 if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
124 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
/* Per-type fixups: adjust offset to an absolute address and set up any
 * auxiliary resources (MTRR, ioremap, vmalloc backing, etc.). */
130 switch ( map->type ) {
132 case _DRM_FRAME_BUFFER:
133 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
/* Reject wrap-around ranges or offsets inside normal RAM. */
134 if ( map->offset + map->size < map->offset ||
135 map->offset < virt_to_phys(high_memory) ) {
136 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
/* Presumably Alpha-only bus-address fixup (dev->hose) — elided #ifdef. */
141 map->offset += dev->hose->mem_space->start;
143 #if __REALLY_HAVE_MTRR
/* Add write-combining MTRR for the frame buffer where supported. */
144 if ( map->type == _DRM_FRAME_BUFFER ||
145 (map->flags & _DRM_WRITE_COMBINING) ) {
146 map->mtrr = mtrr_add( map->offset, map->size,
147 MTRR_TYPE_WRCOMB, 1 );
150 if (map->type == _DRM_REGISTERS)
151 map->handle = DRM(ioremap)( map->offset, map->size,
/* _DRM_SHM case (label elided): back the map with 32-bit-addressable
 * vmalloc memory. */
156 map->handle = vmalloc_32(map->size);
157 DRM_DEBUG( "%lu %d %p\n",
158 map->size, DRM(order)( map->size ), map->handle );
159 if ( !map->handle ) {
160 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
/* For SHM the kernel virtual address doubles as the user-visible offset. */
163 map->offset = (unsigned long)map->handle;
164 if ( map->flags & _DRM_CONTAINS_LOCK ) {
165 /* Prevent a 2nd X Server from creating a 2nd lock */
166 if (dev->lock.hw_lock != NULL) {
167 vfree( map->handle );
168 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
172 dev->lock.hw_lock = map->handle; /* Pointer to lock */
175 #if __REALLY_HAVE_AGP
178 map->offset += dev->hose->mem_space->start;
/* AGP maps are relative to the aperture base; reuse the aperture MTRR. */
180 map->offset += dev->agp->base;
181 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
184 case _DRM_SCATTER_GATHER:
186 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
189 map->offset += dev->sg->handle;
/* default case (elided): unknown type — free and bail out. */
193 DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
/* Wrap the map in a list node and insert it into dev->maplist under
 * struct_sem. */
197 list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
199 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
202 memset(list, 0, sizeof(*list));
205 down(&dev->struct_sem);
206 list_add(&list->head, &dev->maplist->head);
207 up(&dev->struct_sem);
/* Copy the (possibly adjusted) map description back to user space. */
209 if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
211 if ( map->type != _DRM_SHM ) {
212 if ( copy_to_user( &((drm_map_t *)arg)->handle,
214 sizeof(map->offset) ) )
222 * Remove a map private from list and deallocate resources if the mapping
225 * \param inode device inode.
226 * \param filp file pointer.
227 * \param cmd command.
228 * \param arg pointer to a drm_map_t structure.
229 * \return zero on success or a negative value on error.
231 * Searches the map on drm_device::maplist, removes it from the list, sees if
232 * it's being used, and frees any associated resources (such as MTRR's) if it's not
/* Ioctl handler: remove a previously added map, releasing its resources
 * (MTRR, ioremap, backing memory) if no VMA still references it.
 * NOTE(review): several lines are elided in this excerpt (the request
 * declaration, some case labels, the final return); comments describe only
 * the visible code. */
237 int DRM(rmmap)(struct inode *inode, struct file *filp,
238 unsigned int cmd, unsigned long arg)
240 drm_file_t *priv = filp->private_data;
241 drm_device_t *dev = priv->dev;
242 struct list_head *list;
243 drm_map_list_t *r_list = NULL;
244 drm_vma_entry_t *pt, *prev;
249 if (copy_from_user(&request, (drm_map_t *)arg,
/* Walk dev->maplist under struct_sem looking for a removable map whose
 * handle matches the user's request. */
254 down(&dev->struct_sem);
255 list = &dev->maplist->head;
256 list_for_each(list, &dev->maplist->head) {
257 r_list = list_entry(list, drm_map_list_t, head);
260 r_list->map->handle == request.handle &&
261 r_list->map->flags & _DRM_REMOVABLE) break;
264 /* List has wrapped around to the head pointer, or it's empty; we didn't
267 if(list == (&dev->maplist->head)) {
268 up(&dev->struct_sem);
/* Unlink (elided list_del) and free the list node itself. */
273 DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
/* Count live VMAs still mapping this region; resources are only torn down
 * when found_maps is zero (check elided). */
275 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
276 if (pt->vma->vm_private_data == map) found_maps++;
282 case _DRM_FRAME_BUFFER:
283 #if __REALLY_HAVE_MTRR
284 if (map->mtrr >= 0) {
286 retcode = mtrr_del(map->mtrr,
289 DRM_DEBUG("mtrr_del = %d\n", retcode);
292 DRM(ioremapfree)(map->handle, map->size, dev);
298 case _DRM_SCATTER_GATHER:
301 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
303 up(&dev->struct_sem);
310 * Cleanup after an error on one of the addbufs() functions.
312 * \param entry buffer entry where the error occurred.
314 * Frees any pages and buffers associated with the given entry.
/* Undo a partially-completed addbufs_*() call: free every page segment and
 * every per-buffer private area attached to the entry, then reset its counts.
 * The caller sets entry->buf_count / seg_count to the intended totals first
 * so the full amount is released.
 * NOTE(review): some lines (loop closers, size arguments) are elided in this
 * excerpt. */
316 static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
320 if (entry->seg_count) {
321 for (i = 0; i < entry->seg_count; i++) {
322 if (entry->seglist[i]) {
323 DRM(free_pages)(entry->seglist[i],
/* Free the segment-pointer array itself. */
328 DRM(free)(entry->seglist,
330 sizeof(*entry->seglist),
333 entry->seg_count = 0;
336 if (entry->buf_count) {
337 for (i = 0; i < entry->buf_count; i++) {
338 if (entry->buflist[i].dev_private) {
339 DRM(free)(entry->buflist[i].dev_private,
340 entry->buflist[i].dev_priv_size,
/* Free the buffer array itself. */
344 DRM(free)(entry->buflist,
346 sizeof(*entry->buflist),
349 #if __HAVE_DMA_FREELIST
350 DRM(freelist_destroy)(&entry->freelist);
353 entry->buf_count = 0;
357 #if __REALLY_HAVE_AGP
359 * Add AGP buffers for DMA transfers (ioctl).
361 * \param inode device inode.
362 * \param filp file pointer.
363 * \param cmd command.
364 * \param arg pointer to a drm_buf_desc_t request.
365 * \return zero on success or a negative number on failure.
367 * After some sanity checks creates a drm_buf structure for each buffer and
368 * reallocates the buffer list of the same size order to accommodate the new
/* Ioctl helper: carve buffers of one size order out of AGP aperture space
 * and append them to the device's global buffer list.
 * NOTE(review): numerous lines are elided in this excerpt (declarations,
 * error returns, closing braces); comments describe only the visible code. */
371 int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
372 unsigned int cmd, unsigned long arg )
374 drm_file_t *priv = filp->private_data;
375 drm_device_t *dev = priv->dev;
376 drm_device_dma_t *dma = dev->dma;
377 drm_buf_desc_t request;
378 drm_buf_entry_t *entry;
380 unsigned long offset;
381 unsigned long agp_offset;
390 drm_buf_t **temp_buflist;
392 if ( !dma ) return -EINVAL;
394 if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
/* Derive allocation geometry from the requested size. */
398 count = request.count;
399 order = DRM(order)( request.size );
402 alignment = (request.flags & _DRM_PAGE_ALIGN)
403 ? PAGE_ALIGN(size) : size;
404 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
405 total = PAGE_SIZE << page_order;
/* Buffers live at request.agp_start within the AGP aperture. */
408 agp_offset = dev->agp->base + request.agp_start;
410 DRM_DEBUG( "count: %d\n", count );
411 DRM_DEBUG( "order: %d\n", order );
412 DRM_DEBUG( "size: %d\n", size );
413 DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
414 DRM_DEBUG( "alignment: %d\n", alignment );
415 DRM_DEBUG( "page_order: %d\n", page_order );
416 DRM_DEBUG( "total: %d\n", total );
418 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
419 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
/* buf_alloc marks an allocation in progress; buf_use being set means
 * buffers are already mapped and no more may be added. */
421 spin_lock( &dev->count_lock );
422 if ( dev->buf_use ) {
423 spin_unlock( &dev->count_lock );
426 atomic_inc( &dev->buf_alloc );
427 spin_unlock( &dev->count_lock );
429 down( &dev->struct_sem );
430 entry = &dma->bufs[order];
431 if ( entry->buf_count ) {
432 up( &dev->struct_sem );
433 atomic_dec( &dev->buf_alloc );
434 return -ENOMEM; /* May only call once for each order */
437 if (count < 0 || count > 4096) {
438 up( &dev->struct_sem );
439 atomic_dec( &dev->buf_alloc );
443 entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
445 if ( !entry->buflist ) {
446 up( &dev->struct_sem );
447 atomic_dec( &dev->buf_alloc );
450 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
452 entry->buf_size = size;
453 entry->page_order = page_order;
/* Populate one drm_buf_t per buffer; offsets advance by `alignment`
 * (increment elided). */
457 while ( entry->buf_count < count ) {
458 buf = &entry->buflist[entry->buf_count];
459 buf->idx = dma->buf_count + entry->buf_count;
460 buf->total = alignment;
464 buf->offset = (dma->byte_count + offset);
465 buf->bus_address = agp_offset + offset;
466 buf->address = (void *)(agp_offset + offset);
470 init_waitqueue_head( &buf->dma_wait );
473 buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
474 buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
476 if(!buf->dev_private) {
477 /* Set count correctly so we free the proper amount. */
478 entry->buf_count = count;
479 DRM(cleanup_buf_error)(entry);
480 up( &dev->struct_sem );
481 atomic_dec( &dev->buf_alloc );
484 memset( buf->dev_private, 0, buf->dev_priv_size );
486 DRM_DEBUG( "buffer %d @ %p\n",
487 entry->buf_count, buf->address );
491 byte_count += PAGE_SIZE << page_order;
494 DRM_DEBUG( "byte_count: %d\n", byte_count );
/* Grow the device-wide buflist to hold the new entries. */
496 temp_buflist = DRM(realloc)( dma->buflist,
497 dma->buf_count * sizeof(*dma->buflist),
498 (dma->buf_count + entry->buf_count)
499 * sizeof(*dma->buflist),
502 /* Free the entry because it isn't valid */
503 DRM(cleanup_buf_error)(entry);
504 up( &dev->struct_sem );
505 atomic_dec( &dev->buf_alloc );
508 dma->buflist = temp_buflist;
510 for ( i = 0 ; i < entry->buf_count ; i++ ) {
511 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
514 dma->buf_count += entry->buf_count;
515 dma->byte_count += byte_count;
517 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
518 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
520 #if __HAVE_DMA_FREELIST
521 DRM(freelist_create)( &entry->freelist, entry->buf_count );
522 for ( i = 0 ; i < entry->buf_count ; i++ ) {
523 DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
526 up( &dev->struct_sem );
/* Report the actual count back to user space. */
528 request.count = entry->buf_count;
531 if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
534 dma->flags = _DRM_DMA_USE_AGP;
536 atomic_dec( &dev->buf_alloc );
539 #endif /* __REALLY_HAVE_AGP */
/* Ioctl helper: allocate DMA-capable system pages for buffers of one size
 * order and append them to the device's buffer list and page list.
 * NOTE(review): numerous lines are elided in this excerpt (declarations,
 * error returns, closing braces, the inner for-loop header's first clause);
 * comments describe only the visible code. */
542 int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
543 unsigned int cmd, unsigned long arg )
545 drm_file_t *priv = filp->private_data;
546 drm_device_t *dev = priv->dev;
547 drm_device_dma_t *dma = dev->dma;
548 drm_buf_desc_t request;
554 drm_buf_entry_t *entry;
558 unsigned long offset;
562 unsigned long *temp_pagelist;
563 drm_buf_t **temp_buflist;
565 if ( !dma ) return -EINVAL;
567 if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
571 count = request.count;
572 order = DRM(order)( request.size );
575 DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
576 request.count, request.size, size,
577 order, dev->queue_count );
579 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
580 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
582 alignment = (request.flags & _DRM_PAGE_ALIGN)
583 ? PAGE_ALIGN(size) : size;
584 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
585 total = PAGE_SIZE << page_order;
/* buf_alloc marks an allocation in progress; buf_use set means buffers are
 * already mapped and no more may be added. */
587 spin_lock( &dev->count_lock );
588 if ( dev->buf_use ) {
589 spin_unlock( &dev->count_lock );
592 atomic_inc( &dev->buf_alloc );
593 spin_unlock( &dev->count_lock );
595 down( &dev->struct_sem );
596 entry = &dma->bufs[order];
597 if ( entry->buf_count ) {
598 up( &dev->struct_sem );
599 atomic_dec( &dev->buf_alloc );
600 return -ENOMEM; /* May only call once for each order */
603 if (count < 0 || count > 4096) {
604 up( &dev->struct_sem );
605 atomic_dec( &dev->buf_alloc );
609 entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
611 if ( !entry->buflist ) {
612 up( &dev->struct_sem );
613 atomic_dec( &dev->buf_alloc );
616 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
618 entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
620 if ( !entry->seglist ) {
621 DRM(free)( entry->buflist,
622 count * sizeof(*entry->buflist),
624 up( &dev->struct_sem );
625 atomic_dec( &dev->buf_alloc );
628 memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
630 /* Keep the original pagelist until we know all the allocations
633 temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
634 * sizeof(*dma->pagelist),
636 if (!temp_pagelist) {
637 DRM(free)( entry->buflist,
638 count * sizeof(*entry->buflist),
640 DRM(free)( entry->seglist,
641 count * sizeof(*entry->seglist),
643 up( &dev->struct_sem );
644 atomic_dec( &dev->buf_alloc );
/* Copy the existing pagelist into the enlarged working copy. */
647 memcpy(temp_pagelist,
649 dma->page_count * sizeof(*dma->pagelist));
650 DRM_DEBUG( "pagelist: %d entries\n",
651 dma->page_count + (count << page_order) );
653 entry->buf_size = size;
654 entry->page_order = page_order;
/* Allocate one page block per segment; each block may hold several
 * buffers (total / alignment) and several pages (1 << page_order). */
658 while ( entry->buf_count < count ) {
659 page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
661 /* Set count correctly so we free the proper amount. */
662 entry->buf_count = count;
663 entry->seg_count = count;
664 DRM(cleanup_buf_error)(entry);
665 DRM(free)( temp_pagelist,
666 (dma->page_count + (count << page_order))
667 * sizeof(*dma->pagelist),
669 up( &dev->struct_sem );
670 atomic_dec( &dev->buf_alloc );
673 entry->seglist[entry->seg_count++] = page;
674 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
675 DRM_DEBUG( "page %d @ 0x%08lx\n",
676 dma->page_count + page_count,
677 page + PAGE_SIZE * i );
678 temp_pagelist[dma->page_count + page_count++]
679 = page + PAGE_SIZE * i;
/* Inner loop (header partially elided): slice the page block into
 * alignment-sized buffers. */
682 offset + size <= total && entry->buf_count < count ;
683 offset += alignment, ++entry->buf_count ) {
684 buf = &entry->buflist[entry->buf_count];
685 buf->idx = dma->buf_count + entry->buf_count;
686 buf->total = alignment;
689 buf->offset = (dma->byte_count + byte_count + offset);
690 buf->address = (void *)(page + offset);
694 init_waitqueue_head( &buf->dma_wait );
697 buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
698 buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
700 if(!buf->dev_private) {
701 /* Set count correctly so we free the proper amount. */
702 entry->buf_count = count;
703 entry->seg_count = count;
704 DRM(cleanup_buf_error)(entry);
705 DRM(free)( temp_pagelist,
706 (dma->page_count + (count << page_order))
707 * sizeof(*dma->pagelist),
709 up( &dev->struct_sem );
710 atomic_dec( &dev->buf_alloc );
713 memset( buf->dev_private, 0, buf->dev_priv_size );
715 DRM_DEBUG( "buffer %d @ %p\n",
716 entry->buf_count, buf->address );
718 byte_count += PAGE_SIZE << page_order;
/* Grow the device-wide buflist to hold the new entries. */
721 temp_buflist = DRM(realloc)( dma->buflist,
722 dma->buf_count * sizeof(*dma->buflist),
723 (dma->buf_count + entry->buf_count)
724 * sizeof(*dma->buflist),
727 /* Free the entry because it isn't valid */
728 DRM(cleanup_buf_error)(entry);
729 DRM(free)( temp_pagelist,
730 (dma->page_count + (count << page_order))
731 * sizeof(*dma->pagelist),
733 up( &dev->struct_sem );
734 atomic_dec( &dev->buf_alloc );
737 dma->buflist = temp_buflist;
739 for ( i = 0 ; i < entry->buf_count ; i++ ) {
740 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
743 /* No allocations failed, so now we can replace the original pagelist
746 if (dma->page_count) {
747 DRM(free)(dma->pagelist,
748 dma->page_count * sizeof(*dma->pagelist),
751 dma->pagelist = temp_pagelist;
753 dma->buf_count += entry->buf_count;
754 dma->seg_count += entry->seg_count;
755 dma->page_count += entry->seg_count << page_order;
756 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
758 #if __HAVE_DMA_FREELIST
759 DRM(freelist_create)( &entry->freelist, entry->buf_count );
760 for ( i = 0 ; i < entry->buf_count ; i++ ) {
761 DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
764 up( &dev->struct_sem );
/* Report the actual count back to user space. */
766 request.count = entry->buf_count;
769 if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
772 atomic_dec( &dev->buf_alloc );
776 #endif /* __HAVE_PCI_DMA */
/* Ioctl helper: carve buffers of one size order out of the scatter/gather
 * region and append them to the device's global buffer list. Mirrors
 * addbufs_agp(), except offsets are relative to dev->sg.
 * NOTE(review): numerous lines are elided in this excerpt (declarations,
 * error returns, closing braces); comments describe only the visible code. */
779 int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
780 unsigned int cmd, unsigned long arg )
782 drm_file_t *priv = filp->private_data;
783 drm_device_t *dev = priv->dev;
784 drm_device_dma_t *dma = dev->dma;
785 drm_buf_desc_t request;
786 drm_buf_entry_t *entry;
788 unsigned long offset;
789 unsigned long agp_offset;
798 drm_buf_t **temp_buflist;
800 if ( !dma ) return -EINVAL;
802 if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
/* Derive allocation geometry from the requested size. */
806 count = request.count;
807 order = DRM(order)( request.size );
810 alignment = (request.flags & _DRM_PAGE_ALIGN)
811 ? PAGE_ALIGN(size) : size;
812 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
813 total = PAGE_SIZE << page_order;
/* Unlike the AGP path, the start offset is used as-is (relative to the SG
 * area). */
816 agp_offset = request.agp_start;
818 DRM_DEBUG( "count: %d\n", count );
819 DRM_DEBUG( "order: %d\n", order );
820 DRM_DEBUG( "size: %d\n", size );
821 DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
822 DRM_DEBUG( "alignment: %d\n", alignment );
823 DRM_DEBUG( "page_order: %d\n", page_order );
824 DRM_DEBUG( "total: %d\n", total );
826 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
827 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
/* buf_alloc marks an allocation in progress; buf_use set means buffers are
 * already mapped and no more may be added. */
829 spin_lock( &dev->count_lock );
830 if ( dev->buf_use ) {
831 spin_unlock( &dev->count_lock );
834 atomic_inc( &dev->buf_alloc );
835 spin_unlock( &dev->count_lock );
837 down( &dev->struct_sem );
838 entry = &dma->bufs[order];
839 if ( entry->buf_count ) {
840 up( &dev->struct_sem );
841 atomic_dec( &dev->buf_alloc );
842 return -ENOMEM; /* May only call once for each order */
845 if (count < 0 || count > 4096) {
846 up( &dev->struct_sem );
847 atomic_dec( &dev->buf_alloc );
851 entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
853 if ( !entry->buflist ) {
854 up( &dev->struct_sem );
855 atomic_dec( &dev->buf_alloc );
858 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
860 entry->buf_size = size;
861 entry->page_order = page_order;
/* Populate one drm_buf_t per buffer; offsets advance by `alignment`
 * (increment elided). */
865 while ( entry->buf_count < count ) {
866 buf = &entry->buflist[entry->buf_count];
867 buf->idx = dma->buf_count + entry->buf_count;
868 buf->total = alignment;
872 buf->offset = (dma->byte_count + offset);
873 buf->bus_address = agp_offset + offset;
/* Kernel virtual address is offset by the SG handle. */
874 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
878 init_waitqueue_head( &buf->dma_wait );
881 buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
882 buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
884 if(!buf->dev_private) {
885 /* Set count correctly so we free the proper amount. */
886 entry->buf_count = count;
887 DRM(cleanup_buf_error)(entry);
888 up( &dev->struct_sem );
889 atomic_dec( &dev->buf_alloc );
893 memset( buf->dev_private, 0, buf->dev_priv_size );
895 DRM_DEBUG( "buffer %d @ %p\n",
896 entry->buf_count, buf->address );
900 byte_count += PAGE_SIZE << page_order;
903 DRM_DEBUG( "byte_count: %d\n", byte_count );
/* Grow the device-wide buflist to hold the new entries. */
905 temp_buflist = DRM(realloc)( dma->buflist,
906 dma->buf_count * sizeof(*dma->buflist),
907 (dma->buf_count + entry->buf_count)
908 * sizeof(*dma->buflist),
911 /* Free the entry because it isn't valid */
912 DRM(cleanup_buf_error)(entry);
913 up( &dev->struct_sem );
914 atomic_dec( &dev->buf_alloc );
917 dma->buflist = temp_buflist;
919 for ( i = 0 ; i < entry->buf_count ; i++ ) {
920 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
923 dma->buf_count += entry->buf_count;
924 dma->byte_count += byte_count;
926 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
927 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
929 #if __HAVE_DMA_FREELIST
930 DRM(freelist_create)( &entry->freelist, entry->buf_count );
931 for ( i = 0 ; i < entry->buf_count ; i++ ) {
932 DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
935 up( &dev->struct_sem );
/* Report the actual count back to user space. */
937 request.count = entry->buf_count;
940 if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
943 dma->flags = _DRM_DMA_USE_SG;
945 atomic_dec( &dev->buf_alloc );
948 #endif /* __HAVE_SG */
951 * Add buffers for DMA transfers (ioctl).
953 * \param inode device inode.
954 * \param filp file pointer.
955 * \param cmd command.
956 * \param arg pointer to a drm_buf_desc_t request.
957 * \return zero on success or a negative number on failure.
959 * According with the memory type specified in drm_buf_desc::flags and the
960 * build options, it dispatches the call either to addbufs_agp(),
961 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
962 * PCI memory respectively.
/* Ioctl entry point: peek at request.flags and dispatch to the AGP,
 * scatter/gather, or PCI allocation path. Each path is compiled in only
 * when the corresponding feature macro is set (some #if/#endif lines are
 * elided in this excerpt). */
964 int DRM(addbufs)( struct inode *inode, struct file *filp,
965 unsigned int cmd, unsigned long arg )
967 drm_buf_desc_t request;
969 if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
973 #if __REALLY_HAVE_AGP
974 if ( request.flags & _DRM_AGP_BUFFER )
975 return DRM(addbufs_agp)( inode, filp, cmd, arg );
979 if ( request.flags & _DRM_SG_BUFFER )
980 return DRM(addbufs_sg)( inode, filp, cmd, arg );
/* Fall back to plain PCI DMA memory. */
984 return DRM(addbufs_pci)( inode, filp, cmd, arg );
992 * Get information about the buffer mappings.
994 * This was originally meant for debugging purposes, or for use by a sophisticated
995 * client library to determine how best to use the available buffers (e.g.,
996 * large buffers can be used for image transfer).
998 * \param inode device inode.
999 * \param filp file pointer.
1000 * \param cmd command.
1001 * \param arg pointer to a drm_buf_info structure.
1002 * \return zero on success or a negative number on failure.
1004 * Increments drm_device::buf_use while holding the drm_device::count_lock
1005 * lock, preventing of allocating more buffers after this call. Information
1006 * about each requested buffer is then copied into user space.
/* Ioctl handler: report, per size order, the buffer count, size, and
 * freelist watermarks to user space.
 * NOTE(review): several lines are elided in this excerpt (error returns,
 * source arguments of some copy_to_user calls, closing braces); comments
 * describe only the visible code. */
1008 int DRM(infobufs)( struct inode *inode, struct file *filp,
1009 unsigned int cmd, unsigned long arg )
1011 drm_file_t *priv = filp->private_data;
1012 drm_device_t *dev = priv->dev;
1013 drm_device_dma_t *dma = dev->dma;
1014 drm_buf_info_t request;
1018 if ( !dma ) return -EINVAL;
/* Refuse while an addbufs() is in flight; otherwise latch buf_use so no
 * further allocation can happen. */
1020 spin_lock( &dev->count_lock );
1021 if ( atomic_read( &dev->buf_alloc ) ) {
1022 spin_unlock( &dev->count_lock );
1025 ++dev->buf_use; /* Can't allocate more after this call */
1026 spin_unlock( &dev->count_lock );
1028 if ( copy_from_user( &request,
1029 (drm_buf_info_t *)arg,
/* First pass: count how many size orders actually have buffers. */
1033 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1034 if ( dma->bufs[i].buf_count ) ++count;
1037 DRM_DEBUG( "count = %d\n", count );
/* Second pass: if the caller supplied enough slots, copy one descriptor
 * per populated order into request.list. */
1039 if ( request.count >= count ) {
1040 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1041 if ( dma->bufs[i].buf_count ) {
1042 drm_buf_desc_t *to = &request.list[count];
1043 drm_buf_entry_t *from = &dma->bufs[i];
1044 drm_freelist_t *list = &dma->bufs[i].freelist;
1045 if ( copy_to_user( &to->count,
1047 sizeof(from->buf_count) ) ||
1048 copy_to_user( &to->size,
1050 sizeof(from->buf_size) ) ||
1051 copy_to_user( &to->low_mark,
1053 sizeof(list->low_mark) ) ||
1054 copy_to_user( &to->high_mark,
1056 sizeof(list->high_mark) ) )
1059 DRM_DEBUG( "%d %d %d %d %d\n",
1061 dma->bufs[i].buf_count,
1062 dma->bufs[i].buf_size,
1063 dma->bufs[i].freelist.low_mark,
1064 dma->bufs[i].freelist.high_mark );
/* Either way, tell the caller how many orders are populated. */
1069 request.count = count;
1071 if ( copy_to_user( (drm_buf_info_t *)arg,
1080 * Specifies a low and high water mark for buffer allocation
1082 * \param inode device inode.
1083 * \param filp file pointer.
1084 * \param cmd command.
1085 * \param arg a pointer to a drm_buf_desc structure.
1086 * \return zero on success or a negative number on failure.
1088 * Verifies that the size order is bounded between the admissible orders and
1089 * updates the respective drm_device_dma::bufs entry low and high water mark.
1091 * \note This ioctl is deprecated and mostly never used.
/* Ioctl handler: set the freelist low/high watermarks for the buffer entry
 * of the requested size order, after bounds-checking both marks against the
 * entry's buffer count.
 * NOTE(review): some lines (order declaration, error returns, the final
 * return) are elided in this excerpt. */
1093 int DRM(markbufs)( struct inode *inode, struct file *filp,
1094 unsigned int cmd, unsigned long arg )
1096 drm_file_t *priv = filp->private_data;
1097 drm_device_t *dev = priv->dev;
1098 drm_device_dma_t *dma = dev->dma;
1099 drm_buf_desc_t request;
1101 drm_buf_entry_t *entry;
1103 if ( !dma ) return -EINVAL;
1105 if ( copy_from_user( &request,
1106 (drm_buf_desc_t *)arg,
1110 DRM_DEBUG( "%d, %d, %d\n",
1111 request.size, request.low_mark, request.high_mark );
/* Map the requested size onto a bufs[] slot via its size order. */
1112 order = DRM(order)( request.size );
1113 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
1114 entry = &dma->bufs[order];
/* Both marks must lie within [0, buf_count] (error returns elided). */
1116 if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
1118 if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
1121 entry->freelist.low_mark = request.low_mark;
1122 entry->freelist.high_mark = request.high_mark;
1128 * Unreserve the buffers in list, previously reserved using drmDMA.
1130 * \param inode device inode.
1131 * \param filp file pointer.
1132 * \param cmd command.
1133 * \param arg pointer to a drm_buf_free structure.
1134 * \return zero on success or a negative number on failure.
1136 * Calls free_buffer() for each used buffer.
1137 * This function is primarily used for debugging.
/* Ioctl handler: release a user-supplied list of buffer indices back to the
 * device, rejecting out-of-range indices and buffers owned by another file
 * handle.
 * NOTE(review): several lines (idx/buf declarations, copy source, error
 * returns) are elided in this excerpt. */
1139 int DRM(freebufs)( struct inode *inode, struct file *filp,
1140 unsigned int cmd, unsigned long arg )
1142 drm_file_t *priv = filp->private_data;
1143 drm_device_t *dev = priv->dev;
1144 drm_device_dma_t *dma = dev->dma;
1145 drm_buf_free_t request;
1150 if ( !dma ) return -EINVAL;
1152 if ( copy_from_user( &request,
1153 (drm_buf_free_t *)arg,
1157 DRM_DEBUG( "%d\n", request.count );
/* Fetch each index from the user's list individually. */
1158 for ( i = 0 ; i < request.count ; i++ ) {
1159 if ( copy_from_user( &idx,
1163 if ( idx < 0 || idx >= dma->buf_count ) {
1164 DRM_ERROR( "Index %d (of %d max)\n",
1165 idx, dma->buf_count - 1 );
1168 buf = dma->buflist[idx];
/* Only the file handle that mapped the buffer may free it. */
1169 if ( buf->filp != filp ) {
1170 DRM_ERROR( "Process %d freeing buffer not owned\n",
1174 DRM(free_buffer)( dev, buf );
1181 * Maps all of the DMA buffers into client-virtual space (ioctl).
1183 * \param inode device inode.
1184 * \param filp file pointer.
1185 * \param cmd command.
1186 * \param arg pointer to a drm_buf_map structure.
1187 * \return zero on success or a negative number on failure.
1189 * Maps the AGP or SG buffer region with do_mmap(), and copies information
1190 * about each buffer into user space. The PCI buffers are already mapped on the
1191 * addbufs_pci() call.
/* Ioctl handler: mmap the AGP/SG buffer map (or the PCI DMA region) into the
 * caller's address space with do_mmap(), then copy per-buffer index, total,
 * used, and client-virtual address back to user space.
 *
 * Fix: the source dump had mojibake "¤t" (the HTML entity &curren;
 * swallowing "&curr") wherever the code takes current->mm->mmap_sem;
 * restored to "&current" in all eight locations. No other code tokens
 * changed.
 *
 * NOTE(review): several lines are elided in this excerpt (retcode/i
 * declarations, #else/#endif lines of the version checks, do_mmap flags
 * arguments, error paths, the final return); comments describe only the
 * visible code. */
1193 int DRM(mapbufs)( struct inode *inode, struct file *filp,
1194 unsigned int cmd, unsigned long arg )
1196 drm_file_t *priv = filp->private_data;
1197 drm_device_t *dev = priv->dev;
1198 drm_device_dma_t *dma = dev->dma;
1201 unsigned long virtual;
1202 unsigned long address;
1203 drm_buf_map_t request;
1206 if ( !dma ) return -EINVAL;
/* Refuse while an addbufs() is in flight; otherwise latch buf_use so no
 * further allocation can happen. */
1208 spin_lock( &dev->count_lock );
1209 if ( atomic_read( &dev->buf_alloc ) ) {
1210 spin_unlock( &dev->count_lock );
1213 dev->buf_use++; /* Can't allocate more after this call */
1214 spin_unlock( &dev->count_lock );
1216 if ( copy_from_user( &request, (drm_buf_map_t *)arg,
1220 if ( request.count >= dma->buf_count ) {
/* AGP and SG buffers are reached through the driver's buffer map; plain
 * PCI buffers are mmapped directly below. */
1221 if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
1222 (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
1223 drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
/* Older kernels used a plain semaphore for mmap_sem; newer ones an rwsem. */
1230 #if LINUX_VERSION_CODE <= 0x020402
1231 down( &current->mm->mmap_sem );
1233 down_write( &current->mm->mmap_sem );
1235 virtual = do_mmap( filp, 0, map->size,
1236 PROT_READ | PROT_WRITE,
1238 (unsigned long)map->offset );
1239 #if LINUX_VERSION_CODE <= 0x020402
1240 up( &current->mm->mmap_sem );
1242 up_write( &current->mm->mmap_sem );
1245 #if LINUX_VERSION_CODE <= 0x020402
1246 down( &current->mm->mmap_sem );
1248 down_write( &current->mm->mmap_sem );
1250 virtual = do_mmap( filp, 0, dma->byte_count,
1251 PROT_READ | PROT_WRITE,
1253 #if LINUX_VERSION_CODE <= 0x020402
1254 up( &current->mm->mmap_sem );
1256 up_write( &current->mm->mmap_sem );
/* do_mmap() returns an errno encoded in the top page of the address space. */
1259 if ( virtual > -1024UL ) {
1261 retcode = (signed long)virtual;
1264 request.virtual = (void *)virtual;
/* Copy per-buffer bookkeeping and the client-virtual address of each
 * buffer back to user space. */
1266 for ( i = 0 ; i < dma->buf_count ; i++ ) {
1267 if ( copy_to_user( &request.list[i].idx,
1268 &dma->buflist[i]->idx,
1269 sizeof(request.list[0].idx) ) ) {
1273 if ( copy_to_user( &request.list[i].total,
1274 &dma->buflist[i]->total,
1275 sizeof(request.list[0].total) ) ) {
1279 if ( copy_to_user( &request.list[i].used,
1285 address = virtual + dma->buflist[i]->offset; /* *** */
1286 if ( copy_to_user( &request.list[i].address,
1288 sizeof(address) ) ) {
1295 request.count = dma->buf_count;
1296 DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
1298 if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
1304 #endif /* __HAVE_DMA */