3 * Generic buffer template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
36 #include <linux/vmalloc.h>
39 #ifndef __HAVE_PCI_DMA
40 #define __HAVE_PCI_DMA 0
47 #ifndef DRIVER_BUF_PRIV_T
48 #define DRIVER_BUF_PRIV_T u32
50 #ifndef DRIVER_AGP_BUFFERS_MAP
51 #if __HAVE_AGP && __HAVE_DMA
52 #error "You must define DRIVER_AGP_BUFFERS_MAP()"
54 #define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
66 * \todo Can be made faster.
/* Compute the size order of \p size: the exponent of the smallest power of
 * two that is >= size.
 *
 * NOTE(review): this excerpt appears truncated -- the opening brace, the
 * declarations of `order` and `tmp`, the body of the `if`, and the final
 * `return order;` are missing.  Verify against the full file.
 */
int DRM(order)( unsigned long size )
	/* Repeatedly halve size; `order` ends up as floor(log2(size)). */
	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
	/* If size was not an exact power of two, round the order up. */
	if ( size & ~(1 << order) )
82 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
84 * \param inode device inode.
85 * \param filp file pointer.
87 * \param arg pointer to a drm_map structure.
88 * \return zero on success or a negative value on error.
90 * Adjusts the memory offset to its absolute value according to the mapping
91 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
92 * applicable and if supported by the kernel.
/* Ioctl handler: register a memory range (frame buffer, registers, SHM, AGP
 * or scatter/gather) so a non-root process may later mmap it.
 *
 * NOTE(review): this excerpt appears truncated -- several error returns,
 * case labels, braces and the final return are missing.  Verify against
 * the full file before building.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_list_t *list;
	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
	map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	/* Copy the user's map descriptor; free the kernel copy on fault. */
	if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	/* Offset and size must both be page-aligned. */
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
	switch ( map->type ) {
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		/* Reject ranges that wrap around or overlap regular RAM. */
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		map->offset += dev->hose->mem_space->start;
#if __REALLY_HAVE_MTRR
		/* Enable write-combining over the aperture where possible. */
		if ( map->type == _DRM_FRAME_BUFFER ||
		     (map->flags & _DRM_WRITE_COMBINING) ) {
			map->mtrr = mtrr_add( map->offset, map->size,
					      MTRR_TYPE_WRCOMB, 1 );
		map->handle = DRM(ioremap)( map->offset, map->size, dev );
		/* _DRM_SHM: back the map with kernel virtual memory. */
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
#if __REALLY_HAVE_AGP
		map->offset += dev->hose->mem_space->start;
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
	case _DRM_SCATTER_GATHER:
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		map->offset += dev->sg->handle;
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
	/* Link the new map onto the device's map list, under struct_sem. */
	list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	memset(list, 0, sizeof(*list));
	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	up(&dev->struct_sem);
	/* Report the (possibly adjusted) map back to user space. */
	if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
	if ( map->type != _DRM_SHM ) {
		if ( copy_to_user( &((drm_map_t *)arg)->handle,
				   sizeof(map->offset) ) )
214 * Remove a map private from list and deallocate resources if the mapping
217 * \param inode device inode.
218 * \param filp file pointer.
219 * \param cmd command.
220 * \param arg pointer to a drm_map_t structure.
221 * \return zero on success or a negative value on error.
223 * Searches the map on drm_device::maplist, removes it from the list, see if
 * it's being used, and frees any associated resources (such as MTRR's) if it's not
/* Ioctl handler: remove a previously registered map from the device's map
 * list and release its backing resources (MTRR, ioremap) when no VMA still
 * references it.
 *
 * NOTE(review): this excerpt appears truncated -- the request declaration,
 * several braces, returns and switch labels are missing.  Verify against
 * the full file before building.
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	if (copy_from_user(&request, (drm_map_t *)arg,
	down(&dev->struct_sem);
	list = &dev->maplist->head;
	/* Scan the map list for a removable map matching the user's handle. */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		    r_list->map->handle == request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) break;
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find it -- either way there is no matching map to remove.
	 */
	if(list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
	/* Count VMAs still referencing this map before freeing resources. */
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map) found_maps++;
	case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
		if (map->mtrr >= 0) {
			retcode = mtrr_del(map->mtrr,
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		DRM(ioremapfree)(map->handle, map->size, dev);
	case _DRM_SCATTER_GATHER:
	DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	up(&dev->struct_sem);
302 * Cleanup after an error on one of the addbufs() functions.
304 * \param entry buffer entry where the error occurred.
306 * Frees any pages and buffers associated with the given entry.
/* Undo a partially completed addbufs_*() call: free every page segment and
 * every per-buffer private area attached to \p entry, then zero its counts.
 *
 * NOTE(review): this excerpt appears truncated -- the opening brace, the
 * declaration of `i`, several size arguments and closing braces are
 * missing.  Verify against the full file before building.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
	if (entry->seg_count) {
		/* Release each allocated page segment, then the seglist. */
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM(free_pages)(entry->seglist[i],
		DRM(free)(entry->seglist,
			  sizeof(*entry->seglist),
		entry->seg_count = 0;
	if (entry->buf_count) {
		/* Release each buffer's driver-private block, then buflist. */
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				DRM(free)(entry->buflist[i].dev_private,
					  entry->buflist[i].dev_priv_size,
		DRM(free)(entry->buflist,
			  sizeof(*entry->buflist),
#if __HAVE_DMA_FREELIST
		DRM(freelist_destroy)(&entry->freelist);
	entry->buf_count = 0;
349 #if __REALLY_HAVE_AGP
351 * Add AGP buffers for DMA transfers (ioctl).
353 * \param inode device inode.
354 * \param filp file pointer.
355 * \param cmd command.
356 * \param arg pointer to a drm_buf_desc_t request.
357 * \return zero on success or a negative number on failure.
359 * After some sanity checks creates a drm_buf structure for each buffer and
360 * reallocates the buffer list of the same size order to accommodate the new
/* Ioctl helper: create `request.count` DMA buffers of size order
 * `DRM(order)(request.size)` inside the AGP aperture and append them to the
 * device's buffer list.  May only be called once per size order.
 *
 * NOTE(review): this excerpt appears truncated -- local declarations
 * (count/order/size/buf/i/byte_count...), several error returns, braces and
 * the final `return 0;` are missing.  Verify against the full file.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	unsigned long offset;
	unsigned long agp_offset;
	drm_buf_t **temp_buflist;
	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
	count = request.count;
	order = DRM(order)( request.size );
	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	/* Buffers live at this offset within the AGP aperture. */
	agp_offset = dev->agp->base + request.agp_start;
	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
	/* Mark an allocation in progress so infobufs/mapbufs back off. */
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
	entry->buf_size = size;
	entry->page_order = page_order;
	/* Carve the aperture range into `count` aligned buffers. */
	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		init_waitqueue_head( &buf->dma_wait );
		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
		memset( buf->dev_private, 0, buf->dev_priv_size );
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );
		byte_count += PAGE_SIZE << page_order;
	DRM_DEBUG( "byte_count: %d\n", byte_count );
	/* Grow the device-wide buffer list to take the new entries. */
	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	up( &dev->struct_sem );
	request.count = entry->buf_count;
	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
	dma->flags = _DRM_DMA_USE_AGP;
	atomic_dec( &dev->buf_alloc );
531 #endif /* __REALLY_HAVE_AGP */
/* Ioctl helper: allocate `request.count` DMA buffers from kernel pages
 * (PCI DMA path), tracking each page in dma->pagelist.  A temporary
 * pagelist is used so the original survives if any allocation fails.
 *
 * NOTE(review): this excerpt appears truncated -- local declarations,
 * several error returns, size arguments, braces and the final `return 0;`
 * are missing.  Verify against the full file before building.
 */
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	unsigned long offset;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
	count = request.count;
	order = DRM(order)( request.size );
	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request.count, request.size, size,
		   order, dev->queue_count );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	/* Mark an allocation in progress so infobufs/mapbufs back off. */
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
				    * sizeof(*dma->pagelist),
	if (!temp_pagelist) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist),
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	memcpy(temp_pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );
	entry->buf_size = size;
	entry->page_order = page_order;
	while ( entry->buf_count < count ) {
		page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			DRM(cleanup_buf_error)(entry);
			DRM(free)( temp_pagelist,
				   (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
		entry->seglist[entry->seg_count++] = page;
		/* Record every page of this segment in the new pagelist. */
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		/* Carve the segment into as many aligned buffers as fit. */
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			init_waitqueue_head( &buf->dma_wait );
			buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
			buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
			if(!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				DRM(cleanup_buf_error)(entry);
				DRM(free)( temp_pagelist,
					   (dma->page_count + (count << page_order))
					   * sizeof(*dma->pagelist),
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
			memset( buf->dev_private, 0, buf->dev_priv_size );
			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		byte_count += PAGE_SIZE << page_order;
	/* Grow the device-wide buffer list to take the new entries. */
	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM(free)( temp_pagelist,
			   (dma->page_count + (count << page_order))
			   * sizeof(*dma->pagelist),
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
	dma->pagelist = temp_pagelist;
	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	up( &dev->struct_sem );
	request.count = entry->buf_count;
	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
	atomic_dec( &dev->buf_alloc );
768 #endif /* __HAVE_PCI_DMA */
/* Ioctl helper: create `request.count` DMA buffers backed by the device's
 * scatter/gather area and append them to the device's buffer list.
 * Mirrors addbufs_agp() but offsets addresses by dev->sg->handle.
 *
 * NOTE(review): this excerpt appears truncated -- local declarations,
 * several error returns, braces and the final `return 0;` are missing.
 * Verify against the full file before building.
 */
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	unsigned long offset;
	unsigned long agp_offset;
	drm_buf_t **temp_buflist;
	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
	count = request.count;
	order = DRM(order)( request.size );
	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	/* Offset within the scatter/gather region (not a real AGP base). */
	agp_offset = request.agp_start;
	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
	/* Mark an allocation in progress so infobufs/mapbufs back off. */
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
	entry->buf_size = size;
	entry->page_order = page_order;
	/* Carve the SG range into `count` aligned buffers. */
	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		init_waitqueue_head( &buf->dma_wait );
		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
		memset( buf->dev_private, 0, buf->dev_priv_size );
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );
		byte_count += PAGE_SIZE << page_order;
	DRM_DEBUG( "byte_count: %d\n", byte_count );
	/* Grow the device-wide buffer list to take the new entries. */
	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	up( &dev->struct_sem );
	request.count = entry->buf_count;
	if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
	dma->flags = _DRM_DMA_USE_SG;
	atomic_dec( &dev->buf_alloc );
940 #endif /* __HAVE_SG */
943 * Add buffers for DMA transfers (ioctl).
945 * \param inode device inode.
946 * \param filp file pointer.
947 * \param cmd command.
948 * \param arg pointer to a drm_buf_desc_t request.
949 * \return zero on success or a negative number on failure.
951 * According with the memory type specified in drm_buf_desc::flags and the
952 * build options, it dispatches the call either to addbufs_agp(),
953 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
954 * PCI memory respectively.
/* Ioctl entry point: dispatch to the AGP, scatter/gather or PCI variant
 * according to request.flags and the build options.
 *
 * NOTE(review): this excerpt appears truncated -- the opening brace, the
 * copy_from_user fault return, and the surrounding #if/#endif lines for
 * the SG/PCI branches are missing.  Verify against the full file.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
	drm_buf_desc_t request;
	if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( inode, filp, cmd, arg );
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( inode, filp, cmd, arg );
		return DRM(addbufs_pci)( inode, filp, cmd, arg );
984 * Get information about the buffer mappings.
 * This was originally meant for debugging purposes, or by a sophisticated
987 * client library to determine how best to use the available buffers (e.g.,
988 * large buffers can be used for image transfer).
990 * \param inode device inode.
991 * \param filp file pointer.
992 * \param cmd command.
993 * \param arg pointer to a drm_buf_info structure.
994 * \return zero on success or a negative number on failure.
996 * Increments drm_device::buf_use while holding the drm_device::count_lock
997 * lock, preventing of allocating more buffers after this call. Information
998 * about each requested buffer is then copied into user space.
/* Ioctl handler: report, per size order, the count/size/watermarks of the
 * allocated DMA buffers.  Bumps dev->buf_use so no further allocation can
 * happen after this call.
 *
 * NOTE(review): this excerpt appears truncated -- local declarations
 * (i/count), several braces, fault returns and source arguments of the
 * copy_to_user calls are missing.  Verify against the full file.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	if ( !dma ) return -EINVAL;
	/* Refuse while an addbufs_*() call is still in flight. */
	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
	++dev->buf_use; /* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );
	if ( copy_from_user( &request,
			     (drm_buf_info_t *)arg,
	/* First pass: count how many size orders have buffers. */
	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	DRM_DEBUG( "count = %d\n", count );
	/* Second pass: copy one descriptor per populated order, but only if
	 * the caller supplied enough room. */
	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   sizeof(list->high_mark) ) )
				DRM_DEBUG( "%d %d %d %d %d\n",
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
	request.count = count;
	if ( copy_to_user( (drm_buf_info_t *)arg,
1072 * Specifies a low and high water mark for buffer allocation
1074 * \param inode device inode.
1075 * \param filp file pointer.
1076 * \param cmd command.
1077 * \param arg a pointer to a drm_buf_desc structure.
1078 * \return zero on success or a negative number on failure.
1080 * Verifies that the size order is bounded between the admissible orders and
1081 * updates the respective drm_device_dma::bufs entry low and high water mark.
1083 * \note This ioctl is deprecated and mostly never used.
/* Ioctl handler: set the freelist low/high watermarks for the buffer entry
 * whose size order matches request.size.  Deprecated and rarely used.
 *
 * NOTE(review): this excerpt appears truncated -- the opening brace,
 * `order` declaration, -EINVAL returns for bad watermarks, and the final
 * `return 0;` are missing.  Verify against the full file.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request,
			     (drm_buf_desc_t *)arg,
	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];
	/* Watermarks must lie within [0, buf_count]. */
	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;
1120 * Unreserve the buffers in list, previously reserved using drmDMA.
1122 * \param inode device inode.
1123 * \param filp file pointer.
1124 * \param cmd command.
1125 * \param arg pointer to a drm_buf_free structure.
1126 * \return zero on success or a negative number on failure.
1128 * Calls free_buffer() for each used buffer.
1129 * This function is primarily used for debugging.
/* Ioctl handler: release the buffers whose indices the caller lists,
 * returning each to the free list via DRM(free_buffer)().  Only buffers
 * owned by the calling filp may be freed.
 *
 * NOTE(review): this excerpt appears truncated -- declarations of i/idx/buf,
 * the copy_from_user source argument, error returns and closing braces are
 * missing.  Verify against the full file before building.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request,
			     (drm_buf_free_t *)arg,
	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
		/* Reject out-of-range indices before touching buflist. */
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
		buf = dma->buflist[idx];
		/* Only the owner of the buffer may free it. */
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
		DRM(free_buffer)( dev, buf );
1173 * Maps all of the DMA buffers into client-virtual space (ioctl).
1175 * \param inode device inode.
1176 * \param filp file pointer.
1177 * \param cmd command.
1178 * \param arg pointer to a drm_buf_map structure.
1179 * \return zero on success or a negative number on failure.
1181 * Maps the AGP or SG buffer region with do_mmap(), and copies information
1182 * about each buffer into user space. The PCI buffers are already mapped on the
1183 * addbufs_pci() call.
1185 int DRM(mapbufs)( struct inode *inode, struct file *filp,
1186 unsigned int cmd, unsigned long arg )
1188 drm_file_t *priv = filp->private_data;
1189 drm_device_t *dev = priv->dev;
1190 drm_device_dma_t *dma = dev->dma;
1193 unsigned long virtual;
1194 unsigned long address;
1195 drm_buf_map_t request;
1198 if ( !dma ) return -EINVAL;
1200 spin_lock( &dev->count_lock );
1201 if ( atomic_read( &dev->buf_alloc ) ) {
1202 spin_unlock( &dev->count_lock );
1205 dev->buf_use++; /* Can't allocate more after this call */
1206 spin_unlock( &dev->count_lock );
1208 if ( copy_from_user( &request, (drm_buf_map_t *)arg,
1212 if ( request.count >= dma->buf_count ) {
1213 if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
1214 (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
1215 drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
1222 #if LINUX_VERSION_CODE <= 0x020402
1223 down( ¤t->mm->mmap_sem );
1225 down_write( ¤t->mm->mmap_sem );
1227 virtual = do_mmap( filp, 0, map->size,
1228 PROT_READ | PROT_WRITE,
1230 (unsigned long)map->offset );
1231 #if LINUX_VERSION_CODE <= 0x020402
1232 up( ¤t->mm->mmap_sem );
1234 up_write( ¤t->mm->mmap_sem );
1237 #if LINUX_VERSION_CODE <= 0x020402
1238 down( ¤t->mm->mmap_sem );
1240 down_write( ¤t->mm->mmap_sem );
1242 virtual = do_mmap( filp, 0, dma->byte_count,
1243 PROT_READ | PROT_WRITE,
1245 #if LINUX_VERSION_CODE <= 0x020402
1246 up( ¤t->mm->mmap_sem );
1248 up_write( ¤t->mm->mmap_sem );
1251 if ( virtual > -1024UL ) {
1253 retcode = (signed long)virtual;
1256 request.virtual = (void *)virtual;
1258 for ( i = 0 ; i < dma->buf_count ; i++ ) {
1259 if ( copy_to_user( &request.list[i].idx,
1260 &dma->buflist[i]->idx,
1261 sizeof(request.list[0].idx) ) ) {
1265 if ( copy_to_user( &request.list[i].total,
1266 &dma->buflist[i]->total,
1267 sizeof(request.list[0].total) ) ) {
1271 if ( copy_to_user( &request.list[i].used,
1277 address = virtual + dma->buflist[i]->offset; /* *** */
1278 if ( copy_to_user( &request.list[i].address,
1280 sizeof(address) ) ) {
1287 request.count = dma->buf_count;
1288 DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
1290 if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
1296 #endif /* __HAVE_DMA */