/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int DRM(order)( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
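
/*
 * A worked example of the rounding behaviour (illustrative, not from the
 * original source): DRM(order)(4096) returns 12, since 2^12 == 4096
 * exactly, while DRM(order)(4097) returns 13, because any size that is
 * not a power of two is rounded up to the next exponent.
 */
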
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
int DRM(addmap)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map;
	drm_map_t __user *argp = (void __user *)arg;
	drm_map_list_t *list;

	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

	map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EFAULT;
	}

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr   = -1;
	map->handle = NULL;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = DRM(ioremap)( map->offset, map->size,
						    dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	up(&dev->struct_sem);

	if ( copy_to_user( argp, map, sizeof(*map) ) )
		return -EFAULT;
	if ( map->type != _DRM_SHM ) {
		if ( copy_to_user( &argp->handle,
				   &map->offset,
				   sizeof(map->offset) ) )
			return -EFAULT;
	}
	return 0;
}
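
/*
 * A minimal userspace sketch of driving this ioctl.  It assumes the
 * drm_map_t and DRM_IOCTL_ADD_MAP definitions from drm.h and an open DRM
 * file descriptor fd; the values are illustrative only:
 *
 *	drm_map_t map;
 *	memset(&map, 0, sizeof(map));
 *	map.size  = 0x1000;		(one page; must be page aligned)
 *	map.type  = _DRM_SHM;
 *	map.flags = _DRM_CONTAINS_LOCK;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		... map.handle now identifies the new mapping ...
 */
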
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's still being used, and frees any associated resources
 * (such as MTRR's) if it's not.
 *
 * \sa addmap().
 */
int DRM(rmmap)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;

	if (copy_from_user(&request, (drm_map_t __user *)arg,
			   sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list = &dev->maplist->head;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->map->handle == request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) break;
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	list_del(list);
	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map) found_maps++;
	}

	if (!found_maps) {
		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if (drm_core_has_MTRR(dev)) {
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
			}
			DRM(ioremapfree)(map->handle, map->size, dev);
			break;
		case _DRM_SHM:
			vfree(map->handle);
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		}
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	}
	up(&dev->struct_sem);
	return 0;
}
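
/*
 * The removal side is symmetric.  A hedged sketch (again assuming drm.h,
 * with the handle taken from a prior DRM_IOCTL_ADD_MAP):
 *
 *	ioctl(fd, DRM_IOCTL_RM_MAP, &map);
 *
 * Note that only maps created with _DRM_REMOVABLE (in practice _DRM_SHM
 * maps, given the check in addmap()) will match the search above; anything
 * else fails with -EINVAL.
 */
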
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM(free_pages)(entry->seglist[i],
						entry->page_order,
						DRM_MEM_DMA);
			}
		}
		DRM(free)(entry->seglist,
			  entry->seg_count *
			  sizeof(*entry->seglist),
			  DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				DRM(free)(entry->buflist[i].dev_private,
					  entry->buflist[i].dev_priv_size,
					  DRM_MEM_BUFS);
			}
		}
		DRM(free)(entry->buflist,
			  entry->buf_count *
			  sizeof(*entry->buflist),
			  DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;
	drm_buf_desc_t __user *argp = (void __user *)arg;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, argp,
			     sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request.agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp    = NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = DRM(alloc)( buf->dev_priv_size,
					       DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __OS_HAS_AGP */
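
/*
 * Worked sizing example for the loop above (illustrative values, assuming
 * 4 KiB pages): a request with size = 65536 and count = 32 yields
 * order = 16 and size = 1 << 16 = 64 KiB per buffer.  With _DRM_PAGE_ALIGN
 * set, alignment = PAGE_ALIGN(65536) = 65536 and page_order = 16 - 12 = 4,
 * so the loop hands out buffers at agp_offset, agp_offset + 64 KiB,
 * agp_offset + 128 KiB, ..., growing byte_count by 64 KiB per buffer.
 */
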
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
	drm_buf_desc_t __user *argp = (void __user *)arg;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request.count, request.size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
				     DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
				    * sizeof(*dma->pagelist),
				    DRM_MEM_PAGES );
	if (!temp_pagelist) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist),
			   DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			DRM(cleanup_buf_error)(entry);
			DRM(free)( temp_pagelist,
				   (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;

			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next    = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp    = NULL;

			buf->dev_priv_size = dev->dev_priv_size;
			buf->dev_private = DRM(alloc)( dev->dev_priv_size,
						       DRM_MEM_BUFS );
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				DRM(cleanup_buf_error)(entry);
				DRM(free)( temp_pagelist,
					   (dma->page_count + (count << page_order))
					   * sizeof(*dma->pagelist),
					   DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM(free)( temp_pagelist,
			   (dma->page_count + (count << page_order))
			   * sizeof(*dma->pagelist),
			   DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
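
/*
 * Worked pagelist example (illustrative, assuming 4 KiB pages): a request
 * for count = 8 buffers of size = 16384 gives order = 14 and
 * page_order = 2, i.e. one 4-page segment per buffer.  temp_pagelist
 * therefore grows by count << page_order = 32 entries, one per page, and
 * dma->pagelist is only replaced once every allocation has succeeded,
 * which is why the error paths above free temp_pagelist and leave the
 * original list untouched.
 */
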
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t __user *argp = (void __user *)arg;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request.agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp    = NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = DRM(alloc)( dev->dev_priv_size,
					       DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request.count = entry->buf_count;
	request.size = size;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
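
/*
 * Note the address arithmetic in the loop above: unlike the AGP case, the
 * kernel-visible address of a scatter-gather buffer is offset by
 * dev->sg->handle, because the buffers live inside the previously created
 * SG area.  Schematically:
 *
 *	buf->bus_address = agp_offset + offset;
 *	buf->address     = (void *)(buf->bus_address + dev->sg->handle);
 */
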
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( inode, filp, cmd, arg );
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( inode, filp, cmd, arg );
	else
		return DRM(addbufs_pci)( inode, filp, cmd, arg );
}
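
/*
 * A hedged userspace sketch of this dispatch (assumes drm.h; the values
 * are illustrative): the flags field selects which of the three paths
 * above runs.
 *
 *	drm_buf_desc_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;
 *	req.size  = 65536;
 *	req.flags = _DRM_AGP_BUFFER;	(or _DRM_SG_BUFFER, or 0 for PCI)
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On success the kernel writes back the number of buffers actually
 * created in req.count and the rounded size in req.size.
 */
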
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}
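
/*
 * The usual client pattern is two calls (a sketch assuming the drm.h
 * definitions; not from the original source): first probe with count = 0
 * to learn how many size pools exist, then pass a large enough list:
 *
 *	drm_buf_info_t info = { .count = 0, .list = NULL };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(info.count = pool count)
 *	info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(info.list[] filled in)
 */
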
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
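
/*
 * Sketch of setting the water marks for one pool (hypothetical values,
 * assuming drm.h):
 *
 *	drm_buf_desc_t req = { .size = 65536, .low_mark = 4, .high_mark = 24 };
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &req);
 *
 * The size only selects the pool via DRM(order)(); both marks must fall
 * within [0, buf_count] for that pool.
 */
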
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		DRM(free_buffer)( dev, buf );
	}

	return 0;
}
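
/*
 * Userspace counterpart (a sketch, assuming drm.h): hand back a list of
 * buffer indices previously obtained through the DMA interface:
 *
 *	int idx[2] = { 3, 7 };
 *	drm_buf_free_t req = { .count = 2, .list = idx };
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 *
 * Each index is range-checked and must belong to the calling file handle,
 * so one client cannot free another client's buffers.
 */
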
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
		    (drm_core_check_feature(dev, DRIVER_SG) &&
		     (dma->flags & _DRM_DMA_USE_SG)) ) {
			drm_map_t *map = dev->agp_buffer_map;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}
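
/*
 * Typical client usage (a sketch assuming drm.h; error handling omitted):
 * size the list from a prior DRM_IOCTL_INFO_BUFS or the known buffer
 * count, then let the kernel mmap the region and describe every buffer:
 *
 *	drm_buf_map_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.count = buffer_count;	(hypothetical, learned beforehand)
 *	req.list  = calloc(req.count, sizeof(drm_buf_pub_t));
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) == 0)
 *		... req.virtual is the mapping; req.list[i].idx, .total
 *		    and .address describe each buffer ...
 */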