diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4113bcba6..006b06d29 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -1,7 +1,7 @@
 /**
- * \file drm_bufs.h
+ * \file drm_bufs.c
  * Generic buffer template
- * 
+ *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
  */
@@ -36,29 +36,72 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order. Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-	int order;
-	unsigned long tmp;
+	return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
 
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-		;
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
 
-	if (size & (size - 1))
-		++order;
+EXPORT_SYMBOL(drm_get_resource_len);
 
-	return order;
+static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+					     drm_local_map_t *map)
+{
+	struct list_head *list;
+
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+		if (entry->map && map->type == entry->map->type &&
+		    entry->map->offset == map->offset) {
+			return entry;
+		}
+	}
+
+	return NULL;
 }
-EXPORT_SYMBOL(drm_order);
+
+/*
+ * Used to allocate 32-bit handles for mappings.
+ */
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle,
+					drm_device_t *dev)
+{
+	static unsigned int map32_handle = START_RANGE;
+	unsigned int hash;
+
+	if (lhandle & 0xffffffff00000000) {
+		hash = map32_handle;
+		map32_handle += PAGE_SIZE;
+		if (map32_handle > END_RANGE)
+			map32_handle = START_RANGE;
+	} else
+		hash = lhandle;
+
+	while (1) {
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head, head) {
+			if (_entry->user_token == hash)
+				break;
+		}
+		if (&_entry->head == &dev->maplist->head)
+			return hash;
+
+		hash += PAGE_SIZE;
+		map32_handle += PAGE_SIZE;
+	}
+}
+#else
+# define HandleID(x,dev)	(unsigned int)(x)
+#endif
 
 /**
  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
@@ -73,86 +116,100 @@ EXPORT_SYMBOL(drm_order);
  * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
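 *
 * In-kernel callers use drm_addmap() (introduced below) directly; a minimal
 * sketch of a driver mapping its register BAR at load time (the BAR index
 * and the error handling are illustrative assumptions, not taken from this
 * file):
 * \code
 * drm_local_map_t *regs;
 * int ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *                      drm_get_resource_len(dev, 0),
 *                      _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 * if (ret != 0)
 *         return ret;
 * \endcode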
*/ -int drm_addmap( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +static int drm_addmap_core(drm_device_t * dev, unsigned int offset, + unsigned int size, drm_map_type_t type, + drm_map_flags_t flags, drm_map_list_t ** maplist) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_map_t *map; - drm_map_t __user *argp = (void __user *)arg; drm_map_list_t *list; + drm_dma_handle_t *dmah; - if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */ - - map = drm_alloc( sizeof(*map), DRM_MEM_MAPS ); - if ( !map ) + map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); + if (!map) return -ENOMEM; - if ( copy_from_user( map, argp, sizeof(*map) ) ) { - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); - return -EFAULT; - } + map->offset = offset; + map->size = size; + map->flags = flags; + map->type = type; /* Only allow shared memory to be removable since we only keep enough * book keeping information about shared memory to allow for removal * when processes fork. */ - if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) { - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); + if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } - DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n", - map->offset, map->size, map->type ); - if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) { - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); + DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", + map->offset, map->size, map->type); + if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } - map->mtrr = -1; + map->mtrr = -1; map->handle = NULL; - switch ( map->type ) { + switch (map->type) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) - if ( map->offset + map->size < map->offset || - map->offset < virt_to_phys(high_memory) ) { - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) + if (map->offset + (map->size-1) < map->offset || + map->offset < virt_to_phys(high_memory)) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } #endif #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif + /* Some drivers preinitialize some maps, without the X Server + * needing to be aware of it. Therefore, we just return success + * when the server tries to create a duplicate map. 
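+		 * A duplicate is detected by drm_find_matching_map() above
+		 * purely by map type and offset; when the sizes disagree,
+		 * the recorded size is overwritten with the caller's size
+		 * below.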
+ */ + list = drm_find_matching_map(dev, map); + if (list != NULL) { + if (list->map->size != map->size) { + DRM_DEBUG("Matching maps of type %d with " + "mismatched sizes, (%ld vs %ld)\n", + map->type, map->size, + list->map->size); + list->map->size = map->size; + } + + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + *maplist = list; + return 0; + } + if (drm_core_has_MTRR(dev)) { - if ( map->type == _DRM_FRAME_BUFFER || - (map->flags & _DRM_WRITE_COMBINING) ) { - map->mtrr = mtrr_add( map->offset, map->size, - MTRR_TYPE_WRCOMB, 1 ); + if (map->type == _DRM_FRAME_BUFFER || + (map->flags & _DRM_WRITE_COMBINING)) { + map->mtrr = mtrr_add(map->offset, map->size, + MTRR_TYPE_WRCOMB, 1); } } if (map->type == _DRM_REGISTERS) - map->handle = drm_ioremap( map->offset, map->size, - dev ); + map->handle = drm_ioremap(map->offset, map->size, dev); break; case _DRM_SHM: map->handle = vmalloc_32(map->size); - DRM_DEBUG( "%lu %d %p\n", - map->size, drm_order( map->size ), map->handle ); - if ( !map->handle ) { - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); + DRM_DEBUG("%lu %d %p\n", + map->size, drm_order(map->size), map->handle); + if (!map->handle) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -ENOMEM; } map->offset = (unsigned long)map->handle; - if ( map->flags & _DRM_CONTAINS_LOCK ) { + if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ if (dev->lock.hw_lock != NULL) { - vfree( map->handle ); - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); + vfree(map->handle); + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EBUSY; } - dev->sigdata.lock = - dev->lock.hw_lock = map->handle; /* Pointer to lock */ + dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ } break; case _DRM_AGP: @@ -161,7 +218,7 @@ int drm_addmap( struct inode *inode, struct file *filp, map->offset += dev->hose->mem_space->start; #endif map->offset += dev->agp->base; - map->mtrr = dev->agp->agp_mtrr; /* for getmap */ + map->mtrr = dev->agp->agp_mtrr; /* for getmap */ } break; case _DRM_SCATTER_GATHER: @@ -169,38 +226,98 @@ int drm_addmap( struct inode *inode, struct file *filp, drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } - map->offset += dev->sg->handle; + map->offset += (unsigned long)dev->sg->virtual; + break; + case _DRM_CONSISTENT: + /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, + * As we're limiting the address to 2^32-1 (or less), + * casting it down to 32 bits is no problem, but we + * need to point to a 64bit variable first. */ + dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); + if (!dmah) { + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -ENOMEM; + } + map->handle = dmah->vaddr; + map->offset = (unsigned long)dmah->busaddr; + kfree(dmah); break; - default: - drm_free( map, sizeof(*map), DRM_MEM_MAPS ); + drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); - if(!list) { + if (!list) { drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } memset(list, 0, sizeof(*list)); list->map = map; - down(&dev->struct_sem); + mutex_lock(&dev->struct_mutex); list_add(&list->head, &dev->maplist->head); - up(&dev->struct_sem); + /* Assign a 32-bit handle */ + /* We do it here so that dev->struct_mutex protects the increment */ + list->user_token = HandleID(map->type == _DRM_SHM + ? 
(unsigned long)map->handle + : map->offset, dev); + mutex_unlock(&dev->struct_mutex); + + *maplist = list; + return 0; +} + +int drm_addmap(drm_device_t * dev, unsigned int offset, + unsigned int size, drm_map_type_t type, + drm_map_flags_t flags, drm_local_map_t ** map_ptr) +{ + drm_map_list_t *list; + int rc; + + rc = drm_addmap_core(dev, offset, size, type, flags, &list); + if (!rc) + *map_ptr = list->map; + return rc; +} + +EXPORT_SYMBOL(drm_addmap); + +int drm_addmap_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + drm_map_t map; + drm_map_list_t *maplist; + drm_map_t __user *argp = (void __user *)arg; + int err; - if ( copy_to_user( argp, map, sizeof(*map) ) ) + if (!(filp->f_mode & 3)) + return -EACCES; /* Require read/write */ + + if (copy_from_user(&map, argp, sizeof(map))) { return -EFAULT; - if ( map->type != _DRM_SHM ) { - if ( copy_to_user( &argp->handle, - &map->offset, - sizeof(map->offset) ) ) - return -EFAULT; } + + if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) + return -EPERM; + + err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, + &maplist); + + if (err) + return err; + + if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) + return -EFAULT; + + /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ + if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle)) + return -EFAULT; return 0; } - /** * Remove a map private from list and deallocate resources if the mapping * isn't in use. @@ -215,117 +332,167 @@ int drm_addmap( struct inode *inode, struct file *filp, * its being used, and free any associate resource (such as MTRR's) if it's not * being on use. * - * \sa addmap(). + * \sa drm_addmap */ -int drm_rmmap(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; struct list_head *list; drm_map_list_t *r_list = NULL; - drm_vma_entry_t *pt, *prev; - drm_map_t *map; + drm_dma_handle_t dmah; + + /* Find the list entry for the map and remove it */ + list_for_each(list, &dev->maplist->head) { + r_list = list_entry(list, drm_map_list_t, head); + + if (r_list->map == map) { + list_del(list); + drm_free(list, sizeof(*list), DRM_MEM_MAPS); + break; + } + } + + /* List has wrapped around to the head pointer, or it's empty and we + * didn't find anything. + */ + if (list == (&dev->maplist->head)) { + return -EINVAL; + } + + switch (map->type) { + case _DRM_REGISTERS: + drm_ioremapfree(map->handle, map->size, dev); + /* FALLTHROUGH */ + case _DRM_FRAME_BUFFER: + if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, map->offset, map->size); + DRM_DEBUG("mtrr_del=%d\n", retcode); + } + break; + case _DRM_SHM: + vfree(map->handle); + break; + case _DRM_AGP: + case _DRM_SCATTER_GATHER: + break; + case _DRM_CONSISTENT: + dmah.vaddr = map->handle; + dmah.busaddr = map->offset; + dmah.size = map->size; + __drm_pci_free(dev, &dmah); + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + + return 0; +} + +int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +{ + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_rmmap_locked(dev, map); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/* The rmmap ioctl appears to be unnecessary. 
All mappings are torn down on + * the last close of the device, and this is necessary for cleanup when things + * exit uncleanly. Therefore, having userland manually remove mappings seems + * like a pointless exercise since they're going away anyway. + * + * One use case might be after addmap is allowed for normal users for SHM and + * gets used by drivers that the server doesn't need to care about. This seems + * unlikely. + */ +int drm_rmmap_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; drm_map_t request; - int found_maps = 0; + drm_local_map_t *map = NULL; + struct list_head *list; + int ret; - if (copy_from_user(&request, (drm_map_t __user *)arg, - sizeof(request))) { + if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { return -EFAULT; } - down(&dev->struct_sem); - list = &dev->maplist->head; + mutex_lock(&dev->struct_mutex); list_for_each(list, &dev->maplist->head) { - r_list = list_entry(list, drm_map_list_t, head); + drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); - if(r_list->map && - r_list->map->handle == request.handle && - r_list->map->flags & _DRM_REMOVABLE) break; + if (r_list->map && + r_list->user_token == (unsigned long)request.handle && + r_list->map->flags & _DRM_REMOVABLE) { + map = r_list->map; + break; + } } /* List has wrapped around to the head pointer, or its empty we didn't * find anything. */ - if(list == (&dev->maplist->head)) { - up(&dev->struct_sem); + if (list == (&dev->maplist->head)) { + mutex_unlock(&dev->struct_mutex); return -EINVAL; } - map = r_list->map; - list_del(list); - drm_free(list, sizeof(*list), DRM_MEM_MAPS); - for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { - if (pt->vma->vm_private_data == map) found_maps++; - } + if (!map) + return -EINVAL; - if(!found_maps) { - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - if (drm_core_has_MTRR(dev)) { - if (map->mtrr >= 0) { - int retcode; - retcode = mtrr_del(map->mtrr, - map->offset, - map->size); - DRM_DEBUG("mtrr_del = %d\n", retcode); - } - } - drm_ioremapfree(map->handle, map->size, dev); - break; - case _DRM_SHM: - vfree(map->handle); - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - } - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + /* Register and framebuffer maps are permanent */ + if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { + mutex_unlock(&dev->struct_mutex); + return 0; } - up(&dev->struct_sem); - return 0; + + ret = drm_rmmap_locked(dev, map); + + mutex_unlock(&dev->struct_mutex); + + return ret; } /** * Cleanup after an error on one of the addbufs() functions. * + * \param dev DRM device. * \param entry buffer entry where the error occurred. * * Frees any pages and buffers associated with the given entry. 
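 *
 * The error paths in the addbufs_*() helpers below rely on buflist and
 * seglist having been zeroed when allocated: before calling here they force
 * the counts up to the full request, and the NULL checks above skip the tail
 * entries that were never filled in. A typical abort therefore reads:
 * \code
 * entry->buf_count = count;
 * entry->seg_count = count;
 * drm_cleanup_buf_error(dev, entry);
 * mutex_unlock(&dev->struct_mutex);
 * atomic_dec(&dev->buf_alloc);
 * return -ENOMEM;
 * \endcode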
*/ -static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry) +static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) { int i; if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { if (entry->seglist[i]) { - drm_free_pages(entry->seglist[i], - entry->page_order, - DRM_MEM_DMA); + drm_pci_free(dev, entry->seglist[i]); } } drm_free(entry->seglist, - entry->seg_count * - sizeof(*entry->seglist), - DRM_MEM_SEGS); + entry->seg_count * + sizeof(*entry->seglist), DRM_MEM_SEGS); entry->seg_count = 0; } - if (entry->buf_count) { - for (i = 0; i < entry->buf_count; i++) { + if (entry->buf_count) { + for (i = 0; i < entry->buf_count; i++) { if (entry->buflist[i].dev_private) { drm_free(entry->buflist[i].dev_private, - entry->buflist[i].dev_priv_size, - DRM_MEM_BUFS); + entry->buflist[i].dev_priv_size, + DRM_MEM_BUFS); } } drm_free(entry->buflist, - entry->buf_count * - sizeof(*entry->buflist), - DRM_MEM_BUFS); + entry->buf_count * + sizeof(*entry->buflist), DRM_MEM_BUFS); entry->buf_count = 0; } @@ -333,25 +500,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry) #if __OS_HAS_AGP /** - * Add AGP buffers for DMA transfers (ioctl). + * Add AGP buffers for DMA transfers. * - * \param inode device inode. - * \param filp file pointer. - * \param cmd command. - * \param arg pointer to a drm_buf_desc_t request. + * \param dev drm_device_t to which the buffers are to be added. + * \param request pointer to a drm_buf_desc_t describing the request. * \return zero on success or a negative number on failure. - * + * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. */ -int drm_addbufs_agp( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; drm_buf_entry_t *entry; drm_buf_t *buf; unsigned long offset; @@ -365,165 +526,156 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp, int byte_count; int i; drm_buf_t **temp_buflist; - drm_buf_desc_t __user *argp = (void __user *)arg; - - if ( !dma ) return -EINVAL; - if ( copy_from_user( &request, argp, - sizeof(request) ) ) - return -EFAULT; + if (!dma) + return -EINVAL; - count = request.count; - order = drm_order( request.size ); + count = request->count; + order = drm_order(request->size); size = 1 << order; - alignment = (request.flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; - agp_offset = dev->agp->base + request.agp_start; - - DRM_DEBUG( "count: %d\n", count ); - DRM_DEBUG( "order: %d\n", order ); - DRM_DEBUG( "size: %d\n", size ); - DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); - DRM_DEBUG( "alignment: %d\n", alignment ); - DRM_DEBUG( "page_order: %d\n", page_order ); - DRM_DEBUG( "total: %d\n", total ); - - if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; - if ( dev->queue_count ) return -EBUSY; /* Not while in use */ - - spin_lock( &dev->count_lock ); - if ( dev->buf_use ) { - spin_unlock( &dev->count_lock ); + agp_offset = dev->agp->base + request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lx\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); return -EBUSY; } - atomic_inc( &dev->buf_alloc ); - spin_unlock( &dev->count_lock ); + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); - down( &dev->struct_sem ); + mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; - if ( entry->buf_count ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); - return -ENOMEM; /* May only call once for each order */ + if (entry->buf_count) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -EINVAL; } - entry->buflist = drm_alloc( count * sizeof(*entry->buflist), - DRM_MEM_BUFS ); - if ( !entry->buflist ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->buf_size = size; entry->page_order = page_order; offset = 0; - while ( entry->buf_count < count ) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; - buf->offset = (dma->byte_count + offset); + buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); - buf->next = NULL; + buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head( &buf->dma_wait ); - buf->filp = NULL; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc( buf->dev_priv_size, - DRM_MEM_BUFS ); - if(!buf->dev_private) { + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
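			 * drm_cleanup_buf_error() walks entry->buf_count
			 * buflist slots; the tail beyond the buffers created
			 * so far is still zeroed from the memset above, so
			 * claiming the full count here is safe.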
*/ entry->buf_count = count; - drm_cleanup_buf_error(dev,entry); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( buf->dev_private, 0, buf->dev_priv_size ); + memset(buf->dev_private, 0, buf->dev_priv_size); - DRM_DEBUG( "buffer %d @ %p\n", - entry->buf_count, buf->address ); + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } - DRM_DEBUG( "byte_count: %d\n", byte_count ); + DRM_DEBUG("byte_count: %d\n", byte_count); - temp_buflist = drm_realloc( dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), - DRM_MEM_BUFS ); - if(!temp_buflist) { + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev,entry); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; - for ( i = 0 ; i < entry->buf_count ; i++ ) { + for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; dma->byte_count += byte_count; - DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); - DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - up( &dev->struct_sem ); + mutex_unlock(&dev->struct_mutex); - request.count = entry->buf_count; - request.size = size; - - if ( copy_to_user( argp, &request, sizeof(request) ) ) - return -EFAULT; + request->count = entry->buf_count; + request->size = size; dma->flags = _DRM_DMA_USE_AGP; - atomic_dec( &dev->buf_alloc ); + atomic_dec(&dev->buf_alloc); return 0; } -#endif /* __OS_HAS_AGP */ +EXPORT_SYMBOL(drm_addbufs_agp); +#endif /* __OS_HAS_AGP */ -int drm_addbufs_pci( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; int count; int order; int size; int total; int page_order; drm_buf_entry_t *entry; - unsigned long page; + drm_dma_handle_t *dmah; drm_buf_t *buf; int alignment; unsigned long offset; @@ -532,183 +684,182 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp, int page_count; unsigned long *temp_pagelist; drm_buf_t **temp_buflist; - drm_buf_desc_t __user *argp = (void __user *)arg; - if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; - if ( !dma ) return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) + return -EINVAL; - if ( copy_from_user( &request, argp, sizeof(request) ) ) - return -EFAULT; + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; - count = request.count; - order = drm_order( request.size ); + count = request->count; + order = drm_order(request->size); size = 1 << order; - DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n", - 
request.count, request.size, size, - order, dev->queue_count ); + DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", + request->count, request->size, size, order, dev->queue_count); - if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; - if ( dev->queue_count ) return -EBUSY; /* Not while in use */ + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ - alignment = (request.flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; - spin_lock( &dev->count_lock ); - if ( dev->buf_use ) { - spin_unlock( &dev->count_lock ); + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); return -EBUSY; } - atomic_inc( &dev->buf_alloc ); - spin_unlock( &dev->count_lock ); + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); - down( &dev->struct_sem ); + mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; - if ( entry->buf_count ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + if (entry->buf_count) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -EINVAL; } - entry->buflist = drm_alloc( count * sizeof(*entry->buflist), - DRM_MEM_BUFS ); - if ( !entry->buflist ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); - - entry->seglist = drm_alloc( count * sizeof(*entry->seglist), - DRM_MEM_SEGS ); - if ( !entry->seglist ) { - drm_free( entry->buflist, - count * sizeof(*entry->buflist), - DRM_MEM_BUFS ); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->seglist = drm_alloc(count * sizeof(*entry->seglist), + DRM_MEM_SEGS); + if (!entry->seglist) { + drm_free(entry->buflist, + count * sizeof(*entry->buflist), DRM_MEM_BUFS); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( entry->seglist, 0, count * sizeof(*entry->seglist) ); + memset(entry->seglist, 0, count * sizeof(*entry->seglist)); /* Keep the original pagelist until we know all the allocations * have succeeded */ - temp_pagelist = drm_alloc( (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES ); + temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); if (!temp_pagelist) { - drm_free( entry->buflist, - count * sizeof(*entry->buflist), - DRM_MEM_BUFS ); - drm_free( entry->seglist, - count * sizeof(*entry->seglist), - DRM_MEM_SEGS ); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_free(entry->buflist, + count * sizeof(*entry->buflist), DRM_MEM_BUFS); + drm_free(entry->seglist, + count * sizeof(*entry->seglist), DRM_MEM_SEGS); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } memcpy(temp_pagelist, - dma->pagelist, - dma->page_count * 
sizeof(*dma->pagelist)); - DRM_DEBUG( "pagelist: %d entries\n", - dma->page_count + (count << page_order) ); + dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); + DRM_DEBUG("pagelist: %d entries\n", + dma->page_count + (count << page_order)); - entry->buf_size = size; + entry->buf_size = size; entry->page_order = page_order; byte_count = 0; page_count = 0; - while ( entry->buf_count < count ) { - page = drm_alloc_pages( page_order, DRM_MEM_DMA ); - if ( !page ) { + while (entry->buf_count < count) { + + dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); + + if (!dmah) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); - drm_free( temp_pagelist, - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES ); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_free(temp_pagelist, + (dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - entry->seglist[entry->seg_count++] = page; - for ( i = 0 ; i < (1 << page_order) ; i++ ) { - DRM_DEBUG( "page %d @ 0x%08lx\n", - dma->page_count + page_count, - page + PAGE_SIZE * i ); + entry->seglist[entry->seg_count++] = dmah; + for (i = 0; i < (1 << page_order); i++) { + DRM_DEBUG("page %d @ 0x%08lx\n", + dma->page_count + page_count, + (unsigned long)dmah->vaddr + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] - = page + PAGE_SIZE * i; + = (unsigned long)dmah->vaddr + PAGE_SIZE * i; } - for ( offset = 0 ; - offset + size <= total && entry->buf_count < count ; - offset += alignment, ++entry->buf_count ) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(page + offset); - buf->next = NULL; + for (offset = 0; + offset + size <= total && entry->buf_count < count; + offset += alignment, ++entry->buf_count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + buf->offset = (dma->byte_count + byte_count + offset); + buf->address = (void *)(dmah->vaddr + offset); + buf->bus_address = dmah->busaddr + offset; + buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head( &buf->dma_wait ); - buf->filp = NULL; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc( buf->dev_priv_size, - DRM_MEM_BUFS ); - if(!buf->dev_private) { + buf->dev_private = drm_alloc(buf->dev_priv_size, + DRM_MEM_BUFS); + if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
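				 * Both buf_count and seg_count are forced to
				 * the full request here: buflist and seglist
				 * were zeroed at allocation, so the cleanup
				 * loop skips slots that were never filled in.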
*/ entry->buf_count = count; entry->seg_count = count; - drm_cleanup_buf_error(dev,entry); - drm_free( temp_pagelist, - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES ); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + drm_free(temp_pagelist, + (dma->page_count + + (count << page_order)) + * sizeof(*dma->pagelist), + DRM_MEM_PAGES); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( buf->dev_private, 0, buf->dev_priv_size ); + memset(buf->dev_private, 0, buf->dev_priv_size); - DRM_DEBUG( "buffer %d @ %p\n", - entry->buf_count, buf->address ); + DRM_DEBUG("buffer %d @ %p\n", + entry->buf_count, buf->address); } byte_count += PAGE_SIZE << page_order; } - temp_buflist = drm_realloc( dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), - DRM_MEM_BUFS ); + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); if (!temp_buflist) { /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev,entry); - drm_free( temp_pagelist, - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES ); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + drm_free(temp_pagelist, + (dma->page_count + (count << page_order)) + * sizeof(*dma->pagelist), DRM_MEM_PAGES); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; - for ( i = 0 ; i < entry->buf_count ; i++ ) { + for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } @@ -717,8 +868,8 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp, */ if (dma->page_count) { drm_free(dma->pagelist, - dma->page_count * sizeof(*dma->pagelist), - DRM_MEM_PAGES); + dma->page_count * sizeof(*dma->pagelist), + DRM_MEM_PAGES); } dma->pagelist = temp_pagelist; @@ -727,27 +878,20 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp, dma->page_count += entry->seg_count << page_order; dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - up( &dev->struct_sem ); + mutex_unlock(&dev->struct_mutex); - request.count = entry->buf_count; - request.size = size; - - if ( copy_to_user( argp, &request, sizeof(request) ) ) - return -EFAULT; + request->count = entry->buf_count; + request->size = size; - atomic_dec( &dev->buf_alloc ); + atomic_dec(&dev->buf_alloc); return 0; } +EXPORT_SYMBOL(drm_addbufs_pci); -int drm_addbufs_sg( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t __user *argp = (void __user *)arg; - drm_buf_desc_t request; drm_buf_entry_t *entry; drm_buf_t *buf; unsigned long offset; @@ -762,150 +906,312 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp, int i; drm_buf_t **temp_buflist; - if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - - if ( !dma ) return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_SG)) + return -EINVAL; - if ( copy_from_user( &request, argp, sizeof(request) ) ) - return -EFAULT; + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; - count = 
request.count; - order = drm_order( request.size ); + count = request->count; + order = drm_order(request->size); size = 1 << order; - alignment = (request.flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; - agp_offset = request.agp_start; - - DRM_DEBUG( "count: %d\n", count ); - DRM_DEBUG( "order: %d\n", order ); - DRM_DEBUG( "size: %d\n", size ); - DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); - DRM_DEBUG( "alignment: %d\n", alignment ); - DRM_DEBUG( "page_order: %d\n", page_order ); - DRM_DEBUG( "total: %d\n", total ); - - if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; - if ( dev->queue_count ) return -EBUSY; /* Not while in use */ - - spin_lock( &dev->count_lock ); - if ( dev->buf_use ) { - spin_unlock( &dev->count_lock ); + agp_offset = request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lu\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); return -EBUSY; } - atomic_inc( &dev->buf_alloc ); - spin_unlock( &dev->count_lock ); + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); - down( &dev->struct_sem ); + mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; - if ( entry->buf_count ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); - return -ENOMEM; /* May only call once for each order */ + if (entry->buf_count) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -EINVAL; } - entry->buflist = drm_alloc( count * sizeof(*entry->buflist), - DRM_MEM_BUFS ); - if ( !entry->buflist ) { - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->buf_size = size; entry->page_order = page_order; offset = 0; - while ( entry->buf_count < count ) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; - buf->offset = (dma->byte_count + offset); + buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset + dev->sg->handle); - buf->next = NULL; + buf->address = (void *)(agp_offset + offset + + (unsigned long)dev->sg->virtual); + buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head( &buf->dma_wait ); - buf->filp = NULL; 
+ init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc( buf->dev_priv_size, - DRM_MEM_BUFS ); - if(!buf->dev_private) { + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; - drm_cleanup_buf_error(dev,entry); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } - memset( buf->dev_private, 0, buf->dev_priv_size ); + memset(buf->dev_private, 0, buf->dev_priv_size); - DRM_DEBUG( "buffer %d @ %p\n", - entry->buf_count, buf->address ); + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } - DRM_DEBUG( "byte_count: %d\n", byte_count ); + DRM_DEBUG("byte_count: %d\n", byte_count); - temp_buflist = drm_realloc( dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), - DRM_MEM_BUFS ); - if(!temp_buflist) { + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev,entry); - up( &dev->struct_sem ); - atomic_dec( &dev->buf_alloc ); + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; - for ( i = 0 ; i < entry->buf_count ; i++ ) { + for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; dma->byte_count += byte_count; - DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); - DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - up( &dev->struct_sem ); + mutex_unlock(&dev->struct_mutex); - request.count = entry->buf_count; - request.size = size; - - if ( copy_to_user( argp, &request, sizeof(request) ) ) - return -EFAULT; + request->count = entry->buf_count; + request->size = size; dma->flags = _DRM_DMA_USE_SG; - atomic_dec( &dev->buf_alloc ); + atomic_dec(&dev->buf_alloc); return 0; } +static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +{ + drm_device_dma_t *dma = dev->dma; + drm_buf_entry_t *entry; + drm_buf_t *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + drm_buf_t **temp_buflist; + + if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) + return -EINVAL; + + if (!dma) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + count = request->count; + order = drm_order(request->size); + size = 1 << order; + + alignment = (request->flags & _DRM_PAGE_ALIGN) + ? PAGE_ALIGN(size) : size; + page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = request->agp_start; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lu\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + mutex_lock(&dev->struct_mutex); + entry = &dma->bufs[order]; + if (entry->buf_count) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + + offset = 0; + + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + buf->offset = (dma->byte_count + offset); + buf->bus_address = agp_offset + offset; + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->filp = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. */ + entry->buf_count = count; + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); + + offset += alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; + dma->byte_count += byte_count; + + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + + mutex_unlock(&dev->struct_mutex); + + request->count = entry->buf_count; + request->size = size; + + dma->flags = _DRM_DMA_USE_FB; + + atomic_dec(&dev->buf_alloc); + return 0; +} + + /** * Add buffers for DMA transfers (ioctl). 
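 * Userspace reaches this dispatcher through the DRM_IOCTL_ADD_BUFS request
 * declared in drm.h. A rough sketch (the count, size and AGP start are
 * illustrative values only; on return, count and size report what was
 * actually allocated):
 * \code
 * drm_buf_desc_t req;
 * memset(&req, 0, sizeof(req));
 * req.count = 32;
 * req.size = 65536;
 * req.flags = _DRM_AGP_BUFFER;
 * req.agp_start = 0;
 * ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 * \endcode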
* @@ -920,31 +1226,40 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp, * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_buf_desc_t request; drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - + int ret; + if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg, - sizeof(request) ) ) + if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, + sizeof(request))) return -EFAULT; #if __OS_HAS_AGP - if ( request.flags & _DRM_AGP_BUFFER ) - return drm_addbufs_agp( inode, filp, cmd, arg ); + if (request.flags & _DRM_AGP_BUFFER) + ret = drm_addbufs_agp(dev, &request); else #endif - if ( request.flags & _DRM_SG_BUFFER ) - return drm_addbufs_sg( inode, filp, cmd, arg ); + if (request.flags & _DRM_SG_BUFFER) + ret = drm_addbufs_sg(dev, &request); + else if (request.flags & _DRM_FB_BUFFER) + ret = drm_addbufs_fb(dev, &request); else - return drm_addbufs_pci( inode, filp, cmd, arg ); -} + ret = drm_addbufs_pci(dev, &request); + if (ret == 0) { + if (copy_to_user((void __user *)arg, &request, sizeof(request))) { + ret = -EFAULT; + } + } + return ret; +} /** * Get information about the buffer mappings. @@ -963,8 +1278,8 @@ int drm_addbufs( struct inode *inode, struct file *filp, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. */ -int drm_infobufs( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_infobufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -977,58 +1292,61 @@ int drm_infobufs( struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if ( !dma ) return -EINVAL; + if (!dma) + return -EINVAL; - spin_lock( &dev->count_lock ); - if ( atomic_read( &dev->buf_alloc ) ) { - spin_unlock( &dev->count_lock ); + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); return -EBUSY; } ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock( &dev->count_lock ); + spin_unlock(&dev->count_lock); - if ( copy_from_user( &request, argp, sizeof(request) ) ) + if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; - for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { - if ( dma->bufs[i].buf_count ) ++count; + for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { + if (dma->bufs[i].buf_count) + ++count; } - DRM_DEBUG( "count = %d\n", count ); + DRM_DEBUG("count = %d\n", count); - if ( request.count >= count ) { - for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { - if ( dma->bufs[i].buf_count ) { - drm_buf_desc_t __user *to = &request.list[count]; + if (request.count >= count) { + for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { + if (dma->bufs[i].buf_count) { + drm_buf_desc_t __user *to = + &request.list[count]; drm_buf_entry_t *from = &dma->bufs[i]; drm_freelist_t *list = &dma->bufs[i].freelist; - if ( copy_to_user( &to->count, - &from->buf_count, - sizeof(from->buf_count) ) || - copy_to_user( &to->size, - &from->buf_size, - sizeof(from->buf_size) ) || - copy_to_user( 
&to->low_mark, - &list->low_mark, - sizeof(list->low_mark) ) || - copy_to_user( &to->high_mark, - &list->high_mark, - sizeof(list->high_mark) ) ) + if (copy_to_user(&to->count, + &from->buf_count, + sizeof(from->buf_count)) || + copy_to_user(&to->size, + &from->buf_size, + sizeof(from->buf_size)) || + copy_to_user(&to->low_mark, + &list->low_mark, + sizeof(list->low_mark)) || + copy_to_user(&to->high_mark, + &list->high_mark, + sizeof(list->high_mark))) return -EFAULT; - DRM_DEBUG( "%d %d %d %d %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].buf_size, - dma->bufs[i].freelist.low_mark, - dma->bufs[i].freelist.high_mark ); + DRM_DEBUG("%d %d %d %d %d\n", + i, + dma->bufs[i].buf_count, + dma->bufs[i].buf_size, + dma->bufs[i].freelist.low_mark, + dma->bufs[i].freelist.high_mark); ++count; } } } request.count = count; - if ( copy_to_user( argp, &request, sizeof(request) ) ) + if (copy_to_user(argp, &request, sizeof(request))) return -EFAULT; return 0; @@ -1048,8 +1366,8 @@ int drm_infobufs( struct inode *inode, struct file *filp, * * \note This ioctl is deprecated and mostly never used. */ -int drm_markbufs( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_markbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -1061,44 +1379,45 @@ int drm_markbufs( struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if ( !dma ) return -EINVAL; + if (!dma) + return -EINVAL; - if ( copy_from_user( &request, - (drm_buf_desc_t __user *)arg, - sizeof(request) ) ) + if (copy_from_user(&request, + (drm_buf_desc_t __user *) arg, sizeof(request))) return -EFAULT; - DRM_DEBUG( "%d, %d, %d\n", - request.size, request.low_mark, request.high_mark ); - order = drm_order( request.size ); - if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; + DRM_DEBUG("%d, %d, %d\n", + request.size, request.low_mark, request.high_mark); + order = drm_order(request.size); + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; entry = &dma->bufs[order]; - if ( request.low_mark < 0 || request.low_mark > entry->buf_count ) + if (request.low_mark < 0 || request.low_mark > entry->buf_count) return -EINVAL; - if ( request.high_mark < 0 || request.high_mark > entry->buf_count ) + if (request.high_mark < 0 || request.high_mark > entry->buf_count) return -EINVAL; - entry->freelist.low_mark = request.low_mark; + entry->freelist.low_mark = request.low_mark; entry->freelist.high_mark = request.high_mark; return 0; } /** - * Unreserve the buffers in list, previously reserved using drmDMA. + * Unreserve the buffers in list, previously reserved using drmDMA. * * \param inode device inode. * \param filp file pointer. * \param cmd command. * \param arg pointer to a drm_buf_free structure. * \return zero on success or a negative number on failure. - * + * * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. 
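 *
 * A sketch of the matching userspace call, using the DRM_IOCTL_FREE_BUFS
 * request from drm.h (the buffer indices are illustrative):
 * \code
 * int idx[2] = { 0, 1 };
 * drm_buf_free_t req;
 * req.count = 2;
 * req.list = idx;
 * ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 * \endcode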
*/ -int drm_freebufs( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_freebufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -1111,31 +1430,29 @@ int drm_freebufs( struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if ( !dma ) return -EINVAL; + if (!dma) + return -EINVAL; - if ( copy_from_user( &request, - (drm_buf_free_t __user *)arg, - sizeof(request) ) ) + if (copy_from_user(&request, + (drm_buf_free_t __user *) arg, sizeof(request))) return -EFAULT; - DRM_DEBUG( "%d\n", request.count ); - for ( i = 0 ; i < request.count ; i++ ) { - if ( copy_from_user( &idx, - &request.list[i], - sizeof(idx) ) ) + DRM_DEBUG("%d\n", request.count); + for (i = 0; i < request.count; i++) { + if (copy_from_user(&idx, &request.list[i], sizeof(idx))) return -EFAULT; - if ( idx < 0 || idx >= dma->buf_count ) { - DRM_ERROR( "Index %d (of %d max)\n", - idx, dma->buf_count - 1 ); + if (idx < 0 || idx >= dma->buf_count) { + DRM_ERROR("Index %d (of %d max)\n", + idx, dma->buf_count - 1); return -EINVAL; } buf = dma->buflist[idx]; - if ( buf->filp != filp ) { - DRM_ERROR( "Process %d freeing buffer not owned\n", - current->pid ); + if (buf->filp != filp) { + DRM_ERROR("Process %d freeing buffer not owned\n", + current->pid); return -EINVAL; } - drm_free_buffer( dev, buf ); + drm_free_buffer(dev, buf); } return 0; @@ -1154,8 +1471,8 @@ int drm_freebufs( struct inode *inode, struct file *filp, * about each buffer into user space. The PCI buffers are already mapped on the * addbufs_pci() call. */ -int drm_mapbufs( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) +int drm_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -1171,100 +1488,110 @@ int drm_mapbufs( struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if ( !dma ) return -EINVAL; + if (!dma) + return -EINVAL; - spin_lock( &dev->count_lock ); - if ( atomic_read( &dev->buf_alloc ) ) { - spin_unlock( &dev->count_lock ); + spin_lock(&dev->count_lock); + if (atomic_read(&dev->buf_alloc)) { + spin_unlock(&dev->count_lock); return -EBUSY; } dev->buf_use++; /* Can't allocate more after this call */ - spin_unlock( &dev->count_lock ); + spin_unlock(&dev->count_lock); - if ( copy_from_user( &request, argp, sizeof(request) ) ) + if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; - if ( request.count >= dma->buf_count ) { - if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || - (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) { + if (request.count >= dma->buf_count) { + if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) + || (drm_core_check_feature(dev, DRIVER_SG) + && (dma->flags & _DRM_DMA_USE_SG)) + || (drm_core_check_feature(dev, DRIVER_FB_DMA) + && (dma->flags & _DRM_DMA_USE_FB))) { drm_map_t *map = dev->agp_buffer_map; + unsigned long token = dev->agp_buffer_token; - if ( !map ) { + if (!map) { retcode = -EINVAL; goto done; } -#if LINUX_VERSION_CODE <= 0x020402 - down( ¤t->mm->mmap_sem ); -#else - down_write( ¤t->mm->mmap_sem ); -#endif - virtual = do_mmap( filp, 0, map->size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - (unsigned long)map->offset ); -#if LINUX_VERSION_CODE <= 0x020402 - 
up( ¤t->mm->mmap_sem ); -#else - up_write( ¤t->mm->mmap_sem ); -#endif + down_write(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, map->size, + PROT_READ | PROT_WRITE, + MAP_SHARED, token); + up_write(¤t->mm->mmap_sem); } else { -#if LINUX_VERSION_CODE <= 0x020402 - down( ¤t->mm->mmap_sem ); -#else - down_write( ¤t->mm->mmap_sem ); -#endif - virtual = do_mmap( filp, 0, dma->byte_count, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0 ); -#if LINUX_VERSION_CODE <= 0x020402 - up( ¤t->mm->mmap_sem ); -#else - up_write( ¤t->mm->mmap_sem ); -#endif + down_write(¤t->mm->mmap_sem); + virtual = do_mmap(filp, 0, dma->byte_count, + PROT_READ | PROT_WRITE, + MAP_SHARED, 0); + up_write(¤t->mm->mmap_sem); } - if ( virtual > -1024UL ) { + if (virtual > -1024UL) { /* Real error */ retcode = (signed long)virtual; goto done; } request.virtual = (void __user *)virtual; - for ( i = 0 ; i < dma->buf_count ; i++ ) { - if ( copy_to_user( &request.list[i].idx, - &dma->buflist[i]->idx, - sizeof(request.list[0].idx) ) ) { + for (i = 0; i < dma->buf_count; i++) { + if (copy_to_user(&request.list[i].idx, + &dma->buflist[i]->idx, + sizeof(request.list[0].idx))) { retcode = -EFAULT; goto done; } - if ( copy_to_user( &request.list[i].total, - &dma->buflist[i]->total, - sizeof(request.list[0].total) ) ) { + if (copy_to_user(&request.list[i].total, + &dma->buflist[i]->total, + sizeof(request.list[0].total))) { retcode = -EFAULT; goto done; } - if ( copy_to_user( &request.list[i].used, - &zero, - sizeof(zero) ) ) { + if (copy_to_user(&request.list[i].used, + &zero, sizeof(zero))) { retcode = -EFAULT; goto done; } - address = virtual + dma->buflist[i]->offset; /* *** */ - if ( copy_to_user( &request.list[i].address, - &address, - sizeof(address) ) ) { + address = virtual + dma->buflist[i]->offset; /* *** */ + if (copy_to_user(&request.list[i].address, + &address, sizeof(address))) { retcode = -EFAULT; goto done; } } } - done: + done: request.count = dma->buf_count; - DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode ); + DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - if ( copy_to_user( argp, &request, sizeof(request) ) ) + if (copy_to_user(argp, &request, sizeof(request))) return -EFAULT; return retcode; } +/** + * Compute size order. Returns the exponent of the smaller power of two which + * is greater or equal to given number. + * + * \param size size. + * \return order. + * + * \todo Can be made faster. + */ +int drm_order(unsigned long size) +{ + int order; + unsigned long tmp; + + for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; + + if (size & (size - 1)) + ++order; + + return order; +} +EXPORT_SYMBOL(drm_order); + +