3 * DMA IOCTL and function support
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/interrupt.h> /* For task queue support */
40 #ifndef __HAVE_DMA_WAITQUEUE
41 #define __HAVE_DMA_WAITQUEUE 0
43 #ifndef __HAVE_DMA_RECLAIM
44 #define __HAVE_DMA_RECLAIM 0
46 #ifndef __HAVE_SHARED_IRQ
47 #define __HAVE_SHARED_IRQ 0
51 #define DRM_IRQ_TYPE SA_SHIRQ
53 #define DRM_IRQ_TYPE 0
59 * Initialize the DMA data.
61 * \param dev DRM device.
62 * \return zero on success or a negative value on failure.
64 * Allocate and initialize a drm_device_dma structure.
66 int DRM(dma_setup)( drm_device_t *dev )
	/* Allocate the per-device DMA bookkeeping structure; freed in
	 * DRM(dma_takedown)().  (Allocation-failure handling is expected
	 * on the lines elided here -- confirm a NULL check precedes use.) */
70 	dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
	/* Start from all-zero state so takedown can treat zero counts and
	 * NULL pointers as "nothing to free". */
74 	memset( dev->dma, 0, sizeof(*dev->dma) );
	/* Explicitly clear each order's bufs[] entry as well.  This is
	 * redundant with the whole-struct memset above, but cheap and kept
	 * for clarity. */
76 	for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
77 		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
83 * Cleanup the DMA resources.
85 * \param dev DRM device.
87 * Free all pages associated with DMA buffers, the buffers and pages lists, and
88 * finally the drm_device::dma structure itself.
90 void DRM(dma_takedown)(drm_device_t *dev)
92 	drm_device_dma_t *dma = dev->dma;
97 	/* Clear dma buffers */
	/* Walk every allocation order; each order may own a segment list
	 * (backing pages) and a buffer list (descriptors). */
98 	for (i = 0; i <= DRM_MAX_ORDER; i++) {
99 		if (dma->bufs[i].seg_count) {
100 			DRM_DEBUG("order %d: buf_count = %d,"
103 				  dma->bufs[i].buf_count,
104 				  dma->bufs[i].seg_count);
			/* Release the physical page segments first; seglist
			 * entries may be 0 for segments never allocated. */
105 			for (j = 0; j < dma->bufs[i].seg_count; j++) {
106 				if (dma->bufs[i].seglist[j]) {
107 					DRM(free_pages)(dma->bufs[i].seglist[j],
108 							dma->bufs[i].page_order,
			/* ...then the segment-pointer array itself. */
112 			DRM(free)(dma->bufs[i].seglist,
113 				  dma->bufs[i].seg_count
114 				  * sizeof(*dma->bufs[0].seglist),
117 		if (dma->bufs[i].buf_count) {
			/* Driver-private per-buffer state is freed before the
			 * buffer descriptor array that owns it. */
118 			for (j = 0; j < dma->bufs[i].buf_count; j++) {
119 				if (dma->bufs[i].buflist[j].dev_private) {
120 					DRM(free)(dma->bufs[i].buflist[j].dev_private,
121 						  dma->bufs[i].buflist[j].dev_priv_size,
125 			DRM(free)(dma->bufs[i].buflist,
126 				  dma->bufs[i].buf_count *
127 				  sizeof(*dma->bufs[0].buflist),
129 #if __HAVE_DMA_FREELIST
130 			DRM(freelist_destroy)(&dma->bufs[i].freelist);
	/* Free the device-wide flattened buffer and page lists, then the
	 * dma structure itself (allocated in DRM(dma_setup)()). */
136 		DRM(free)(dma->buflist,
137 			  dma->buf_count * sizeof(*dma->buflist),
142 		DRM(free)(dma->pagelist,
143 			  dma->page_count * sizeof(*dma->pagelist),
146 	DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
154 * \param dev DRM device.
155 * \param buf buffer to free.
157 * Resets the fields of \p buf.
159 void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
	/* If anyone is sleeping on this buffer's wait queue, wake them;
	 * the __HAVE_DMA_WAITQUEUE compile-time flag short-circuits the
	 * whole test to 0 when waitqueues are not configured. */
168 	if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
169 		wake_up_interruptible(&buf->dma_wait);
171 #if __HAVE_DMA_FREELIST
173 		drm_device_dma_t *dma = dev->dma;
174 		/* If processes are waiting, the last one
175 		   to wake will put the buffer on the free
176 		   list. If no processes are waiting, we
177 		   put the buffer on the freelist here. */
178 		DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
183 #if !__HAVE_DMA_RECLAIM
185 * Reclaim the buffers.
187 * \param filp file pointer.
189 * Frees each buffer associated with \p filp not already on the hardware.
191 void DRM(reclaim_buffers)( struct file *filp )
193 	drm_file_t *priv = filp->private_data;
194 	drm_device_t *dev = priv->dev;
195 	drm_device_dma_t *dma = dev->dma;
	/* Scan every buffer on the device and release those owned by the
	 * closing file.  The case labels for the switch arms are on lines
	 * elided here -- presumably idle-state lists free immediately while
	 * in-flight buffers are only marked for reclaim; confirm. */
199 	for (i = 0; i < dma->buf_count; i++) {
200 		if (dma->buflist[i]->filp == filp) {
201 			switch (dma->buflist[i]->list) {
203 				DRM(free_buffer)(dev, dma->buflist[i]);
				/* Defer: the interrupt path will see this
				 * marker and free the buffer later. */
206 				dma->buflist[i]->list = DRM_LIST_RECLAIM;
209 				/* Buffer already on hardware. */
223 * Install IRQ handler.
225 * \param dev DRM device.
226 * \param irq IRQ number.
228 * Initializes the IRQ-related data, and sets up drm_device::vbl_queue. Installs the handler, calling the driver
229 * \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions
230 * before and after the installation.
232 int DRM(irq_install)( drm_device_t *dev, int irq )
	/* struct_sem serializes installation against other ioctls that
	 * mutate device state. */
239 	down( &dev->struct_sem );
241 	/* Driver must have been initialized */
242 	if ( !dev->dev_private ) {
243 		up( &dev->struct_sem );
	/* NOTE(review): the two bare up() calls below look like the tails
	 * of additional early-exit paths (e.g. bad irq number, handler
	 * already installed) whose conditions are elided here -- confirm. */
248 		up( &dev->struct_sem );
252 		up( &dev->struct_sem );
254 	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
	/* Reset software DMA/interrupt state before the handler can fire. */
256 	dev->context_flag = 0;
257 	dev->interrupt_flag = 0;
260 	dev->dma->next_buffer = NULL;
261 	dev->dma->next_queue = NULL;
262 	dev->dma->this_buffer = NULL;
264 #if __HAVE_DMA_IRQ_BH
	/* Bottom half: deferred DMA work queued from the interrupt handler. */
265 	INIT_WORK(&dev->work, DRM(dma_immediate_bh), dev);
269 	init_waitqueue_head(&dev->vbl_queue);
	/* vbl_lock guards vbl_sigs/vbl_pending; it is also taken with IRQs
	 * saved in DRM(wait_vblank)() and DRM(vbl_send_signals)(). */
271 	spin_lock_init( &dev->vbl_lock );
273 	INIT_LIST_HEAD( &dev->vbl_sigs.head );
275 	dev->vbl_pending = 0;
278 	/* Before installing handler */
279 	DRM(driver_irq_preinstall)(dev);
281 	/* Install handler */
	/* DRM_IRQ_TYPE is SA_SHIRQ when the driver supports shared IRQ
	 * lines, 0 otherwise (see the defaults near the top of the file). */
282 	ret = request_irq( dev->irq, DRM(dma_service),
283 			   DRM_IRQ_TYPE, dev->devname, dev );
	/* Re-take struct_sem around the (elided) request_irq failure
	 * cleanup before announcing the handler to the driver. */
285 	down( &dev->struct_sem );
287 	up( &dev->struct_sem );
291 	/* After installing handler */
292 	DRM(driver_irq_postinstall)(dev);
298 * Uninstall the IRQ handler.
300 * \param dev DRM device.
302 * Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq.
304 int DRM(irq_uninstall)( drm_device_t *dev )
	/* Atomically take ownership of dev->irq under struct_sem; a local
	 * `irq` copy is presumably made on the elided lines -- confirm. */
308 	down( &dev->struct_sem );
311 	up( &dev->struct_sem );
316 	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
	/* Let the driver quiesce its interrupt sources before the handler
	 * is removed from the line. */
318 	DRM(driver_irq_uninstall)( dev );
320 	free_irq( irq, dev );
328 * \param inode device inode.
329 * \param filp file pointer.
330 * \param cmd command.
331 * \param arg user argument, pointing to a drm_control structure.
332 * \return zero on success or a negative number on failure.
334 * Calls irq_install() or irq_uninstall() according to \p arg.
336 int DRM(control)( struct inode *inode, struct file *filp,
337 		  unsigned int cmd, unsigned long arg )
339 	drm_file_t *priv = filp->private_data;
340 	drm_device_t *dev = priv->dev;
	/* Copy the drm_control_t request in from userspace; the error
	 * return for a faulting copy is on an elided line. */
343 	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
	/* Dispatch on the requested operation.  The default arm for an
	 * unknown func is not visible here. */
346 	switch ( ctl.func ) {
347 	case DRM_INST_HANDLER:
348 		return DRM(irq_install)( dev, ctl.irq );
349 	case DRM_UNINST_HANDLER:
350 		return DRM(irq_uninstall)( dev );
361 * \param inode device inode.
362 * \param filp file pointer.
363 * \param cmd command.
364 * \param data user argument, pointing to a drm_wait_vblank structure.
365 * \return zero on success or a negative number on failure.
367 * Verifies the IRQ is installed.
369 * If a signal is requested checks if this task has already scheduled the same signal
370 * for the same vblank sequence number - nothing to be done in
371 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
372 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
375 * If a signal is not requested, then calls vblank_wait().
377 int DRM(wait_vblank)( DRM_IOCTL_ARGS )
379 	drm_file_t *priv = filp->private_data;
380 	drm_device_t *dev = priv->dev;
381 	drm_wait_vblank_t vblwait;
389 	DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
	/* Mask off the flag bits to get the request type. */
392 	switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
393 	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request into an absolute sequence
		 * number based on the count received so far... */
394 		vblwait.request.sequence += atomic_read( &dev->vbl_received );
395 		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
		/* ...and fall through: it is now an absolute request. */
396 	case _DRM_VBLANK_ABSOLUTE:
402 	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
404 	if ( flags & _DRM_VBLANK_SIGNAL ) {
405 		unsigned long irqflags;
406 		drm_vbl_sig_t *vbl_sig;
		/* Signal mode: report the current count and return
		 * immediately; the signal arrives asynchronously. */
408 		vblwait.reply.sequence = atomic_read( &dev->vbl_received );
410 		spin_lock_irqsave( &dev->vbl_lock, irqflags );
412 		/* Check if this task has already scheduled the same signal
413 		 * for the same vblank sequence number; nothing to be done in
416 		list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
417 			if (vbl_sig->sequence == vblwait.request.sequence
418 			    && vbl_sig->info.si_signo == vblwait.request.signal
419 			    && vbl_sig->task == current)
421 				spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
		/* Hard cap on outstanding signal requests per device. */
426 		if ( dev->vbl_pending >= 100 ) {
427 			spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
		/* NOTE(review): the lock is dropped here so DRM_MALLOC may
		 * sleep, then retaken below.  A duplicate entry could be
		 * inserted by another thread in the window -- confirm this
		 * race is acceptable (worst case: an extra signal). */
433 		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
435 		if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
439 		memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
441 		vbl_sig->sequence = vblwait.request.sequence;
442 		vbl_sig->info.si_signo = vblwait.request.signal;
443 		vbl_sig->task = current;
445 		spin_lock_irqsave( &dev->vbl_lock, irqflags );
		/* Cast relies on `head` being the first member of
		 * drm_vbl_sig_t so the struct pointer doubles as its
		 * list_head -- presumably guaranteed by the type's layout;
		 * verify against the declaration. */
447 		list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
449 		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
		/* Blocking mode: sleep until the requested sequence. */
451 		ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
453 		do_gettimeofday( &now );
454 		vblwait.reply.tval_sec = now.tv_sec;
455 		vblwait.reply.tval_usec = now.tv_usec;
459 	DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
466 * Send the VBLANK signals.
468 * \param dev DRM device.
470 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
472 * Runs under drm_device::vbl_lock; each signalled entry is unlinked and freed.
474 void DRM(vbl_send_signals)( drm_device_t *dev )
476 	struct list_head *list, *tmp;
477 	drm_vbl_sig_t *vbl_sig;
	/* Snapshot the current vblank count once for the whole pass. */
478 	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
481 	spin_lock_irqsave( &dev->vbl_lock, flags );
	/* _safe variant: entries are unlinked and freed while iterating. */
483 	list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
484 		vbl_sig = list_entry( list, drm_vbl_sig_t, head );
		/* Unsigned difference <= 2^23 means the requested sequence
		 * has been reached; the subtraction keeps the test correct
		 * across 32-bit counter wraparound. */
485 		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
			/* Deliver the current count via si_code. */
486 			vbl_sig->info.si_code = vbl_seq;
487 			send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
491 			DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
497 	spin_unlock_irqrestore( &dev->vbl_lock, flags );
500 #endif /* __HAVE_VBL_IRQ */
	/* Stub DRM_IOCTL_CONTROL implementation used when the driver is
	 * built without DMA IRQ support (__HAVE_DMA_IRQ unset): it still
	 * validates the user argument but install/uninstall are no-ops
	 * (return value on the elided lines -- presumably 0; confirm). */
504 int DRM(control)( struct inode *inode, struct file *filp,
505 		  unsigned int cmd, unsigned long arg )
509 	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
512 	switch ( ctl.func ) {
513 	case DRM_INST_HANDLER:
514 	case DRM_UNINST_HANDLER:
521 #endif /* __HAVE_DMA_IRQ */
523 #endif /* __HAVE_DMA */