/* Source: drivers/char/drm/drm_dma.h from linux-2.6.6
 * (ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2) */
1 /**
2  * \file drm_dma.h 
3  * DMA IOCTL and function support
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8
9 /*
10  * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35
36 #include "drmP.h"
37
38 #include <linux/interrupt.h>    /* For task queue support */
39
40 #ifndef __HAVE_DMA_WAITQUEUE
41 #define __HAVE_DMA_WAITQUEUE    0
42 #endif
43 #ifndef __HAVE_DMA_RECLAIM
44 #define __HAVE_DMA_RECLAIM      0
45 #endif
46 #ifndef __HAVE_SHARED_IRQ
47 #define __HAVE_SHARED_IRQ       0
48 #endif
49
50 #if __HAVE_SHARED_IRQ
51 #define DRM_IRQ_TYPE            SA_SHIRQ
52 #else
53 #define DRM_IRQ_TYPE            0
54 #endif
55
56 #if __HAVE_DMA
57
58 /**
59  * Initialize the DMA data.
60  * 
61  * \param dev DRM device.
62  * \return zero on success or a negative value on failure.
63  *
64  * Allocate and initialize a drm_device_dma structure.
65  */
66 int DRM(dma_setup)( drm_device_t *dev )
67 {
68         int i;
69
70         dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
71         if ( !dev->dma )
72                 return -ENOMEM;
73
74         memset( dev->dma, 0, sizeof(*dev->dma) );
75
76         for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
77                 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
78
79         return 0;
80 }
81
/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void DRM(dma_takedown)(drm_device_t *dev)
{
	drm_device_dma_t  *dma = dev->dma;
	int               i, j;

	/* Nothing to do if DMA was never set up (or already torn down). */
	if (!dma) return;

				/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			/* Release each DMA segment's pages; entries may be
			 * NULL if a segment allocation failed part-way. */
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					DRM(free_pages)(dma->bufs[i].seglist[j],
							dma->bufs[i].page_order,
							DRM_MEM_DMA);
				}
			}
			/* Then free the segment-pointer array itself. */
			DRM(free)(dma->bufs[i].seglist,
				  dma->bufs[i].seg_count
				  * sizeof(*dma->bufs[0].seglist),
				  DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			/* Free any driver-private state attached to each
			 * buffer before freeing the buffer array. */
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					DRM(free)(dma->bufs[i].buflist[j].dev_private,
						  dma->bufs[i].buflist[j].dev_priv_size,
						  DRM_MEM_BUFS);
				}
			}
			DRM(free)(dma->bufs[i].buflist,
				  dma->bufs[i].buf_count *
				  sizeof(*dma->bufs[0].buflist),
				  DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
			DRM(freelist_destroy)(&dma->bufs[i].freelist);
#endif
		}
	}

	/* Flat lookup lists spanning all orders. */
	if (dma->buflist) {
		DRM(free)(dma->buflist,
			  dma->buf_count * sizeof(*dma->buflist),
			  DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES);
	}
	/* Finally drop the container and clear the pointer so a second
	 * takedown is a no-op. */
	DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}
149
150
151 /**
152  * Free a buffer.
153  *
154  * \param dev DRM device.
155  * \param buf buffer to free.
156  * 
157  * Resets the fields of \p buf.
158  */
159 void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
160 {
161         if (!buf) return;
162
163         buf->waiting  = 0;
164         buf->pending  = 0;
165         buf->filp     = 0;
166         buf->used     = 0;
167
168         if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
169                 wake_up_interruptible(&buf->dma_wait);
170         }
171 #if __HAVE_DMA_FREELIST
172         else {
173                 drm_device_dma_t *dma = dev->dma;
174                                 /* If processes are waiting, the last one
175                                    to wake will put the buffer on the free
176                                    list.  If no processes are waiting, we
177                                    put the buffer on the freelist here. */
178                 DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
179         }
180 #endif
181 }
182
183 #if !__HAVE_DMA_RECLAIM
184 /**
185  * Reclaim the buffers.
186  *
187  * \param filp file pointer.
188  *
189  * Frees each buffer associated with \p filp not already on the hardware.
190  */
191 void DRM(reclaim_buffers)( struct file *filp )
192 {
193         drm_file_t    *priv   = filp->private_data;
194         drm_device_t  *dev    = priv->dev;
195         drm_device_dma_t *dma = dev->dma;
196         int              i;
197
198         if (!dma) return;
199         for (i = 0; i < dma->buf_count; i++) {
200                 if (dma->buflist[i]->filp == filp) {
201                         switch (dma->buflist[i]->list) {
202                         case DRM_LIST_NONE:
203                                 DRM(free_buffer)(dev, dma->buflist[i]);
204                                 break;
205                         case DRM_LIST_WAIT:
206                                 dma->buflist[i]->list = DRM_LIST_RECLAIM;
207                                 break;
208                         default:
209                                 /* Buffer already on hardware. */
210                                 break;
211                         }
212                 }
213         }
214 }
215 #endif
216
217
218
219
220 #if __HAVE_DMA_IRQ
221
/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 * \param irq IRQ number.
 * \return zero on success, -EINVAL if \p irq is zero or the driver is not
 *         initialized, -EBUSY if an IRQ is already claimed, or the negative
 *         error from request_irq().
 *
 * Initializes the IRQ related data, and sets up drm_device::vbl_queue.
 * Installs the handler, calling the driver \c DRM(driver_irq_preinstall)()
 * and \c DRM(driver_irq_postinstall)() functions before and after the
 * installation.
 */
int DRM(irq_install)( drm_device_t *dev, int irq )
{
	int ret;

	if ( !irq )
		return -EINVAL;

	down( &dev->struct_sem );

	/* Driver must have been initialized */
	if ( !dev->dev_private ) {
		up( &dev->struct_sem );
		return -EINVAL;
	}

	/* Only one IRQ may be claimed per device. */
	if ( dev->irq ) {
		up( &dev->struct_sem );
		return -EBUSY;
	}
	dev->irq = irq;
	up( &dev->struct_sem );

	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );

	/* Reset DMA bookkeeping before the handler can start firing. */
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
	/* Bottom-half work item scheduled by the hard IRQ handler. */
	INIT_WORK(&dev->work, DRM(dma_immediate_bh), dev);
#endif

#if __HAVE_VBL_IRQ
	/* State used by DRM(wait_vblank)/DRM(vbl_send_signals). */
	init_waitqueue_head(&dev->vbl_queue);

	spin_lock_init( &dev->vbl_lock );

	INIT_LIST_HEAD( &dev->vbl_sigs.head );

	dev->vbl_pending = 0;
#endif

				/* Before installing handler */
	DRM(driver_irq_preinstall)(dev);

				/* Install handler */
	ret = request_irq( dev->irq, DRM(dma_service),
			   DRM_IRQ_TYPE, dev->devname, dev );
	if ( ret < 0 ) {
		/* Installation failed: release our claim on the IRQ number. */
		down( &dev->struct_sem );
		dev->irq = 0;
		up( &dev->struct_sem );
		return ret;
	}

				/* After installing handler */
	DRM(driver_irq_postinstall)(dev);

	return 0;
}
296
297 /**
298  * Uninstall the IRQ handler.
299  *
300  * \param dev DRM device.
301  *
302  * Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq.
303  */
304 int DRM(irq_uninstall)( drm_device_t *dev )
305 {
306         int irq;
307
308         down( &dev->struct_sem );
309         irq = dev->irq;
310         dev->irq = 0;
311         up( &dev->struct_sem );
312
313         if ( !irq )
314                 return -EINVAL;
315
316         DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
317
318         DRM(driver_irq_uninstall)( dev );
319
320         free_irq( irq, dev );
321
322         return 0;
323 }
324
325 /**
326  * IRQ control ioctl.
327  *
328  * \param inode device inode.
329  * \param filp file pointer.
330  * \param cmd command.
331  * \param arg user argument, pointing to a drm_control structure.
332  * \return zero on success or a negative number on failure.
333  *
334  * Calls irq_install() or irq_uninstall() according to \p arg.
335  */
336 int DRM(control)( struct inode *inode, struct file *filp,
337                   unsigned int cmd, unsigned long arg )
338 {
339         drm_file_t *priv = filp->private_data;
340         drm_device_t *dev = priv->dev;
341         drm_control_t ctl;
342
343         if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
344                 return -EFAULT;
345
346         switch ( ctl.func ) {
347         case DRM_INST_HANDLER:
348                 return DRM(irq_install)( dev, ctl.irq );
349         case DRM_UNINST_HANDLER:
350                 return DRM(irq_uninstall)( dev );
351         default:
352                 return -EINVAL;
353         }
354 }
355
356 #if __HAVE_VBL_IRQ
357
/**
 * Wait for VBLANK.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested checks if this task has already scheduled the same
 * signal for the same vblank sequence number - nothing to be done in that
 * case. If the number of tasks waiting for the interrupt exceeds 100 the
 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
 * task.
 *
 * If a signal is not requested, then calls vblank_wait().
 */
int DRM(wait_vblank)( DRM_IOCTL_ARGS )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_wait_vblank_t vblwait;
	struct timeval now;
	int ret = 0;
	unsigned int flags;

	/* Without an installed IRQ there is no vblank counter to wait on. */
	if (!dev->irq)
		return -EINVAL;

	DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
				  sizeof(vblwait) );

	switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request into an absolute one, then
		 * fall through to the absolute path. */
		vblwait.request.sequence += atomic_read( &dev->vbl_received );
		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fallthrough */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;

	if ( flags & _DRM_VBLANK_SIGNAL ) {
		unsigned long irqflags;
		drm_vbl_sig_t *vbl_sig;

		/* Asynchronous path: report the current count now and arm a
		 * signal for when the requested sequence is reached. */
		vblwait.reply.sequence = atomic_read( &dev->vbl_received );

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
			if (vbl_sig->sequence == vblwait.request.sequence
			    && vbl_sig->info.si_signo == vblwait.request.signal
			    && vbl_sig->task == current)
			{
				spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
				goto done;
			}
		}

		/* Hard cap on outstanding signal requests. */
		if ( dev->vbl_pending >= 100 ) {
			spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
			return -EBUSY;
		}

		/* Reserve a slot before dropping the lock for the
		 * (possibly sleeping) allocation below.
		 * NOTE(review): on allocation failure the reservation is
		 * not released, so vbl_pending leaks by one — confirm
		 * against later kernels. */
		dev->vbl_pending++;

		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );

		if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
			return -ENOMEM;
		}

		memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );

		vbl_sig->sequence = vblwait.request.sequence;
		vbl_sig->info.si_signo = vblwait.request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		/* The cast relies on drm_vbl_sig_t::head presumably being
		 * the first member — TODO confirm against the struct
		 * definition. */
		list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );

		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
	} else {
		/* Synchronous path: block until the sequence is reached and
		 * timestamp the reply. */
		ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );

		do_gettimeofday( &now );
		vblwait.reply.tval_sec = now.tv_sec;
		vblwait.reply.tval_usec = now.tv_usec;
	}

done:
	DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
				sizeof(vblwait) );

	return ret;
}
464
/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 *
 * Sends a signal to each task in drm_device::vbl_sigs whose requested
 * sequence has been reached, and removes those entries from the list.
 */
void DRM(vbl_send_signals)( drm_device_t *dev )
{
	struct list_head *list, *tmp;
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
	unsigned long flags;

	spin_lock_irqsave( &dev->vbl_lock, flags );

	/* _safe variant: entries are deleted while iterating. */
	list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
		vbl_sig = list_entry( list, drm_vbl_sig_t, head );
		/* Unsigned subtraction with a 2^23 window treats sequences
		 * within half the counter range as "reached", tolerating
		 * counter wraparound. */
		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
			/* Deliver the current sequence number in si_code. */
			vbl_sig->info.si_code = vbl_seq;
			send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );

			list_del( list );

			DRM_FREE( vbl_sig, sizeof(*vbl_sig) );

			/* Release the slot reserved by DRM(wait_vblank). */
			dev->vbl_pending--;
		}
	}

	spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
499
500 #endif  /* __HAVE_VBL_IRQ */
501
502 #else
503
504 int DRM(control)( struct inode *inode, struct file *filp,
505                   unsigned int cmd, unsigned long arg )
506 {
507         drm_control_t ctl;
508
509         if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
510                 return -EFAULT;
511
512         switch ( ctl.func ) {
513         case DRM_INST_HANDLER:
514         case DRM_UNINST_HANDLER:
515                 return 0;
516         default:
517                 return -EINVAL;
518         }
519 }
520
521 #endif /* __HAVE_DMA_IRQ */
522
523 #endif /* __HAVE_DMA */