1 /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
35 #include "gamma_drm.h"
36 #include "gamma_drv.h"
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
41 static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
44 drm_gamma_private_t *dev_priv =
45 (drm_gamma_private_t *)dev->dev_private;
47 while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
50 GAMMA_WRITE(GAMMA_DMAADDRESS, address);
52 while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
55 GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
58 void gamma_dma_quiescent_single(drm_device_t *dev)
60 drm_gamma_private_t *dev_priv =
61 (drm_gamma_private_t *)dev->dev_private;
62 while (GAMMA_READ(GAMMA_DMACOUNT))
65 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
68 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
69 GAMMA_WRITE(GAMMA_SYNC, 0);
72 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
74 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
77 void gamma_dma_quiescent_dual(drm_device_t *dev)
79 drm_gamma_private_t *dev_priv =
80 (drm_gamma_private_t *)dev->dev_private;
81 while (GAMMA_READ(GAMMA_DMACOUNT))
84 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
87 GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 GAMMA_WRITE(GAMMA_SYNC, 0);
91 /* Read from first MX */
93 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
95 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
97 /* Read from second MX */
99 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
101 } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
104 void gamma_dma_ready(drm_device_t *dev)
106 drm_gamma_private_t *dev_priv =
107 (drm_gamma_private_t *)dev->dev_private;
108 while (GAMMA_READ(GAMMA_DMACOUNT))
112 static inline int gamma_dma_is_ready(drm_device_t *dev)
114 drm_gamma_private_t *dev_priv =
115 (drm_gamma_private_t *)dev->dev_private;
116 return (!GAMMA_READ(GAMMA_DMACOUNT));
119 irqreturn_t gamma_irq_handler( DRM_IRQ_ARGS )
121 drm_device_t *dev = (drm_device_t *)arg;
122 drm_device_dma_t *dma = dev->dma;
123 drm_gamma_private_t *dev_priv =
124 (drm_gamma_private_t *)dev->dev_private;
126 /* FIXME: should check whether we're actually interested in the interrupt? */
127 atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
129 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
132 GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
133 GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
134 GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
135 if (gamma_dma_is_ready(dev)) {
136 /* Free previous buffer */
137 if (test_and_set_bit(0, &dev->dma_flag))
139 if (dma->this_buffer) {
140 gamma_free_buffer(dev, dma->this_buffer);
141 dma->this_buffer = NULL;
143 clear_bit(0, &dev->dma_flag);
145 /* Dispatch new buffer */
146 schedule_work(&dev->work);
151 /* Only called by gamma_dma_schedule. */
152 static int gamma_do_dma(drm_device_t *dev, int locked)
154 unsigned long address;
155 unsigned long length;
158 drm_device_dma_t *dma = dev->dma;
160 if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
163 if (!dma->next_buffer) {
164 DRM_ERROR("No next_buffer\n");
165 clear_bit(0, &dev->dma_flag);
169 buf = dma->next_buffer;
170 /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
171 /* So we pass the buffer index value into the physical page offset */
172 address = buf->idx << 12;
175 DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
176 buf->context, buf->idx, length);
178 if (buf->list == DRM_LIST_RECLAIM) {
179 gamma_clear_next_buffer(dev);
180 gamma_free_buffer(dev, buf);
181 clear_bit(0, &dev->dma_flag);
186 DRM_ERROR("0 length buffer\n");
187 gamma_clear_next_buffer(dev);
188 gamma_free_buffer(dev, buf);
189 clear_bit(0, &dev->dma_flag);
193 if (!gamma_dma_is_ready(dev)) {
194 clear_bit(0, &dev->dma_flag);
198 if (buf->while_locked) {
199 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
200 DRM_ERROR("Dispatching buffer %d from pid %d"
201 " \"while locked\", but no lock held\n",
202 buf->idx, current->pid);
205 if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
206 DRM_KERNEL_CONTEXT)) {
207 clear_bit(0, &dev->dma_flag);
212 if (dev->last_context != buf->context
213 && !(dev->queuelist[buf->context]->flags
214 & _DRM_CONTEXT_PRESERVED)) {
215 /* PRE: dev->last_context != buf->context */
216 if (DRM(context_switch)(dev, dev->last_context,
218 DRM(clear_next_buffer)(dev);
219 DRM(free_buffer)(dev, buf);
224 /* POST: we will wait for the context
225 switch and will dispatch on a later call
226 when dev->last_context == buf->context.
227 NOTE WE HOLD THE LOCK THROUGHOUT THIS
231 gamma_clear_next_buffer(dev);
234 buf->list = DRM_LIST_PEND;
236 /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
237 address = buf->idx << 12;
239 gamma_dma_dispatch(dev, address, length);
240 gamma_free_buffer(dev, dma->this_buffer);
241 dma->this_buffer = buf;
243 atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
244 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
246 if (!buf->while_locked && !dev->context_flag && !locked) {
247 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
248 DRM_KERNEL_CONTEXT)) {
254 clear_bit(0, &dev->dma_flag);
260 static void gamma_dma_timer_bh(unsigned long dev)
262 gamma_dma_schedule((drm_device_t *)dev, 0);
/* Work-queue bottom half scheduled from the IRQ handler: dispatch the
 * next pending buffer. */
void gamma_irq_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}
270 int gamma_dma_schedule(drm_device_t *dev, int locked)
279 drm_device_dma_t *dma = dev->dma;
281 if (test_and_set_bit(0, &dev->interrupt_flag)) {
283 atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
286 missed = atomic_read(&dev->counts[10]);
290 if (dev->context_flag) {
291 clear_bit(0, &dev->interrupt_flag);
294 if (dma->next_buffer) {
295 /* Unsent buffer that was previously
296 selected, but that couldn't be sent
297 because the lock could not be obtained
298 or the DMA engine wasn't ready. Try
300 if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
303 next = gamma_select_queue(dev, gamma_dma_timer_bh);
305 q = dev->queuelist[next];
306 buf = gamma_waitlist_get(&q->waitlist);
307 dma->next_buffer = buf;
309 if (buf && buf->list == DRM_LIST_RECLAIM) {
310 gamma_clear_next_buffer(dev);
311 gamma_free_buffer(dev, buf);
314 } while (next >= 0 && !dma->next_buffer);
315 if (dma->next_buffer) {
316 if (!(retcode = gamma_do_dma(dev, locked))) {
323 if (missed != atomic_read(&dev->counts[10])) {
324 if (gamma_dma_is_ready(dev)) goto again;
326 if (processed && gamma_dma_is_ready(dev)) {
332 clear_bit(0, &dev->interrupt_flag);
337 static int gamma_dma_priority(struct file *filp,
338 drm_device_t *dev, drm_dma_t *d)
340 unsigned long address;
341 unsigned long length;
347 drm_buf_t *last_buf = NULL;
348 drm_device_dma_t *dma = dev->dma;
349 DECLARE_WAITQUEUE(entry, current);
351 /* Turn off interrupt handling */
352 while (test_and_set_bit(0, &dev->interrupt_flag)) {
354 if (signal_pending(current)) return -EINTR;
356 if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
357 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
358 DRM_KERNEL_CONTEXT)) {
360 if (signal_pending(current)) {
361 clear_bit(0, &dev->interrupt_flag);
368 for (i = 0; i < d->send_count; i++) {
369 idx = d->send_indices[i];
370 if (idx < 0 || idx >= dma->buf_count) {
371 DRM_ERROR("Index %d (of %d max)\n",
372 d->send_indices[i], dma->buf_count - 1);
375 buf = dma->buflist[ idx ];
376 if (buf->filp != filp) {
377 DRM_ERROR("Process %d using buffer not owned\n",
382 if (buf->list != DRM_LIST_NONE) {
383 DRM_ERROR("Process %d using buffer on list %d\n",
384 current->pid, buf->list);
388 /* This isn't a race condition on
389 buf->list, since our concern is the
390 buffer reclaim during the time the
391 process closes the /dev/drm? handle, so
392 it can't also be doing DMA. */
393 buf->list = DRM_LIST_PRIO;
394 buf->used = d->send_sizes[i];
395 buf->context = d->context;
396 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
397 address = (unsigned long)buf->address;
400 DRM_ERROR("0 length buffer\n");
403 DRM_ERROR("Sending pending buffer:"
404 " buffer %d, offset %d\n",
405 d->send_indices[i], i);
410 DRM_ERROR("Sending waiting buffer:"
411 " buffer %d, offset %d\n",
412 d->send_indices[i], i);
418 if (dev->last_context != buf->context
419 && !(dev->queuelist[buf->context]->flags
420 & _DRM_CONTEXT_PRESERVED)) {
421 add_wait_queue(&dev->context_wait, &entry);
422 current->state = TASK_INTERRUPTIBLE;
423 /* PRE: dev->last_context != buf->context */
424 DRM(context_switch)(dev, dev->last_context,
426 /* POST: we will wait for the context
427 switch and will dispatch on a later call
428 when dev->last_context == buf->context.
429 NOTE WE HOLD THE LOCK THROUGHOUT THIS
432 current->state = TASK_RUNNING;
433 remove_wait_queue(&dev->context_wait, &entry);
434 if (signal_pending(current)) {
438 if (dev->last_context != buf->context) {
439 DRM_ERROR("Context mismatch: %d %d\n",
445 gamma_dma_dispatch(dev, address, length);
446 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
447 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
450 gamma_free_buffer(dev, last_buf);
458 gamma_dma_ready(dev);
459 gamma_free_buffer(dev, last_buf);
462 if (must_free && !dev->context_flag) {
463 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
464 DRM_KERNEL_CONTEXT)) {
468 clear_bit(0, &dev->interrupt_flag);
472 static int gamma_dma_send_buffers(struct file *filp,
473 drm_device_t *dev, drm_dma_t *d)
475 DECLARE_WAITQUEUE(entry, current);
476 drm_buf_t *last_buf = NULL;
478 drm_device_dma_t *dma = dev->dma;
480 if (d->flags & _DRM_DMA_BLOCK) {
481 last_buf = dma->buflist[d->send_indices[d->send_count-1]];
482 add_wait_queue(&last_buf->dma_wait, &entry);
485 if ((retcode = gamma_dma_enqueue(filp, d))) {
486 if (d->flags & _DRM_DMA_BLOCK)
487 remove_wait_queue(&last_buf->dma_wait, &entry);
491 gamma_dma_schedule(dev, 0);
493 if (d->flags & _DRM_DMA_BLOCK) {
494 DRM_DEBUG("%d waiting\n", current->pid);
496 current->state = TASK_INTERRUPTIBLE;
497 if (!last_buf->waiting && !last_buf->pending)
498 break; /* finished */
500 if (signal_pending(current)) {
501 retcode = -EINTR; /* Can't restart */
505 current->state = TASK_RUNNING;
506 DRM_DEBUG("%d running\n", current->pid);
507 remove_wait_queue(&last_buf->dma_wait, &entry);
509 || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
510 if (!waitqueue_active(&last_buf->dma_wait)) {
511 gamma_free_buffer(dev, last_buf);
515 DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
519 (long)DRM_WAITCOUNT(dev, d->context),
528 int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
531 drm_file_t *priv = filp->private_data;
532 drm_device_t *dev = priv->dev;
533 drm_device_dma_t *dma = dev->dma;
537 if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
540 if (d.send_count < 0 || d.send_count > dma->buf_count) {
541 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
542 current->pid, d.send_count, dma->buf_count);
546 if (d.request_count < 0 || d.request_count > dma->buf_count) {
547 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
548 current->pid, d.request_count, dma->buf_count);
553 if (d.flags & _DRM_DMA_PRIORITY)
554 retcode = gamma_dma_priority(filp, dev, &d);
556 retcode = gamma_dma_send_buffers(filp, dev, &d);
561 if (!retcode && d.request_count) {
562 retcode = gamma_dma_get_buffers(filp, &d);
565 DRM_DEBUG("%d returning, granted = %d\n",
566 current->pid, d.granted_count);
567 if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
573 /* =============================================================
574 * DMA initialization, cleanup
577 static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
579 drm_gamma_private_t *dev_priv;
580 drm_device_dma_t *dma = dev->dma;
583 struct list_head *list;
586 DRM_DEBUG( "%s\n", __FUNCTION__ );
588 dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
593 dev->dev_private = (void *)dev_priv;
595 memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
597 dev_priv->num_rast = init->num_rast;
599 list_for_each(list, &dev->maplist->head) {
600 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
602 r_list->map->type == _DRM_SHM &&
603 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
604 dev_priv->sarea = r_list->map;
609 DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
610 DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
611 DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
612 DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );
614 dev_priv->sarea_priv = (drm_gamma_sarea_t *)
615 ((u8 *)dev_priv->sarea->handle +
616 init->sarea_priv_offset);
619 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
622 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
623 buf = dma->buflist[i];
624 *pgt = virt_to_phys((void*)buf->address) | 0x07;
628 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
630 DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
632 DRM_IOREMAP( dev_priv->buffers, dev );
634 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
637 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
638 buf = dma->buflist[i];
639 *pgt = (unsigned long)buf->address + 0x07;
643 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
645 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1);
646 GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
648 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
649 GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
650 GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
655 int gamma_do_cleanup_dma( drm_device_t *dev )
657 DRM_DEBUG( "%s\n", __FUNCTION__ );
660 /* Make sure interrupts are disabled here because the uninstall ioctl
661 * may not have been called from userspace and after dev_private
662 * is freed, it's too late.
664 if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
667 if ( dev->dev_private ) {
668 drm_gamma_private_t *dev_priv = dev->dev_private;
670 if ( dev_priv->buffers != NULL )
671 DRM_IOREMAPFREE( dev_priv->buffers, dev );
673 DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
675 dev->dev_private = NULL;
681 int gamma_dma_init( struct inode *inode, struct file *filp,
682 unsigned int cmd, unsigned long arg )
684 drm_file_t *priv = filp->private_data;
685 drm_device_t *dev = priv->dev;
686 drm_gamma_init_t init;
688 LOCK_TEST_WITH_RETURN( dev, filp );
690 if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
693 switch ( init.func ) {
695 return gamma_do_init_dma( dev, &init );
696 case GAMMA_CLEANUP_DMA:
697 return gamma_do_cleanup_dma( dev );
/* Builds a two-pass screen-to-screen copy command stream: first a DMA
 * readback of the source rectangle into a RESTRICTED DMA buffer
 * (`screenbuf`), then a write of that data back out to the destination
 * rectangle.  The 0x1xx/0x5xx values are GLINT/Gamma register tags.
 *
 * NOTE(review): `buffer` is written below but never declared in this
 * chunk, and no dispatch of the built stream is visible — this function
 * appears truncated or unfinished upstream; confirm against the full
 * driver source before relying on it. */
703 static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
705 drm_device_dma_t *dma = dev->dma;
706 unsigned int *screenbuf;
708 DRM_DEBUG( "%s\n", __FUNCTION__ );
710 /* We've DRM_RESTRICTED this DMA buffer */
712 screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
/* Pass 1: read the source rectangle into screenbuf via HostOut DMA. */
715 *buffer++ = 0x180; /* Tag (FilterMode) */
716 *buffer++ = 0x200; /* Allow FBColor through */
717 *buffer++ = 0x53B; /* Tag */
718 *buffer++ = copy->Pitch;
719 *buffer++ = 0x53A; /* Tag */
720 *buffer++ = copy->SrcAddress;
721 *buffer++ = 0x539; /* Tag */
722 *buffer++ = copy->WidthHeight; /* Initiates transfer */
723 *buffer++ = 0x53C; /* Tag - DMAOutputAddress */
724 *buffer++ = virt_to_phys((void*)screenbuf);
725 *buffer++ = 0x53D; /* Tag - DMAOutputCount */
726 *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
728 /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
729 /* Now put it back to the screen */
/* Pass 2: DMA screenbuf back out to the destination rectangle. */
731 *buffer++ = 0x180; /* Tag (FilterMode) */
732 *buffer++ = 0x400; /* Allow Sync through */
733 *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
734 *buffer++ = 0x155; /* FBSourceData | count */
735 *buffer++ = 0x537; /* Tag */
736 *buffer++ = copy->Pitch;
737 *buffer++ = 0x536; /* Tag */
738 *buffer++ = copy->DstAddress;
739 *buffer++ = 0x535; /* Tag */
740 *buffer++ = copy->WidthHeight; /* Initiates transfer */
741 *buffer++ = 0x530; /* Tag - DMAAddr */
742 *buffer++ = virt_to_phys((void*)screenbuf);
744 *buffer++ = copy->Count; /* initiates DMA transfer of color data */
747 /* need to dispatch it now */
752 int gamma_dma_copy( struct inode *inode, struct file *filp,
753 unsigned int cmd, unsigned long arg )
755 drm_file_t *priv = filp->private_data;
756 drm_device_t *dev = priv->dev;
757 drm_gamma_copy_t copy;
759 if ( copy_from_user( ©, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
762 return gamma_do_copy_dma( dev, © );
765 /* =============================================================
766 * Per Context SAREA Support
769 int gamma_getsareactx(struct inode *inode, struct file *filp,
770 unsigned int cmd, unsigned long arg)
772 drm_file_t *priv = filp->private_data;
773 drm_device_t *dev = priv->dev;
774 drm_ctx_priv_map_t request;
777 if (copy_from_user(&request,
778 (drm_ctx_priv_map_t *)arg,
782 down(&dev->struct_sem);
783 if ((int)request.ctx_id >= dev->max_context) {
784 up(&dev->struct_sem);
788 map = dev->context_sareas[request.ctx_id];
789 up(&dev->struct_sem);
791 request.handle = map->handle;
792 if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
797 int gamma_setsareactx(struct inode *inode, struct file *filp,
798 unsigned int cmd, unsigned long arg)
800 drm_file_t *priv = filp->private_data;
801 drm_device_t *dev = priv->dev;
802 drm_ctx_priv_map_t request;
803 drm_map_t *map = NULL;
804 drm_map_list_t *r_list;
805 struct list_head *list;
807 if (copy_from_user(&request,
808 (drm_ctx_priv_map_t *)arg,
812 down(&dev->struct_sem);
814 list_for_each(list, &dev->maplist->head) {
815 r_list = list_entry(list, drm_map_list_t, head);
817 r_list->map->handle == request.handle) break;
819 if (list == &(dev->maplist->head)) {
820 up(&dev->struct_sem);
824 up(&dev->struct_sem);
826 if (!map) return -EINVAL;
828 down(&dev->struct_sem);
829 if ((int)request.ctx_id >= dev->max_context) {
830 up(&dev->struct_sem);
833 dev->context_sareas[request.ctx_id] = map;
834 up(&dev->struct_sem);
838 void DRM(driver_irq_preinstall)( drm_device_t *dev ) {
839 drm_gamma_private_t *dev_priv =
840 (drm_gamma_private_t *)dev->dev_private;
842 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
845 GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
846 GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
849 void DRM(driver_irq_postinstall)( drm_device_t *dev ) {
850 drm_gamma_private_t *dev_priv =
851 (drm_gamma_private_t *)dev->dev_private;
853 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
856 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
857 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
858 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
861 void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
862 drm_gamma_private_t *dev_priv =
863 (drm_gamma_private_t *)dev->dev_private;
867 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
870 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
871 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
872 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );