1 /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
35 #include "gamma_drm.h"
36 #include "gamma_drv.h"
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
/* Kick a DMA transfer on the Gamma: program the buffer's bus address
 * and its length into the DMA engine, spinning until the input FIFO
 * has room and the previous DMA command has drained.
 * NOTE(review): this dump is missing some original lines (braces,
 * busy-wait bodies); comments describe the visible code only. */
41 static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
44 	drm_gamma_private_t *dev_priv =
45 		(drm_gamma_private_t *)dev->dev_private;
	/* Need 2 free FIFO slots for the two register writes below. */
47 	while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
50 	GAMMA_WRITE(GAMMA_DMAADDRESS, address);
	/* Wait for the command engine to report ready (status 4 —
	 * presumably "DMA idle"; TODO confirm against Gamma docs). */
52 	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
	/* Count register takes 32-bit words, hence length / 4. */
55 	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
/* Quiesce a single-rasterizer board: drain the DMA engine, then push
 * a Sync command through the pipeline and poll the output FIFO until
 * the sync tag comes back, proving the chip is idle. */
58 void gamma_dma_quiescent_single(drm_device_t *dev)
60 	drm_gamma_private_t *dev_priv =
61 		(drm_gamma_private_t *)dev->dev_private;
	/* Spin until no DMA words remain queued. */
62 	while (GAMMA_READ(GAMMA_DMACOUNT))
65 	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
	/* Bit 10 of FilterMode lets the Sync tag through to the output
	 * FIFO; then request the sync itself. */
68 	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
69 	GAMMA_WRITE(GAMMA_SYNC, 0);
	/* Consume output-FIFO words until the sync tag appears. */
72 		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
74 	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
/* Quiesce a dual-MX board: same sync-tag handshake as the single
 * variant, but the Sync is broadcast to both rasterizers (mask 3)
 * and the tag must be read back from each MX.  The second MX's
 * registers sit at a +0x10000 offset from the first. */
77 void gamma_dma_quiescent_dual(drm_device_t *dev)
79 	drm_gamma_private_t *dev_priv =
80 		(drm_gamma_private_t *)dev->dev_private;
	/* Drain any outstanding DMA first. */
81 	while (GAMMA_READ(GAMMA_DMACOUNT))
	/* Three writes follow, so wait for 3 free input-FIFO slots. */
84 	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
87 	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 	GAMMA_WRITE(GAMMA_SYNC, 0);
91 	/* Read from first MX */
93 		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
95 	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
97 	/* Read from second MX */
99 		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
101 	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
/* Block (busy-wait) until the DMA engine has fully drained,
 * i.e. DMACOUNT reads back as zero. */
104 void gamma_dma_ready(drm_device_t *dev)
106 	drm_gamma_private_t *dev_priv =
107 		(drm_gamma_private_t *)dev->dev_private;
108 	while (GAMMA_READ(GAMMA_DMACOUNT))
/* Non-blocking idle test: returns non-zero when no DMA words remain
 * queued in the engine (DMACOUNT == 0). */
112 static inline int gamma_dma_is_ready(drm_device_t *dev)
114 	drm_gamma_private_t *dev_priv =
115 		(drm_gamma_private_t *)dev->dev_private;
116 	return (!GAMMA_READ(GAMMA_DMACOUNT));
/* Interrupt handler: acknowledges the chip's interrupt flags, frees
 * the buffer whose DMA just completed, and schedules the bottom half
 * (dev->work) to dispatch the next queued buffer. */
119 irqreturn_t gamma_irq_handler( DRM_IRQ_ARGS )
121 	drm_device_t *dev = (drm_device_t *)arg;
122 	drm_device_dma_t *dma = dev->dma;
123 	drm_gamma_private_t *dev_priv =
124 		(drm_gamma_private_t *)dev->dev_private;
126 	/* FIXME: should check whether we're actually interested in the interrupt? */
127 	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
	/* Three register writes below need 3 free input-FIFO slots. */
129 	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
	/* Re-arm the delay timer and acknowledge the command/general
	 * interrupt flags (values per hardware docs — not derivable
	 * from this dump). */
132 	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
133 	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
134 	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
135 	if (gamma_dma_is_ready(dev)) {
136 		/* Free previous buffer */
		/* dma_flag bit 0 is the "DMA dispatch in progress" mutex;
		 * if someone else holds it, skip the free this time. */
137 		if (test_and_set_bit(0, &dev->dma_flag))
139 		if (dma->this_buffer) {
140 			gamma_free_buffer(dev, dma->this_buffer);
141 			dma->this_buffer = NULL;
143 		clear_bit(0, &dev->dma_flag);
145 		/* Dispatch new buffer */
146 		schedule_work(&dev->work);
151 /* Only called by gamma_dma_schedule. */
/* Dispatch dma->next_buffer to the hardware.  Serialized by bit 0 of
 * dev->dma_flag.  Handles reclaim-listed and zero-length buffers,
 * takes/releases the hardware lock around the dispatch when the
 * caller does not already hold it (`locked`), and performs a context
 * switch first when the buffer belongs to a different context.
 * Returns 0 on success, -EBUSY if a dispatch is already in flight
 * (other error returns are on lines missing from this dump). */
152 static int gamma_do_dma(drm_device_t *dev, int locked)
154 	unsigned long address;
155 	unsigned long length;
158 	drm_device_dma_t *dma = dev->dma;
	/* One dispatch at a time. */
160 	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
163 	if (!dma->next_buffer) {
164 		DRM_ERROR("No next_buffer\n");
165 		clear_bit(0, &dev->dma_flag);
169 	buf = dma->next_buffer;
170 	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
171 	/* So we pass the buffer index value into the physical page offset */
172 	address = buf->idx << 12;
175 	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
176 		buf->context, buf->idx, length);
	/* Buffer was reclaimed (owner closed the device): free, not send. */
178 	if (buf->list == DRM_LIST_RECLAIM) {
179 		gamma_clear_next_buffer(dev);
180 		gamma_free_buffer(dev, buf);
181 		clear_bit(0, &dev->dma_flag);
186 		DRM_ERROR("0 length buffer\n");
187 		gamma_clear_next_buffer(dev);
188 		gamma_free_buffer(dev, buf);
189 		clear_bit(0, &dev->dma_flag);
	/* Engine still busy: bail out; scheduler will retry later. */
193 	if (!gamma_dma_is_ready(dev)) {
194 		clear_bit(0, &dev->dma_flag);
	/* "while locked" buffers require the caller to hold the HW lock. */
198 	if (buf->while_locked) {
199 		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
200 			DRM_ERROR("Dispatching buffer %d from pid %d"
201 				" \"while locked\", but no lock held\n",
202 				buf->idx, current->pid);
	/* Otherwise take the HW lock as the kernel context for the DMA. */
205 		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
206 						DRM_KERNEL_CONTEXT)) {
207 			clear_bit(0, &dev->dma_flag);
	/* Context switch needed unless the target context preserves state. */
212 	if (dev->last_context != buf->context
213 		&& !(dev->queuelist[buf->context]->flags
214 		& _DRM_CONTEXT_PRESERVED)) {
215 		/* PRE: dev->last_context != buf->context */
216 		if (DRM(context_switch)(dev, dev->last_context,
218 			DRM(clear_next_buffer)(dev);
219 			DRM(free_buffer)(dev, buf);
224 		/* POST: we will wait for the context
225 		switch and will dispatch on a later call
226 		when dev->last_context == buf->context.
227 		NOTE WE HOLD THE LOCK THROUGHOUT THIS
231 	gamma_clear_next_buffer(dev);
234 	buf->list = DRM_LIST_PEND;
236 	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
237 	address = buf->idx << 12;
239 	gamma_dma_dispatch(dev, address, length);
	/* Previous buffer is done once a new one is dispatched; free it
	 * and remember this one for the next round / IRQ handler. */
240 	gamma_free_buffer(dev, dma->this_buffer);
241 	dma->this_buffer = buf;
243 	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
244 	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
	/* Drop the kernel-context HW lock if we took it above. */
246 	if (!buf->while_locked && !dev->context_flag && !locked) {
247 		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
248 					DRM_KERNEL_CONTEXT)) {
254 	clear_bit(0, &dev->dma_flag);
/* Timer bottom-half: `dev` arrives as an unsigned long (old timer
 * API), cast back to the device and fed to the scheduler unlocked. */
260 static void gamma_dma_timer_bh(unsigned long dev)
262 	gamma_dma_schedule((drm_device_t *)dev, 0);
/* Workqueue bottom-half (see schedule_work in the IRQ handler):
 * run the DMA scheduler without holding the HW lock. */
265 void gamma_irq_immediate_bh(void *dev)
267 	gamma_dma_schedule(dev, 0);
/* Core DMA scheduler: retries any previously-selected-but-unsent
 * buffer, then pulls buffers off the per-context wait queues
 * (gamma_select_queue) and dispatches them via gamma_do_dma until
 * the queues are empty or the engine blocks.  Serialized by bit 0 of
 * dev->interrupt_flag; a contended call bumps _DRM_STAT_MISSED and
 * returns.  `locked` is passed through to gamma_do_dma. */
270 int gamma_dma_schedule(drm_device_t *dev, int locked)
279 	drm_device_dma_t *dma = dev->dma;
281 	if (test_and_set_bit(0, &dev->interrupt_flag)) {
283 		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
	/* Snapshot the missed counter; re-checked below to detect
	 * schedule attempts that raced with us. */
286 	missed = atomic_read(&dev->counts[10]);
	/* A context switch is in flight: don't touch the hardware. */
290 	if (dev->context_flag) {
291 		clear_bit(0, &dev->interrupt_flag);
294 	if (dma->next_buffer) {
295 		/* Unsent buffer that was previously
296 		selected, but that couldn't be sent
297 		because the lock could not be obtained
298 		or the DMA engine wasn't ready. Try
300 		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
303 		next = gamma_select_queue(dev, gamma_dma_timer_bh);
305 			q = dev->queuelist[next];
306 			buf = gamma_waitlist_get(&q->waitlist);
307 			dma->next_buffer = buf;
	/* Reclaimed buffers are freed here rather than dispatched. */
309 			if (buf && buf->list == DRM_LIST_RECLAIM) {
310 				gamma_clear_next_buffer(dev);
311 				gamma_free_buffer(dev, buf);
314 	} while (next >= 0 && !dma->next_buffer);
315 	if (dma->next_buffer) {
316 		if (!(retcode = gamma_do_dma(dev, locked))) {
	/* Someone tried to schedule while we held the flag and the
	 * engine is idle again — loop back and service them. */
323 	if (missed != atomic_read(&dev->counts[10])) {
324 		if (gamma_dma_is_ready(dev)) goto again;
326 	if (processed && gamma_dma_is_ready(dev)) {
332 	clear_bit(0, &dev->interrupt_flag);
/* High-priority (queue-bypassing) DMA send path for the DMA ioctl.
 * Disables the scheduler (interrupt_flag), optionally takes the HW
 * lock, copies the user's buffer index/size arrays in, validates
 * ownership and list state of each buffer, and dispatches them
 * synchronously with gamma_dma_dispatch.  Returns 0 or -errno.
 * NOTE(review): several lines (allocation flags, error labels,
 * cleanup path) are missing from this dump. */
337 static int gamma_dma_priority(struct file *filp,
338 				drm_device_t *dev, drm_dma_t *d)
340 	unsigned long address;
341 	unsigned long length;
347 	drm_buf_t *last_buf = NULL;
348 	drm_device_dma_t *dma = dev->dma;
349 	int *send_indices = NULL;
350 	int *send_sizes = NULL;
352 	DECLARE_WAITQUEUE(entry, current);
354 	/* Turn off interrupt handling */
355 	while (test_and_set_bit(0, &dev->interrupt_flag)) {
357 		if (signal_pending(current)) return -EINTR;
	/* Unless the caller already holds the lock "while locked",
	 * acquire the HW lock as the kernel context. */
359 	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
360 		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
361 					DRM_KERNEL_CONTEXT)) {
363 			if (signal_pending(current)) {
364 				clear_bit(0, &dev->interrupt_flag);
	/* Copy the caller's index and size arrays into kernel memory. */
371 	send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
373 	if (send_indices == NULL)
375 	if (copy_from_user(send_indices, d->send_indices,
376 				d->send_count * sizeof(*send_indices))) {
381 	send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
383 	if (send_sizes == NULL)
385 	if (copy_from_user(send_sizes, d->send_sizes,
386 				d->send_count * sizeof(*send_sizes))) {
391 	for (i = 0; i < d->send_count; i++) {
392 		idx = send_indices[i];
		/* Range-check the user-supplied buffer index. */
393 		if (idx < 0 || idx >= dma->buf_count) {
394 			DRM_ERROR("Index %d (of %d max)\n",
395 				send_indices[i], dma->buf_count - 1);
398 		buf = dma->buflist[ idx ];
		/* Reject buffers not owned by this file handle. */
399 		if (buf->filp != filp) {
400 			DRM_ERROR("Process %d using buffer not owned\n",
405 		if (buf->list != DRM_LIST_NONE) {
406 			DRM_ERROR("Process %d using buffer on list %d\n",
407 				current->pid, buf->list);
411 		/* This isn't a race condition on
412 		buf->list, since our concern is the
413 		buffer reclaim during the time the
414 		process closes the /dev/drm? handle, so
415 		it can't also be doing DMA. */
416 		buf->list = DRM_LIST_PRIO;
417 		buf->used = send_sizes[i];
418 		buf->context = d->context;
419 		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
420 		address = (unsigned long)buf->address;
423 			DRM_ERROR("0 length buffer\n");
426 			DRM_ERROR("Sending pending buffer:"
427 				" buffer %d, offset %d\n",
433 			DRM_ERROR("Sending waiting buffer:"
434 				" buffer %d, offset %d\n",
	/* Switch to the buffer's context if it doesn't preserve state;
	 * sleep on context_wait until the switch completes. */
441 		if (dev->last_context != buf->context
442 			&& !(dev->queuelist[buf->context]->flags
443 			& _DRM_CONTEXT_PRESERVED)) {
444 			add_wait_queue(&dev->context_wait, &entry);
445 			current->state = TASK_INTERRUPTIBLE;
446 			/* PRE: dev->last_context != buf->context */
447 			DRM(context_switch)(dev, dev->last_context,
449 			/* POST: we will wait for the context
450 			switch and will dispatch on a later call
451 			when dev->last_context == buf->context.
452 			NOTE WE HOLD THE LOCK THROUGHOUT THIS
455 			current->state = TASK_RUNNING;
456 			remove_wait_queue(&dev->context_wait, &entry);
457 			if (signal_pending(current)) {
461 			if (dev->last_context != buf->context) {
462 				DRM_ERROR("Context mismatch: %d %d\n",
468 		gamma_dma_dispatch(dev, address, length);
469 		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
470 		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
473 			gamma_free_buffer(dev, last_buf);
	/* Cleanup path: drain the engine, free the trailing buffer,
	 * release the temp arrays, drop the lock, re-enable scheduling. */
481 	gamma_dma_ready(dev);
482 	gamma_free_buffer(dev, last_buf);
485 	DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
488 	DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
491 	if (must_free && !dev->context_flag) {
492 		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
493 					DRM_KERNEL_CONTEXT)) {
497 	clear_bit(0, &dev->interrupt_flag);
/* Normal (queued) DMA send path for the DMA ioctl: enqueue the
 * caller's buffers, kick the scheduler, and — if _DRM_DMA_BLOCK is
 * set — sleep on the last buffer's wait queue until it has been sent
 * or a signal arrives. */
501 static int gamma_dma_send_buffers(struct file *filp,
502 					drm_device_t *dev, drm_dma_t *d)
504 	DECLARE_WAITQUEUE(entry, current);
505 	drm_buf_t *last_buf = NULL;
507 	drm_device_dma_t *dma = dev->dma;
	/* Index of the last buffer in the user's send list; used as the
	 * completion sentinel in blocking mode. */
510 	if (get_user(send_index, &d->send_indices[d->send_count-1]))
	/* Register on the last buffer's wait queue BEFORE enqueueing,
	 * so a fast completion can't be missed. */
513 	if (d->flags & _DRM_DMA_BLOCK) {
514 		last_buf = dma->buflist[send_index];
515 		add_wait_queue(&last_buf->dma_wait, &entry);
518 	if ((retcode = gamma_dma_enqueue(filp, d))) {
519 		if (d->flags & _DRM_DMA_BLOCK)
520 			remove_wait_queue(&last_buf->dma_wait, &entry);
524 	gamma_dma_schedule(dev, 0);
526 	if (d->flags & _DRM_DMA_BLOCK) {
527 		DRM_DEBUG("%d waiting\n", current->pid);
529 			current->state = TASK_INTERRUPTIBLE;
530 			if (!last_buf->waiting && !last_buf->pending)
531 				break; /* finished */
533 			if (signal_pending(current)) {
534 				retcode = -EINTR; /* Can't restart */
538 		current->state = TASK_RUNNING;
539 		DRM_DEBUG("%d running\n", current->pid);
540 		remove_wait_queue(&last_buf->dma_wait, &entry);
	/* If the buffer never made it onto the hardware, free it here
	 * — but only when nobody else is still waiting on it. */
542 			|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
543 			if (!waitqueue_active(&last_buf->dma_wait)) {
544 				gamma_free_buffer(dev, last_buf);
	/* Diagnostic dump: context, waiting/pending flags, wait count,
	 * list state and owner pid of the stuck buffer. */
548 			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
552 				(long)DRM_WAITCOUNT(dev, d->context),
/* DRM_IOCTL_DMA entry point.  Copies the drm_dma_t request in,
 * range-checks send/request counts against the buffer pool, routes
 * to the priority or queued send path, optionally hands fresh
 * buffers back to the caller, and copies the (updated) request out. */
561 int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
564 	drm_file_t *priv = filp->private_data;
565 	drm_device_t *dev = priv->dev;
566 	drm_device_dma_t *dma = dev->dma;
568 	drm_dma_t __user *argp = (void __user *)arg;
571 	if (copy_from_user(&d, argp, sizeof(d)))
	/* Validate user-supplied counts before touching buflist. */
574 	if (d.send_count < 0 || d.send_count > dma->buf_count) {
575 		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
576 			current->pid, d.send_count, dma->buf_count);
580 	if (d.request_count < 0 || d.request_count > dma->buf_count) {
581 		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
582 			current->pid, d.request_count, dma->buf_count);
587 	if (d.flags & _DRM_DMA_PRIORITY)
588 		retcode = gamma_dma_priority(filp, dev, &d);
590 		retcode = gamma_dma_send_buffers(filp, dev, &d);
	/* On success, also satisfy any request for new buffers. */
595 	if (!retcode && d.request_count) {
596 		retcode = gamma_dma_get_buffers(filp, &d);
599 	DRM_DEBUG("%d returning, granted = %d\n",
600 		current->pid, d.granted_count);
	/* Return granted_count etc. to userspace. */
601 	if (copy_to_user(argp, &d, sizeof(d)))
607 /* =============================================================
608 * DMA initialization, cleanup
/* Per-device DMA initialization (GAMMA_INIT_DMA): allocate and zero
 * dev_priv, locate the SAREA and the four MMIO maps, build the
 * logical page table from the DMA buffer addresses, and point the
 * chip's page-table registers at it.  Two page tables are built:
 * one with physical addresses and one (after DRM_IOREMAP) with
 * kernel-virtual addresses — presumably mirroring how the buffers
 * are later accessed; confirm against the full source. */
611 static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
613 	drm_gamma_private_t *dev_priv;
614 	drm_device_dma_t *dma = dev->dma;
617 	struct list_head *list;
620 	DRM_DEBUG( "%s\n", __FUNCTION__ );
622 	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
627 	dev->dev_private = (void *)dev_priv;
629 	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
631 	dev_priv->num_rast = init->num_rast;
	/* The SAREA is the SHM map that carries the hardware lock. */
633 	list_for_each(list, &dev->maplist->head) {
634 		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
636 			r_list->map->type == _DRM_SHM &&
637 			r_list->map->flags & _DRM_CONTAINS_LOCK ) {
638 			dev_priv->sarea = r_list->map;
643 	DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
644 	DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
645 	DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
646 	DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );
648 	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
649 		((u8 *)dev_priv->sarea->handle +
650 		init->sarea_priv_offset);
	/* Buffer GLINT_DRI_BUF_COUNT holds the page table itself. */
653 	buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	/* First pass: physical page addresses, low bits 0x07 set
	 * (page-table entry flags per hardware docs). */
656 	for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
657 		buf = dma->buflist[i];
658 		*pgt = virt_to_phys((void*)buf->address) | 0x07;
662 	buf = dma->buflist[GLINT_DRI_BUF_COUNT];
664 	DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
666 	DRM_IOREMAP( dev_priv->buffers, dev );
668 	buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	/* Second pass: kernel-virtual addresses with the same flags. */
671 	for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
672 		buf = dma->buflist[i];
673 		*pgt = (unsigned long)buf->address + 0x07;
677 	buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	/* Program the DMA controller and hand it the page table. */
679 	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1);
680 	GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
682 	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
683 	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
684 	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
/* Tear down DMA state: disable interrupts, unmap the ioremapped
 * buffer region, and free dev_priv.  Safe to call when DMA was
 * never initialized (dev_private NULL). */
689 int gamma_do_cleanup_dma( drm_device_t *dev )
691 	DRM_DEBUG( "%s\n", __FUNCTION__ );
694 	/* Make sure interrupts are disabled here because the uninstall ioctl
695 	* may not have been called from userspace and after dev_private
696 	* is freed, it's too late.
698 	if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
701 	if ( dev->dev_private ) {
702 		drm_gamma_private_t *dev_priv = dev->dev_private;
704 		if ( dev_priv->buffers != NULL )
705 			DRM_IOREMAPFREE( dev_priv->buffers, dev );
707 		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
709 		dev->dev_private = NULL;
/* DRM_IOCTL_GAMMA_INIT entry point: requires the HW lock, copies the
 * init request in, and dispatches to init or cleanup by init.func. */
715 int gamma_dma_init( struct inode *inode, struct file *filp,
716 			unsigned int cmd, unsigned long arg )
718 	drm_file_t *priv = filp->private_data;
719 	drm_device_t *dev = priv->dev;
720 	drm_gamma_init_t init;
722 	LOCK_TEST_WITH_RETURN( dev, filp );
724 	if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
727 	switch ( init.func ) {
729 		return gamma_do_init_dma( dev, &init );
730 	case GAMMA_CLEANUP_DMA:
731 		return gamma_do_cleanup_dma( dev );
/* Screen-to-screen copy via DMA: builds a two-stage command stream
 * in a reserved DMA buffer.  Stage 1 reads the source rectangle out
 * of the framebuffer into `screenbuf` (the DRM_RESTRICTED buffer at
 * index GLINT_DRI_BUF_COUNT + 1); stage 2 DMAs that data back into
 * the destination rectangle.  Tag values (0x180, 0x53x, ...) are
 * GLINT register tags — see hardware docs. */
737 static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
739 	drm_device_dma_t *dma = dev->dma;
740 	unsigned int *screenbuf;
742 	DRM_DEBUG( "%s\n", __FUNCTION__ );
744 	/* We've DRM_RESTRICTED this DMA buffer */
746 	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
	/* Stage 1: framebuffer -> screenbuf. */
749 	*buffer++ = 0x180;	/* Tag (FilterMode) */
750 	*buffer++ = 0x200;	/* Allow FBColor through */
751 	*buffer++ = 0x53B;	/* Tag */
752 	*buffer++ = copy->Pitch;
753 	*buffer++ = 0x53A;	/* Tag */
754 	*buffer++ = copy->SrcAddress;
755 	*buffer++ = 0x539;	/* Tag */
756 	*buffer++ = copy->WidthHeight; /* Initiates transfer */
757 	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
758 	*buffer++ = virt_to_phys((void*)screenbuf);
759 	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
760 	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
762 	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
763 	/* Now put it back to the screen */
	/* Stage 2: screenbuf -> framebuffer at the destination. */
765 	*buffer++ = 0x180;	/* Tag (FilterMode) */
766 	*buffer++ = 0x400;	/* Allow Sync through */
767 	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
768 	*buffer++ = 0x155;	/* FBSourceData | count */
769 	*buffer++ = 0x537;	/* Tag */
770 	*buffer++ = copy->Pitch;
771 	*buffer++ = 0x536;	/* Tag */
772 	*buffer++ = copy->DstAddress;
773 	*buffer++ = 0x535;	/* Tag */
774 	*buffer++ = copy->WidthHeight; /* Initiates transfer */
775 	*buffer++ = 0x530;	/* Tag - DMAAddr */
776 	*buffer++ = virt_to_phys((void*)screenbuf);
778 	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
781 	/* need to dispatch it now */
786 int gamma_dma_copy( struct inode *inode, struct file *filp,
787 unsigned int cmd, unsigned long arg )
789 drm_file_t *priv = filp->private_data;
790 drm_device_t *dev = priv->dev;
791 drm_gamma_copy_t copy;
793 if ( copy_from_user( ©, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
796 return gamma_do_copy_dma( dev, © );
799 /* =============================================================
800 * Per Context SAREA Support
/* Per-context SAREA query ioctl: look up the map registered for
 * request.ctx_id under struct_sem and return its handle to the
 * caller. */
803 int gamma_getsareactx(struct inode *inode, struct file *filp,
804 			unsigned int cmd, unsigned long arg)
806 	drm_file_t *priv = filp->private_data;
807 	drm_device_t *dev = priv->dev;
808 	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
809 	drm_ctx_priv_map_t request;
812 	if (copy_from_user(&request, argp, sizeof(request)))
	/* Bounds-check the user-supplied context id under the lock. */
815 	down(&dev->struct_sem);
816 	if ((int)request.ctx_id >= dev->max_context) {
817 		up(&dev->struct_sem);
821 	map = dev->context_sareas[request.ctx_id];
822 	up(&dev->struct_sem);
824 	request.handle = map->handle;
825 	if (copy_to_user(argp, &request, sizeof(request)))
/* Per-context SAREA bind ioctl: find the registered map whose handle
 * matches request.handle, then record it as the private SAREA for
 * request.ctx_id.  Both the maplist walk and the context_sareas
 * update are done under struct_sem (taken twice, with a gap —
 * NOTE(review): the map could in principle be unregistered in that
 * gap; behavior matches the upstream driver). */
830 int gamma_setsareactx(struct inode *inode, struct file *filp,
831 			unsigned int cmd, unsigned long arg)
833 	drm_file_t *priv = filp->private_data;
834 	drm_device_t *dev = priv->dev;
835 	drm_ctx_priv_map_t request;
836 	drm_map_t *map = NULL;
837 	drm_map_list_t *r_list;
838 	struct list_head *list;
840 	if (copy_from_user(&request,
841 			(drm_ctx_priv_map_t __user *)arg,
	/* Walk the map list looking for a handle match. */
845 	down(&dev->struct_sem);
847 	list_for_each(list, &dev->maplist->head) {
848 		r_list = list_entry(list, drm_map_list_t, head);
850 			r_list->map->handle == request.handle) break;
	/* Reached the head sentinel: no such handle. */
852 	if (list == &(dev->maplist->head)) {
853 		up(&dev->struct_sem);
857 	up(&dev->struct_sem);
859 	if (!map) return -EINVAL;
	/* Bind the map to the context slot, re-checking the id. */
861 	down(&dev->struct_sem);
862 	if ((int)request.ctx_id >= dev->max_context) {
863 		up(&dev->struct_sem);
866 	dev->context_sareas[request.ctx_id] = map;
867 	up(&dev->struct_sem);
/* Called before the IRQ line is requested: put the command engine
 * into the mode the handler expects and make sure DMA is disabled
 * so no interrupt fires before the handler is installed. */
871 void DRM(driver_irq_preinstall)( drm_device_t *dev ) {
872 	drm_gamma_private_t *dev_priv =
873 		(drm_gamma_private_t *)dev->dev_private;
	/* Two register writes follow — wait for 2 FIFO slots. */
875 	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
878 	GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
879 	GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
/* Called after the IRQ handler is installed: unmask the general and
 * command interrupts and start the delay timer so the chip begins
 * generating interrupts. */
882 void DRM(driver_irq_postinstall)( drm_device_t *dev ) {
883 	drm_gamma_private_t *dev_priv =
884 		(drm_gamma_private_t *)dev->dev_private;
	/* Three register writes follow — wait for 3 FIFO slots. */
886 	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
889 	GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
890 	GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
891 	GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
/* Called before the IRQ line is released: stop the delay timer and
 * mask all interrupt sources (mirror image of postinstall, disabled
 * in reverse order). */
894 void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
895 	drm_gamma_private_t *dev_priv =
896 		(drm_gamma_private_t *)dev->dev_private;
900 	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
903 	GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
904 	GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
905 	GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );