1 /* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Abraham vd Merwe <abraham@2d3d.co.za>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
40 #include <linux/delay.h>
41 #include <asm/uaccess.h>
/* Ownership states for a buffer's in_use word (lives in the hardware
 * status page; flipped atomically with cmpxchg, see i830_freelist_*). */
43 #define I830_BUF_FREE 2
44 #define I830_BUF_CLIENT 1
45 #define I830_BUF_HARDWARE 0
/* Whether a buffer is currently mmapped into the client's address space. */
47 #define I830_BUF_UNMAPPED 0
48 #define I830_BUF_MAPPED 1
/* Compatibility shim: kernels <= 2.4.2 lack down_write(), so fall back
 * to the plain semaphore down().  (Matching #endif not visible here.) */
50 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
51 #define down_write down
/* Ioctl dispatch table for the i830 DRM driver, indexed by ioctl number.
 * The two trailing flags per entry appear to be (auth_needed, root_only):
 * only INIT carries 1,1 — TODO confirm against drm_ioctl_desc_t. */
55 drm_ioctl_desc_t i830_ioctls[] = {
56 [DRM_IOCTL_NR(DRM_I830_INIT)] = { i830_dma_init, 1, 1 },
57 [DRM_IOCTL_NR(DRM_I830_VERTEX)] = { i830_dma_vertex, 1, 0 },
58 [DRM_IOCTL_NR(DRM_I830_CLEAR)] = { i830_clear_bufs, 1, 0 },
59 [DRM_IOCTL_NR(DRM_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 },
60 [DRM_IOCTL_NR(DRM_I830_GETAGE)] = { i830_getage, 1, 0 },
61 [DRM_IOCTL_NR(DRM_I830_GETBUF)] = { i830_getbuf, 1, 0 },
62 [DRM_IOCTL_NR(DRM_I830_SWAP)] = { i830_swap_bufs, 1, 0 },
63 [DRM_IOCTL_NR(DRM_I830_COPY)] = { i830_copybuf, 1, 0 },
64 [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = { i830_docopy, 1, 0 },
65 [DRM_IOCTL_NR(DRM_I830_FLIP)] = { i830_flip_bufs, 1, 0 },
66 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 },
67 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 },
68 [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = { i830_getparam, 1, 0 },
69 [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = { i830_setparam, 1, 0 }
/* Number of entries in the table above (exported to the DRM core). */
72 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
/* Debug helper: dump the hardware status page.  Dwords 0-5 are fixed
 * hardware/driver fields; per-buffer in_use words start at dword 9
 * (the same offset used by i830_freelist_init). */
74 static inline void i830_print_status_page(drm_device_t *dev)
76 drm_device_dma_t *dma = dev->dma;
77 drm_i830_private_t *dev_priv = dev->dev_private;
78 u32 *temp = dev_priv->hw_status_page;
81 DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
82 DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
83 DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
84 DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
85 DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
/* Buffer status words begin at index 9; print one line per DMA buffer. */
86 for(i = 9; i < dma->buf_count + 9; i++) {
87 DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 9, temp[i]);
/* Claim a free DMA buffer for a client.  Linearly scans the buffer list
 * and atomically flips the first FREE in_use word to CLIENT via cmpxchg;
 * the matched buffer is returned (return paths elided in this view). */
91 static drm_buf_t *i830_freelist_get(drm_device_t *dev)
93 drm_device_dma_t *dma = dev->dma;
97 /* Linear search might not be the best solution */
99 for (i = 0; i < dma->buf_count; i++) {
100 drm_buf_t *buf = dma->buflist[ i ];
101 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
102 /* In use is already a pointer */
/* cmpxchg's replacement value (I830_BUF_CLIENT) is on the elided line. */
103 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
105 if(used == I830_BUF_FREE) {
112 /* This should only be called if the buffer is not sent to the hardware
113 * yet, the hardware updates in use for us once its on the ring buffer.
/* Return a client-owned buffer to the free list (CLIENT -> FREE).
 * Logs an error if the buffer was not actually owned by a client. */
116 static int i830_freelist_put(drm_device_t *dev, drm_buf_t *buf)
118 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
121 /* In use is already a pointer */
122 used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
123 if(used != I830_BUF_CLIENT) {
124 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
/* File operations temporarily installed on the DRM fd while a buffer is
 * being mmapped (see i830_map_buffer): only .mmap is driver-specific. */
131 static struct file_operations i830_buffer_fops = {
134 .release = drm_release,
136 .mmap = i830_mmap_buffers,
137 .fasync = drm_fasync,
/* mmap handler used only through i830_buffer_fops: maps the buffer that
 * i830_map_buffer() published in dev_priv->mmap_buffer into the caller's
 * address space, marking it MAPPED.  Returns 0 or -EAGAIN on remap failure. */
140 int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
142 drm_file_t *priv = filp->private_data;
144 drm_i830_private_t *dev_priv;
146 drm_i830_buf_priv_t *buf_priv;
150 dev_priv = dev->dev_private;
151 buf = dev_priv->mmap_buffer;
152 buf_priv = buf->dev_private;
/* Device memory: mark uncached-I/O style and don't copy on fork. */
154 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
157 buf_priv->currently_mapped = I830_BUF_MAPPED;
/* Map the buffer's physical pages straight into the vma. */
160 if (remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
161 VM_OFFSET(vma) >> PAGE_SHIFT,
162 vma->vm_end - vma->vm_start,
163 vma->vm_page_prot)) return -EAGAIN;
167 static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
169 drm_file_t *priv = filp->private_data;
170 drm_device_t *dev = priv->dev;
171 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
172 drm_i830_private_t *dev_priv = dev->dev_private;
173 struct file_operations *old_fops;
174 unsigned long virtual;
177 if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
179 down_write( ¤t->mm->mmap_sem );
180 old_fops = filp->f_op;
181 filp->f_op = &i830_buffer_fops;
182 dev_priv->mmap_buffer = buf;
183 virtual = do_mmap(filp, 0, buf->total, PROT_READ|PROT_WRITE,
184 MAP_SHARED, buf->bus_address);
185 dev_priv->mmap_buffer = NULL;
186 filp->f_op = old_fops;
187 if (IS_ERR((void *)virtual)) { /* ugh */
189 DRM_ERROR("mmap error\n");
191 buf_priv->virtual = NULL;
193 buf_priv->virtual = (void __user *)virtual;
195 up_write( ¤t->mm->mmap_sem );
200 static int i830_unmap_buffer(drm_buf_t *buf)
202 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
205 if(buf_priv->currently_mapped != I830_BUF_MAPPED)
208 down_write(¤t->mm->mmap_sem);
209 retcode = do_munmap(current->mm,
210 (unsigned long)buf_priv->virtual,
211 (size_t) buf->total);
212 up_write(¤t->mm->mmap_sem);
214 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
215 buf_priv->virtual = NULL;
/* Grab a free buffer, map it into the caller, and fill in the request
 * descriptor (idx/size/user virtual address).  On map failure the buffer
 * is returned to the free list.  Error paths partially elided here. */
220 static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
224 drm_i830_buf_priv_t *buf_priv;
227 buf = i830_freelist_get(dev);
230 DRM_DEBUG("retcode=%d\n", retcode);
234 retcode = i830_map_buffer(buf, filp);
/* Mapping failed: give the buffer back so it isn't leaked. */
236 i830_freelist_put(dev, buf);
237 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
241 buf_priv = buf->dev_private;
/* Report the claimed buffer back to userspace. */
243 d->request_idx = buf->idx;
244 d->request_size = buf->total;
245 d->virtual = buf_priv->virtual;
/* Tear down all DMA state: IRQs, the ring mapping, the hardware status
 * page, the private struct, and every buffer's kernel-side mapping. */
250 int i830_dma_cleanup(drm_device_t *dev)
252 drm_device_dma_t *dma = dev->dma;
254 /* Make sure interrupts are disabled here because the uninstall ioctl
255 * may not have been called from userspace and after dev_private
256 * is freed, it's too late.
258 if ( dev->irq_enabled ) drm_irq_uninstall(dev);
260 if (dev->dev_private) {
262 drm_i830_private_t *dev_priv =
263 (drm_i830_private_t *) dev->dev_private;
265 if (dev_priv->ring.virtual_start) {
266 drm_ioremapfree((void *) dev_priv->ring.virtual_start,
267 dev_priv->ring.Size, dev);
269 if (dev_priv->hw_status_page) {
270 pci_free_consistent(dev->pdev, PAGE_SIZE,
271 dev_priv->hw_status_page,
272 dev_priv->dma_status_page);
273 /* Need to rewrite hardware status page */
/* 0x02080 = HWS_PGA register; point it back at a benign address. */
274 I830_WRITE(0x02080, 0x1ffff000);
277 drm_free(dev->dev_private, sizeof(drm_i830_private_t),
279 dev->dev_private = NULL;
/* Drop the kernel-side ioremap of every DMA buffer. */
281 for (i = 0; i < dma->buf_count; i++) {
282 drm_buf_t *buf = dma->buflist[ i ];
283 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
284 if ( buf_priv->kernel_virtual && buf->total )
285 drm_ioremapfree(buf_priv->kernel_virtual, buf->total, dev);
/* Busy-wait until the ring buffer has at least n bytes of free space.
 * A 3-second watchdog is re-armed whenever the hardware head moves;
 * if the head stalls past the deadline we report a lockup (the error
 * return path is elided in this view). */
291 int i830_wait_ring(drm_device_t *dev, int n, const char *caller)
293 drm_i830_private_t *dev_priv = dev->dev_private;
294 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
297 unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
299 end = jiffies + (HZ*3);
300 while (ring->space < n) {
301 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
/* Free space = head - (tail + 8), wrapped modulo ring size. */
302 ring->space = ring->head - (ring->tail+8);
303 if (ring->space < 0) ring->space += ring->Size;
/* Head advanced: hardware is alive, re-arm the 3s watchdog. */
305 if (ring->head != last_head) {
306 end = jiffies + (HZ*3);
307 last_head = ring->head;
311 if(time_before(end, jiffies)) {
312 DRM_ERROR("space: %d wanted %d\n", ring->space, n);
313 DRM_ERROR("lockup\n");
/* Record that we had to wait, for the performance-box overlay. */
317 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
/* Resynchronize the software ring state (head/tail/space) with the
 * hardware registers; called before the kernel emits commands, since a
 * client may have advanced the ring behind our back. */
324 static void i830_kernel_lost_context(drm_device_t *dev)
326 drm_i830_private_t *dev_priv = dev->dev_private;
327 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
329 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
330 ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
331 ring->space = ring->head - (ring->tail+8);
332 if (ring->space < 0) ring->space += ring->Size;
/* head == tail means the ring drained: note it for the perf boxes. */
334 if (ring->head == ring->tail)
335 dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
/* Initialize the buffer free list: each buffer's in_use word is a dword
 * inside the hardware status page (starting at my_idx, declared on an
 * elided line) so the GPU itself can mark buffers FREE.  Also ioremaps
 * each buffer for kernel access. */
338 static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
340 drm_device_dma_t *dma = dev->dma;
342 u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
/* The 4K status page can only hold ~1019 per-buffer dwords. */
345 if(dma->buf_count > 1019) {
346 /* Not enough space in the status page for the freelist */
350 for (i = 0; i < dma->buf_count; i++) {
351 drm_buf_t *buf = dma->buflist[ i ];
352 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
354 buf_priv->in_use = hw_status++;
355 buf_priv->my_use_idx = my_idx;
358 *buf_priv->in_use = I830_BUF_FREE;
360 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
/* One-time DMA setup from userspace parameters: locate the SAREA, MMIO
 * and buffer maps, ioremap the ring, cache framebuffer layout, allocate
 * the hardware status page, and build the buffer free list.  Every
 * failure path installs dev_priv first so i830_dma_cleanup() can undo
 * partial setup. */
366 static int i830_dma_initialize(drm_device_t *dev,
367 drm_i830_private_t *dev_priv,
368 drm_i830_init_t *init)
370 struct list_head *list;
372 memset(dev_priv, 0, sizeof(drm_i830_private_t));
/* The SAREA is the SHM map flagged as containing the hardware lock. */
374 list_for_each(list, &dev->maplist->head) {
375 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
377 r_list->map->type == _DRM_SHM &&
378 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
379 dev_priv->sarea_map = r_list->map;
384 if(!dev_priv->sarea_map) {
385 dev->dev_private = (void *)dev_priv;
386 i830_dma_cleanup(dev);
387 DRM_ERROR("can not find sarea!\n");
390 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
391 if(!dev_priv->mmio_map) {
392 dev->dev_private = (void *)dev_priv;
393 i830_dma_cleanup(dev);
394 DRM_ERROR("can not find mmio map!\n");
397 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
398 if(!dev->agp_buffer_map) {
399 dev->dev_private = (void *)dev_priv;
400 i830_dma_cleanup(dev);
401 DRM_ERROR("can not find dma buffer map!\n");
/* Driver-private state lives at an offset inside the SAREA. */
405 dev_priv->sarea_priv = (drm_i830_sarea_t *)
406 ((u8 *)dev_priv->sarea_map->handle +
407 init->sarea_priv_offset);
409 dev_priv->ring.Start = init->ring_start;
410 dev_priv->ring.End = init->ring_end;
411 dev_priv->ring.Size = init->ring_size;
413 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
415 init->ring_size, dev);
417 if (dev_priv->ring.virtual_start == NULL) {
418 dev->dev_private = (void *) dev_priv;
419 i830_dma_cleanup(dev);
420 DRM_ERROR("can not ioremap virtual address for"
/* Ring size is a power of two, so Size-1 works as a tail mask. */
425 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
/* Cache framebuffer geometry and per-surface offsets from userspace. */
427 dev_priv->w = init->w;
428 dev_priv->h = init->h;
429 dev_priv->pitch = init->pitch;
430 dev_priv->back_offset = init->back_offset;
431 dev_priv->depth_offset = init->depth_offset;
432 dev_priv->front_offset = init->front_offset;
/* Precompute DESTBUFFER_INFO dwords (offset | pitch bits) per surface. */
434 dev_priv->front_di1 = init->front_offset | init->pitch_bits;
435 dev_priv->back_di1 = init->back_offset | init->pitch_bits;
436 dev_priv->zi1 = init->depth_offset | init->pitch_bits;
438 DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
439 DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
440 DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
441 DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
443 dev_priv->cpp = init->cpp;
444 /* We are using separate values as placeholders for mechanisms for
445 * private backbuffer/depthbuffer usage.
448 dev_priv->back_pitch = init->back_pitch;
449 dev_priv->depth_pitch = init->depth_pitch;
450 dev_priv->do_boxes = 0;
451 dev_priv->use_mi_batchbuffer_start = 0;
453 /* Program Hardware Status Page */
454 dev_priv->hw_status_page =
455 pci_alloc_consistent(dev->pdev, PAGE_SIZE,
456 &dev_priv->dma_status_page);
457 if (!dev_priv->hw_status_page) {
458 dev->dev_private = (void *)dev_priv;
459 i830_dma_cleanup(dev);
460 DRM_ERROR("Can not allocate hardware status page\n");
463 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
464 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
/* 0x02080 = HWS_PGA: tell the chip where the status page lives. */
466 I830_WRITE(0x02080, dev_priv->dma_status_page);
467 DRM_DEBUG("Enabled hardware status page\n");
469 /* Now we need to init our freelist */
470 if(i830_freelist_init(dev, dev_priv) != 0) {
471 dev->dev_private = (void *)dev_priv;
472 i830_dma_cleanup(dev);
473 DRM_ERROR("Not enough space in the status page for"
477 dev->dev_private = (void *)dev_priv;
/* INIT ioctl entry point: copies the init struct from userspace and
 * dispatches on its func field (the switch header is elided here) —
 * I830_INIT_DMA allocates dev_priv and calls i830_dma_initialize();
 * I830_CLEANUP_DMA tears everything down. */
482 int i830_dma_init(struct inode *inode, struct file *filp,
483 unsigned int cmd, unsigned long arg)
485 drm_file_t *priv = filp->private_data;
486 drm_device_t *dev = priv->dev;
487 drm_i830_private_t *dev_priv;
488 drm_i830_init_t init;
491 if (copy_from_user(&init, (void * __user) arg, sizeof(init)))
496 dev_priv = drm_alloc(sizeof(drm_i830_private_t),
498 if(dev_priv == NULL) return -ENOMEM;
499 retcode = i830_dma_initialize(dev, dev_priv, &init);
501 case I830_CLEANUP_DMA:
502 retcode = i830_dma_cleanup(dev);
/* 2D stipple-pattern command opcode and its payload fields. */
512 #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
513 #define ST1_ENABLE (1<<16)
514 #define ST1_MASK (0xffff)
516 /* Most efficient way to verify state for the i830 is as it is
517 * emitted. Non-conformant state is silently dropped.
/* Copy the client-supplied context state to the ring, skipping any
 * dword that does not look like a safe 3D state command; the fixed
 * BLENDCOLOR and MAP_COORD_SETBIND packets are emitted explicitly. */
519 static void i830EmitContextVerified( drm_device_t *dev,
522 drm_i830_private_t *dev_priv = dev->dev_private;
527 BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 );
529 for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) {
/* Accept only CMD_3D opcodes below the LOAD_INDIRECT range. */
531 if ((tmp & (7<<29)) == CMD_3D &&
532 (tmp & (0x1f<<24)) < (0x1d<<24)) {
536 DRM_ERROR("Skipping %d\n", i);
540 OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD );
541 OUT_RING( code[I830_CTXREG_BLENDCOLR] );
544 for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) {
546 if ((tmp & (7<<29)) == CMD_3D &&
547 (tmp & (0x1f<<24)) < (0x1d<<24)) {
551 DRM_ERROR("Skipping %d\n", i);
555 OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD );
556 OUT_RING( code[I830_CTXREG_MCSB1] );
/* Emit a verified texture-setup packet: only accepted when the leading
 * dword is MAP_INFO or a LOAD_STATE_IMMEDIATE_2 variant; anything else
 * is rejected with a printk and dropped. */
565 static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code )
567 drm_i830_private_t *dev_priv = dev->dev_private;
572 if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
573 (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) ==
574 (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) {
576 BEGIN_LP_RING( I830_TEX_SETUP_SIZE );
578 OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */
579 OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */
580 OUT_RING( code[I830_TEXREG_MI2] ); /* TM0S1 */
581 OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */
582 OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */
583 OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */
585 for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
597 printk("rejected packet %x\n", code[0]);
/* Emit `num` verified texture-blend state dwords to the ring
 * (per-dword verification body elided in this view). */
600 static void i830EmitTexBlendVerified( drm_device_t *dev,
604 drm_i830_private_t *dev_priv = dev->dev_private;
612 BEGIN_LP_RING( num + 1 );
614 for ( i = 0 ; i < num ; i++ ) {
/* Emit a 256-entry texture palette load (shared or per-map, chosen by
 * the elided is_shared branch).  As the in-source KW note below says,
 * the ring write is never advanced, so this is effectively a no-op. */
626 static void i830EmitTexPalette( drm_device_t *dev,
627 unsigned int *palette,
631 drm_i830_private_t *dev_priv = dev->dev_private;
637 BEGIN_LP_RING( 258 );
640 OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
644 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
646 for(i = 0; i < 256; i++) {
647 OUT_RING(palette[i]);
650 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
654 /* Need to do some additional checking when setting the dest buffer.
/* Verify and emit destination-buffer state: the color buffer address
 * must match the known front/back di1 values, depth is emitted with our
 * cached zi1, and scissor state is range-checked before use. */
656 static void i830EmitDestVerified( drm_device_t *dev,
659 drm_i830_private_t *dev_priv = dev->dev_private;
663 BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 );
/* Only allow the color buffer to point at our own front or back buffer. */
666 tmp = code[I830_DESTREG_CBUFADDR];
667 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
/* DESTBUFFER_INFO packets apparently need 8-byte alignment — pad if
 * the ring pointer is odd-qword aligned (padding lines elided). */
668 if (((int)outring) & 8) {
673 OUT_RING( CMD_OP_DESTBUFFER_INFO );
674 OUT_RING( BUF_3D_ID_COLOR_BACK |
675 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
680 OUT_RING( CMD_OP_DESTBUFFER_INFO );
681 OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
682 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
683 OUT_RING( dev_priv->zi1 );
686 DRM_ERROR("bad di1 %x (allow %x or %x)\n",
687 tmp, dev_priv->front_di1, dev_priv->back_di1);
694 OUT_RING( GFX_OP_DESTBUFFER_VARS );
695 OUT_RING( code[I830_DESTREG_DV1] );
697 OUT_RING( GFX_OP_DRAWRECT_INFO );
698 OUT_RING( code[I830_DESTREG_DR1] );
699 OUT_RING( code[I830_DESTREG_DR2] );
700 OUT_RING( code[I830_DESTREG_DR3] );
701 OUT_RING( code[I830_DESTREG_DR4] );
703 /* Need to verify this */
704 tmp = code[I830_DESTREG_SENABLE];
/* Scissor-enable dword may only differ from the opcode in bits 0-1. */
705 if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
708 DRM_ERROR("bad scissor enable\n");
712 OUT_RING( GFX_OP_SCISSOR_RECT );
713 OUT_RING( code[I830_DESTREG_SR1] );
714 OUT_RING( code[I830_DESTREG_SR2] );
/* Emit the polygon-stipple pattern command (the payload dword and ring
 * begin/advance lines are elided in this view). */
720 static void i830EmitStippleVerified( drm_device_t *dev,
723 drm_i830_private_t *dev_priv = dev->dev_private;
727 OUT_RING( GFX_OP_STIPPLE );
/* Flush all dirty client state from the SAREA to the ring, clearing each
 * dirty bit as its state is emitted.  Buffers must be emitted first. */
733 static void i830EmitState( drm_device_t *dev )
735 drm_i830_private_t *dev_priv = dev->dev_private;
736 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
737 unsigned int dirty = sarea_priv->dirty;
739 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
741 if (dirty & I830_UPLOAD_BUFFERS) {
742 i830EmitDestVerified( dev, sarea_priv->BufferState );
743 sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
746 if (dirty & I830_UPLOAD_CTX) {
747 i830EmitContextVerified( dev, sarea_priv->ContextState );
748 sarea_priv->dirty &= ~I830_UPLOAD_CTX;
751 if (dirty & I830_UPLOAD_TEX0) {
752 i830EmitTexVerified( dev, sarea_priv->TexState[0] );
753 sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
756 if (dirty & I830_UPLOAD_TEX1) {
757 i830EmitTexVerified( dev, sarea_priv->TexState[1] );
758 sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
761 if (dirty & I830_UPLOAD_TEXBLEND0) {
762 i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[0],
763 sarea_priv->TexBlendStateWordsUsed[0]);
764 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
767 if (dirty & I830_UPLOAD_TEXBLEND1) {
768 i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[1],
769 sarea_priv->TexBlendStateWordsUsed[1]);
770 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
/* Palettes: either one shared palette or individual per-map palettes. */
773 if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
774 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
776 if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
777 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
778 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
780 if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
781 i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
782 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
788 if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
789 i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
790 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
792 if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
793 i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
/* NOTE(review): copy-paste slip — this is the N(3) branch but it clears
 * the N(2) dirty bit, leaving N(3) dirty forever.  Should presumably be
 * ~I830_UPLOAD_TEX_PALETTE_N(3); confirm before changing. */
794 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
801 if (dirty & I830_UPLOAD_STIPPLE) {
802 i830EmitStippleVerified( dev,
803 sarea_priv->StippleState);
804 sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
807 if (dirty & I830_UPLOAD_TEX2) {
808 i830EmitTexVerified( dev, sarea_priv->TexState2 );
809 sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
812 if (dirty & I830_UPLOAD_TEX3) {
813 i830EmitTexVerified( dev, sarea_priv->TexState3 );
814 sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
818 if (dirty & I830_UPLOAD_TEXBLEND2) {
819 i830EmitTexBlendVerified(
821 sarea_priv->TexBlendState2,
822 sarea_priv->TexBlendStateWordsUsed2);
824 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
827 if (dirty & I830_UPLOAD_TEXBLEND3) {
828 i830EmitTexBlendVerified(
830 sarea_priv->TexBlendState3,
831 sarea_priv->TexBlendStateWordsUsed3);
832 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
836 /* ================================================================
837 * Performance monitoring functions
/* Blit a solid colored rectangle (used for the on-screen perf boxes).
 * Coordinates are relative to cliprect 0; the color is packed per the
 * current cpp (32bpp ARGB or 16bpp 565). */
840 static void i830_fill_box( drm_device_t *dev,
841 int x, int y, int w, int h,
842 int r, int g, int b )
844 drm_i830_private_t *dev_priv = dev->dev_private;
846 unsigned int BR13, CMD;
/* 0xF0 = PATCOPY raster op; bit 24 selects 16bpp color depth base. */
849 BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24);
850 CMD = XY_COLOR_BLT_CMD;
851 x += dev_priv->sarea_priv->boxes[0].x1;
852 y += dev_priv->sarea_priv->boxes[0].y1;
854 if (dev_priv->cpp == 4) {
856 CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
857 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
859 color = (((r & 0xf8) << 8) |
867 OUT_RING( (y << 16) | x );
868 OUT_RING( ((y+h) << 16) | (x+w) );
/* Draw into whichever buffer is currently visible (page flipping). */
870 if ( dev_priv->current_page == 1 ) {
871 OUT_RING( dev_priv->front_offset );
873 OUT_RING( dev_priv->back_offset );
/* Draw the colored diagnostic boxes in the corner of the screen, one per
 * perf_boxes flag recorded since the last swap, then clear the flags. */
880 static void i830_cp_performance_boxes( drm_device_t *dev )
882 drm_i830_private_t *dev_priv = dev->dev_private;
884 /* Purple box for page flipping
886 if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP )
887 i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 );
889 /* Red box if we have to wait for idle at any point
891 if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT )
892 i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 );
894 /* Blue box: lost context?
896 if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT )
897 i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 );
899 /* Yellow box for texture swaps
901 if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD )
902 i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 );
904 /* Green box if hardware never idles (as far as we can tell)
906 if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) )
907 i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 );
910 /* Draw bars indicating number of buffers allocated
911 * (not a great measure, easily confused)
913 if (dev_priv->dma_used) {
914 int bar = dev_priv->dma_used / 10240;
915 if (bar > 100) bar = 100;
916 if (bar < 1) bar = 1;
917 i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 );
918 dev_priv->dma_used = 0;
/* Start accumulating fresh flags for the next frame. */
921 dev_priv->sarea_priv->perf_boxes = 0;
/* Clear the requested buffers (front/back/depth) within every valid
 * cliprect using solid-color blits.  When page-flipped, front and back
 * flags are swapped so "front" always means the visible buffer. */
924 static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
925 unsigned int clear_color,
926 unsigned int clear_zval,
927 unsigned int clear_depthmask)
929 drm_i830_private_t *dev_priv = dev->dev_private;
930 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
931 int nbox = sarea_priv->nbox;
932 drm_clip_rect_t *pbox = sarea_priv->boxes;
933 int pitch = dev_priv->pitch;
934 int cpp = dev_priv->cpp;
936 unsigned int BR13, CMD, D_CMD;
/* If page 1 is displayed, swap the meaning of FRONT and BACK. */
940 if ( dev_priv->current_page == 1 ) {
941 unsigned int tmp = flags;
943 flags &= ~(I830_FRONT | I830_BACK);
944 if ( tmp & I830_FRONT ) flags |= I830_BACK;
945 if ( tmp & I830_BACK ) flags |= I830_FRONT;
948 i830_kernel_lost_context(dev);
/* Pick blit command/BR13 per color depth (switch header elided);
 * 32bpp honours the depth write mask via ALPHA/RGB write enables. */
952 BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
953 D_CMD = CMD = XY_COLOR_BLT_CMD;
956 BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
957 CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
958 XY_COLOR_BLT_WRITE_RGB);
959 D_CMD = XY_COLOR_BLT_CMD;
960 if(clear_depthmask & 0x00ffffff)
961 D_CMD |= XY_COLOR_BLT_WRITE_RGB;
962 if(clear_depthmask & 0xff000000)
963 D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
966 BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
967 D_CMD = CMD = XY_COLOR_BLT_CMD;
971 if (nbox > I830_NR_SAREA_CLIPRECTS)
972 nbox = I830_NR_SAREA_CLIPRECTS;
974 for (i = 0 ; i < nbox ; i++, pbox++) {
/* Skip malformed or out-of-bounds cliprects from userspace. */
975 if (pbox->x1 > pbox->x2 ||
976 pbox->y1 > pbox->y2 ||
977 pbox->x2 > dev_priv->w ||
978 pbox->y2 > dev_priv->h)
981 if ( flags & I830_FRONT ) {
982 DRM_DEBUG("clear front\n");
986 OUT_RING( (pbox->y1 << 16) | pbox->x1 );
987 OUT_RING( (pbox->y2 << 16) | pbox->x2 );
988 OUT_RING( dev_priv->front_offset );
989 OUT_RING( clear_color );
993 if ( flags & I830_BACK ) {
994 DRM_DEBUG("clear back\n");
998 OUT_RING( (pbox->y1 << 16) | pbox->x1 );
999 OUT_RING( (pbox->y2 << 16) | pbox->x2 );
1000 OUT_RING( dev_priv->back_offset );
1001 OUT_RING( clear_color );
1005 if ( flags & I830_DEPTH ) {
1006 DRM_DEBUG("clear depth\n");
1010 OUT_RING( (pbox->y1 << 16) | pbox->x1 );
1011 OUT_RING( (pbox->y2 << 16) | pbox->x2 );
1012 OUT_RING( dev_priv->depth_offset );
1013 OUT_RING( clear_zval );
/* Copy the back buffer to the visible buffer for each valid cliprect
 * (a blit "swap", as opposed to a page flip).  Source and destination
 * depend on which page is currently displayed. */
1019 static void i830_dma_dispatch_swap( drm_device_t *dev )
1021 drm_i830_private_t *dev_priv = dev->dev_private;
1022 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
1023 int nbox = sarea_priv->nbox;
1024 drm_clip_rect_t *pbox = sarea_priv->boxes;
1025 int pitch = dev_priv->pitch;
1026 int cpp = dev_priv->cpp;
1028 unsigned int CMD, BR13;
1031 DRM_DEBUG("swapbuffers\n");
1033 i830_kernel_lost_context(dev);
/* Overlay the diagnostic boxes just before they become visible. */
1035 if (dev_priv->do_boxes)
1036 i830_cp_performance_boxes( dev );
/* 0xCC = SRCCOPY raster op; command per color depth (switch elided). */
1040 BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
1041 CMD = XY_SRC_COPY_BLT_CMD;
1044 BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25);
1045 CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
1046 XY_SRC_COPY_BLT_WRITE_RGB);
1049 BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
1050 CMD = XY_SRC_COPY_BLT_CMD;
1055 if (nbox > I830_NR_SAREA_CLIPRECTS)
1056 nbox = I830_NR_SAREA_CLIPRECTS;
1058 for (i = 0 ; i < nbox; i++, pbox++)
/* Skip malformed or out-of-bounds cliprects from userspace. */
1060 if (pbox->x1 > pbox->x2 ||
1061 pbox->y1 > pbox->y2 ||
1062 pbox->x2 > dev_priv->w ||
1063 pbox->y2 > dev_priv->h)
1066 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
1068 pbox->x2, pbox->y2);
1073 OUT_RING( (pbox->y1 << 16) | pbox->x1 );
1074 OUT_RING( (pbox->y2 << 16) | pbox->x2 );
/* Destination: the buffer currently being displayed. */
1076 if (dev_priv->current_page == 0)
1077 OUT_RING( dev_priv->front_offset );
1079 OUT_RING( dev_priv->back_offset );
1081 OUT_RING( (pbox->y1 << 16) | pbox->x1 );
1082 OUT_RING( BR13 & 0xffff );
/* Source: the off-screen buffer. */
1084 if (dev_priv->current_page == 0)
1085 OUT_RING( dev_priv->back_offset );
1087 OUT_RING( dev_priv->front_offset );
/* Queue an asynchronous page flip: flush caches, point the display at
 * the other buffer, wait for the flip to land, and record the new
 * current page in both driver state and the SAREA. */
1093 static void i830_dma_dispatch_flip( drm_device_t *dev )
1095 drm_i830_private_t *dev_priv = dev->dev_private;
1098 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
1100 dev_priv->current_page,
1101 dev_priv->sarea_priv->pf_current_page);
1103 i830_kernel_lost_context(dev);
1105 if (dev_priv->do_boxes) {
1106 dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
1107 i830_cp_performance_boxes( dev );
/* Flush the map cache before changing the display base address. */
1112 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
1117 OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP );
/* Toggle which buffer the display scans out. */
1119 if ( dev_priv->current_page == 0 ) {
1120 OUT_RING( dev_priv->back_offset );
1121 dev_priv->current_page = 1;
1123 OUT_RING( dev_priv->front_offset );
1124 dev_priv->current_page = 0;
1131 OUT_RING( MI_WAIT_FOR_EVENT |
1132 MI_WAIT_FOR_PLANE_A_FLIP );
/* Let clients see which page is now visible. */
1137 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Dispatch a client vertex buffer: take ownership (CLIENT->HARDWARE),
 * flush dirty state, patch the primitive header / batch terminator into
 * the buffer, then emit one batch-buffer call per cliprect.  On discard,
 * the GPU writes the buffer's in_use word back to FREE via the status
 * page (STORE_DWORD_IDX at the end). */
1140 static void i830_dma_dispatch_vertex(drm_device_t *dev,
1145 drm_i830_private_t *dev_priv = dev->dev_private;
1146 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1147 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
1148 drm_clip_rect_t *box = sarea_priv->boxes;
1149 int nbox = sarea_priv->nbox;
1150 unsigned long address = (unsigned long)buf->bus_address;
1151 unsigned long start = address - dev->agp->base;
1155 i830_kernel_lost_context(dev);
1157 if (nbox > I830_NR_SAREA_CLIPRECTS)
1158 nbox = I830_NR_SAREA_CLIPRECTS;
/* Atomically claim the buffer from the client for the hardware. */
1161 u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1163 if(u != I830_BUF_CLIENT) {
1164 DRM_DEBUG("xxxx 2\n");
1171 if (sarea_priv->dirty)
1172 i830EmitState( dev );
1174 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
1175 address, used, nbox);
1177 dev_priv->counter++;
1178 DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
1179 DRM_DEBUG( "i830_dma_dispatch\n");
1180 DRM_DEBUG( "start : %lx\n", start);
1181 DRM_DEBUG( "used : %d\n", used);
1182 DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
/* Patch the mapped buffer: primitive header up front, terminator at
 * the end when using MI_BATCH_BUFFER_START. */
1184 if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
1185 u32 *vp = buf_priv->kernel_virtual;
1187 vp[0] = (GFX_OP_PRIMITIVE |
1188 sarea_priv->vertex_prim |
1191 if (dev_priv->use_mi_batchbuffer_start) {
1192 vp[used/4] = MI_BATCH_BUFFER_END;
1201 i830_unmap_buffer(buf);
/* One DRAWRECT + batch call per cliprect. */
1208 OUT_RING( GFX_OP_DRAWRECT_INFO );
1209 OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
1210 OUT_RING( box[i].x1 | (box[i].y1<<16) );
1211 OUT_RING( box[i].x2 | (box[i].y2<<16) );
1212 OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
1217 if (dev_priv->use_mi_batchbuffer_start) {
1219 OUT_RING( MI_BATCH_BUFFER_START | (2<<6) );
1220 OUT_RING( start | MI_BATCH_NON_SECURE );
/* Older MI_BATCH_BUFFER form needs an explicit end address. */
1225 OUT_RING( MI_BATCH_BUFFER );
1226 OUT_RING( start | MI_BATCH_NON_SECURE );
1227 OUT_RING( start + used - 4 );
1232 } while (++i < nbox);
1236 dev_priv->counter++;
1238 (void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
/* Have the GPU stamp the counter and mark the buffer FREE in the
 * status page once the batch has been consumed. */
1242 OUT_RING( CMD_STORE_DWORD_IDX );
1244 OUT_RING( dev_priv->counter );
1245 OUT_RING( CMD_STORE_DWORD_IDX );
1246 OUT_RING( buf_priv->my_use_idx );
1247 OUT_RING( I830_BUF_FREE );
1248 OUT_RING( CMD_REPORT_HEAD );
/* Drain the GPU: flush caches, ask for a head report, then wait until
 * the ring is almost completely empty (Size - 8 bytes free). */
1255 void i830_dma_quiescent(drm_device_t *dev)
1257 drm_i830_private_t *dev_priv = dev->dev_private;
1260 i830_kernel_lost_context(dev);
1263 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
1264 OUT_RING( CMD_REPORT_HEAD );
1269 i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
/* Wait for the ring to drain, then reclaim every buffer the hardware
 * has finished with (HARDWARE -> FREE).  Buffers still owned by clients
 * are left alone. */
1272 static int i830_flush_queue(drm_device_t *dev)
1274 drm_i830_private_t *dev_priv = dev->dev_private;
1275 drm_device_dma_t *dma = dev->dma;
1279 i830_kernel_lost_context(dev);
1282 OUT_RING( CMD_REPORT_HEAD );
1286 i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
1288 for (i = 0; i < dma->buf_count; i++) {
1289 drm_buf_t *buf = dma->buflist[ i ];
1290 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1292 int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
1295 if (used == I830_BUF_HARDWARE)
1296 DRM_DEBUG("reclaimed from HARDWARE\n");
1297 if (used == I830_BUF_CLIENT)
1298 DRM_DEBUG("still on client\n");
1304 /* Must be called with the lock held */
/* Reclaim all buffers owned by a departing client (file handle): flush
 * the queue first, then flip each of the client's buffers back to FREE
 * and drop its mapped state. */
1305 void i830_reclaim_buffers(drm_device_t *dev, struct file *filp)
1307 drm_device_dma_t *dma = dev->dma;
/* Nothing to do if DMA was never initialized. */
1311 if (!dev->dev_private) return;
1312 if (!dma->buflist) return;
1314 i830_flush_queue(dev);
1316 for (i = 0; i < dma->buf_count; i++) {
1317 drm_buf_t *buf = dma->buflist[ i ];
1318 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1320 if (buf->filp == filp && buf_priv) {
1321 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1324 if (used == I830_BUF_CLIENT)
1325 DRM_DEBUG("reclaimed from client\n");
1326 if(buf_priv->currently_mapped == I830_BUF_MAPPED)
1327 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
/* FLUSH ioctl: requires the hardware lock, then drains the queue. */
1332 int i830_flush_ioctl(struct inode *inode, struct file *filp,
1333 unsigned int cmd, unsigned long arg)
1335 drm_file_t *priv = filp->private_data;
1336 drm_device_t *dev = priv->dev;
1338 LOCK_TEST_WITH_RETURN(dev, filp);
1340 i830_flush_queue(dev);
1344 int i830_dma_vertex(struct inode *inode, struct file *filp,
1345 unsigned int cmd, unsigned long arg)
1347 drm_file_t *priv = filp->private_data;
1348 drm_device_t *dev = priv->dev;
1349 drm_device_dma_t *dma = dev->dma;
1350 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1351 u32 *hw_status = dev_priv->hw_status_page;
1352 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1353 dev_priv->sarea_priv;
1354 drm_i830_vertex_t vertex;
1356 if (copy_from_user(&vertex, (drm_i830_vertex_t __user *)arg, sizeof(vertex)))
1359 LOCK_TEST_WITH_RETURN(dev, filp);
1361 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1362 vertex.idx, vertex.used, vertex.discard);
1364 if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
1366 i830_dma_dispatch_vertex( dev,
1367 dma->buflist[ vertex.idx ],
1368 vertex.discard, vertex.used );
1370 sarea_priv->last_enqueue = dev_priv->counter-1;
1371 sarea_priv->last_dispatch = (int) hw_status[5];
/* CLEAR ioctl: copy the clear request from userspace and dispatch it.
 * Bails out if DMA was never initialized (dev_private == NULL). */
1376 int i830_clear_bufs(struct inode *inode, struct file *filp,
1377 unsigned int cmd, unsigned long arg)
1379 drm_file_t *priv = filp->private_data;
1380 drm_device_t *dev = priv->dev;
1381 drm_i830_clear_t clear;
1383 if (copy_from_user(&clear, (drm_i830_clear_t __user *)arg, sizeof(clear)))
1386 LOCK_TEST_WITH_RETURN(dev, filp);
1388 /* GH: Someone's doing nasty things... */
1389 if (!dev->dev_private) {
1393 i830_dma_dispatch_clear( dev, clear.flags,
1396 clear.clear_depthmask);
/* DRM_IOCTL_I830_SWAP handler: dispatch a back-to-front buffer swap.
 * Requires the DRM hardware lock (LOCK_TEST_WITH_RETURN).
 */
1400 int i830_swap_bufs(struct inode *inode, struct file *filp,
1401 unsigned int cmd, unsigned long arg)
1403 drm_file_t *priv = filp->private_data;
1404 drm_device_t *dev = priv->dev;
1406 DRM_DEBUG("i830_swap_bufs\n");
1408 LOCK_TEST_WITH_RETURN(dev, filp);
1410 i830_dma_dispatch_swap( dev );
1416 /* Not sure why this isn't set all the time:
/* Enable page flipping: start on page 0 and mirror the current page
 * into the SAREA (pf_current_page) so clients can see which page is
 * being displayed.  Called lazily from i830_flip_bufs on first use.
 */
1418 static void i830_do_init_pageflip( drm_device_t *dev )
1420 drm_i830_private_t *dev_priv = dev->dev_private;
1422 DRM_DEBUG("%s\n", __FUNCTION__);
1423 dev_priv->page_flipping = 1;
1424 dev_priv->current_page = 0;
1425 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Disable page flipping.  If the hardware is currently showing the
 * non-zero page, dispatch one more flip first so the display returns to
 * page 0 before flipping is turned off.
 */
1428 int i830_do_cleanup_pageflip( drm_device_t *dev )
1430 drm_i830_private_t *dev_priv = dev->dev_private;
1432 DRM_DEBUG("%s\n", __FUNCTION__);
1433 if (dev_priv->current_page != 0)
1434 i830_dma_dispatch_flip( dev );
1436 dev_priv->page_flipping = 0;
/* DRM_IOCTL_I830_FLIP handler: perform a page flip.  Lazily enables
 * page flipping on first use (see i830_do_init_pageflip), then
 * dispatches the flip.  Requires the DRM hardware lock.
 */
1440 int i830_flip_bufs(struct inode *inode, struct file *filp,
1441 unsigned int cmd, unsigned long arg)
1443 drm_file_t *priv = filp->private_data;
1444 drm_device_t *dev = priv->dev;
1445 drm_i830_private_t *dev_priv = dev->dev_private;
1447 DRM_DEBUG("%s\n", __FUNCTION__);
1449 LOCK_TEST_WITH_RETURN(dev, filp);
1451 if (!dev_priv->page_flipping)
1452 i830_do_init_pageflip( dev );
1454 i830_dma_dispatch_flip( dev );
/* DRM_IOCTL_I830_GETAGE handler: refresh sarea_priv->last_dispatch from
 * the hardware status page so clients can see how far the hardware has
 * progressed.  hw_status[5] is the same status-page dword the vertex
 * and getbuf paths publish — presumably the ring breadcrumb.
 */
1458 int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1461 drm_file_t *priv = filp->private_data;
1462 drm_device_t *dev = priv->dev;
1463 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1464 u32 *hw_status = dev_priv->hw_status_page;
1465 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1466 dev_priv->sarea_priv;
1468 sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I830_GETBUF handler: hand a free DMA buffer to the calling
 * client.  Copies the request in, asks i830_dma_get_buffer() for a
 * buffer (d.granted reports success), copies the result back to
 * userspace, and republishes the hardware age into the SAREA.
 * Requires the DRM hardware lock.
 * NOTE(review): the request is read as drm_i830_dma_t but written back
 * through a drm_dma_t cast — harmless only if the layouts agree;
 * confirm against the full source.
 */
1472 int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1475 drm_file_t *priv = filp->private_data;
1476 drm_device_t *dev = priv->dev;
1479 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1480 u32 *hw_status = dev_priv->hw_status_page;
1481 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1482 dev_priv->sarea_priv;
1484 DRM_DEBUG("getbuf\n");
1485 if (copy_from_user(&d, (drm_i830_dma_t __user *)arg, sizeof(d)))
1488 LOCK_TEST_WITH_RETURN(dev, filp);
1492 retcode = i830_dma_get_buffer(dev, &d, filp);
1494 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1495 current->pid, retcode, d.granted);
1497 if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
1499 sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I830_COPY handler: intentionally a no-op stub. */
1504 int i830_copybuf(struct inode *inode,
1509 /* Never copy - 2.4.x doesn't need it */
/* DRM_IOCTL_I830_DOCOPY handler: companion stub to i830_copybuf. */
1513 int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1521 int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
1524 drm_file_t *priv = filp->private_data;
1525 drm_device_t *dev = priv->dev;
1526 drm_i830_private_t *dev_priv = dev->dev_private;
1527 drm_i830_getparam_t param;
1531 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1535 if (copy_from_user(¶m, (drm_i830_getparam_t __user *)arg, sizeof(param) ))
1538 switch( param.param ) {
1539 case I830_PARAM_IRQ_ACTIVE:
1540 value = dev->irq_enabled;
1546 if ( copy_to_user( param.value, &value, sizeof(int) ) ) {
1547 DRM_ERROR( "copy_to_user\n" );
1555 int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
1558 drm_file_t *priv = filp->private_data;
1559 drm_device_t *dev = priv->dev;
1560 drm_i830_private_t *dev_priv = dev->dev_private;
1561 drm_i830_setparam_t param;
1564 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1568 if (copy_from_user(¶m, (drm_i830_setparam_t __user *)arg, sizeof(param) ))
1571 switch( param.param ) {
1572 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1573 dev_priv->use_mi_batchbuffer_start = param.value;
/* DRM core pretakedown hook: tear down the driver's DMA state. */
1583 void i830_driver_pretakedown(drm_device_t *dev)
1585 i830_dma_cleanup( dev );
/* DRM core per-file release hook: reclaim any DMA buffers still owned
 * by the closing file so they return to the free pool. */
1588 void i830_driver_release(drm_device_t *dev, struct file *filp)
1590 i830_reclaim_buffers(dev, filp);
/* DRM core quiescence hook: wait for the hardware to go idle by
 * delegating to i830_dma_quiescent().  (Body continues past this
 * excerpt; presumably returns 0 — confirm against full source.) */
1593 int i830_driver_dma_quiescent(drm_device_t *dev)
1595 i830_dma_quiescent( dev );