1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
40 #include <linux/pagemap.h>
/* Ownership states for a DMA buffer, stored in the per-buffer in_use word
 * on the hardware status page (see i810_freelist_init); transitions are
 * made with cmpxchg in i810_freelist_get/put and the dispatch paths. */
42 #define I810_BUF_FREE 2
43 #define I810_BUF_CLIENT 1
44 #define I810_BUF_HARDWARE 0
/* Mapping state for buf_priv->currently_mapped (see i810_map_buffer). */
46 #define I810_BUF_UNMAPPED 0
47 #define I810_BUF_MAPPED 1
/* Kernels <= 2.4.2 have no rwsem write lock; fall back to plain down(). */
49 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
50 #define down_write down
/* Debug helper: dump the hardware status page — interrupt status, ring
 * head pointers, driver counter, then one in_use word per DMA buffer
 * (words 6..buf_count+5, as laid out by i810_freelist_init). */
54 static inline void i810_print_status_page(drm_device_t *dev)
56 	drm_device_dma_t *dma = dev->dma;
57 	drm_i810_private_t *dev_priv = dev->dev_private;
58 	u32 *temp = dev_priv->hw_status_page;
61 	DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
62 	DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
63 	DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
64 	DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
65 	DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
66 	DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
/* Per-buffer ownership words follow the fixed header at index 6. */
67 	for(i = 6; i < dma->buf_count + 6; i++) {
68 	DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
/* Claim a free DMA buffer for a client.  Scans the buffer list and
 * atomically flips the first FREE in_use word to CLIENT via cmpxchg,
 * so concurrent callers cannot claim the same buffer. */
72 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
74 	drm_device_dma_t *dma = dev->dma;
78 	/* Linear search might not be the best solution */
80 	for (i = 0; i < dma->buf_count; i++) {
81 	drm_buf_t *buf = dma->buflist[ i ];
82 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
83 	/* In use is already a pointer */
/* Atomic FREE -> CLIENT transition; only the winner sees I810_BUF_FREE. */
84 	used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
86 	if (used == I810_BUF_FREE) {
93 /* This should only be called if the buffer is not sent to the hardware
94 * yet, the hardware updates in use for us once its on the ring buffer.
 *
 * Returns a buffer from CLIENT back to FREE; logs an error if the buffer
 * was not actually owned by a client (CLIENT -> FREE cmpxchg failed).
*/
97 static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
99 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
102 	/* In use is already a pointer */
103 	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
104 	if (used != I810_BUF_CLIENT) {
105 	DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
/* file_operations temporarily swapped onto the DRM fd by i810_map_buffer
 * so that the do_mmap() call lands in i810_mmap_buffers instead of the
 * generic DRM mmap handler. */
112 static struct file_operations i810_buffer_fops = {
115 	.release = DRM(release),
117 	.mmap = i810_mmap_buffers,
118 	.fasync = DRM(fasync),
/* mmap handler used only while i810_map_buffer holds mmap_sem: maps the
 * buffer stashed in dev_priv->mmap_buffer into the caller's address space
 * and marks it MAPPED.  Not intended to be reached via a normal mmap(2). */
121 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
123 	drm_file_t *priv = filp->private_data;
125 	drm_i810_private_t *dev_priv;
127 	drm_i810_buf_priv_t *buf_priv;
/* The buffer to map was parked here by i810_map_buffer just before do_mmap. */
131 	dev_priv = dev->dev_private;
132 	buf = dev_priv->mmap_buffer;
133 	buf_priv = buf->dev_private;
/* Device memory: no core dumps / fork copies of this mapping. */
135 	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
138 	buf_priv->currently_mapped = I810_BUF_MAPPED;
141 	if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
143 	vma->vm_end - vma->vm_start,
144 	vma->vm_page_prot)) return -EAGAIN;
148 static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
150 drm_file_t *priv = filp->private_data;
151 drm_device_t *dev = priv->dev;
152 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
153 drm_i810_private_t *dev_priv = dev->dev_private;
154 struct file_operations *old_fops;
157 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
160 down_write( ¤t->mm->mmap_sem );
161 old_fops = filp->f_op;
162 filp->f_op = &i810_buffer_fops;
163 dev_priv->mmap_buffer = buf;
164 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
165 PROT_READ|PROT_WRITE,
168 dev_priv->mmap_buffer = NULL;
169 filp->f_op = old_fops;
170 if ((unsigned long)buf_priv->virtual > -1024UL) {
172 DRM_ERROR("mmap error\n");
173 retcode = (signed int)buf_priv->virtual;
174 buf_priv->virtual = NULL;
176 up_write( ¤t->mm->mmap_sem );
181 static int i810_unmap_buffer(drm_buf_t *buf)
183 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
186 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
189 down_write(¤t->mm->mmap_sem);
190 retcode = do_munmap(current->mm,
191 (unsigned long)buf_priv->virtual,
192 (size_t) buf->total);
193 up_write(¤t->mm->mmap_sem);
195 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
196 buf_priv->virtual = NULL;
/* Service the "get buffer" ioctl: claim a free buffer, map it into the
 * caller's address space, and fill in the request descriptor (index,
 * size, user virtual address).  On map failure the buffer is returned
 * to the freelist. */
201 static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
205 	drm_i810_buf_priv_t *buf_priv;
208 	buf = i810_freelist_get(dev);
211 	DRM_DEBUG("retcode=%d\n", retcode);
215 	retcode = i810_map_buffer(buf, filp);
/* Mapping failed: give the buffer back so it isn't leaked as CLIENT. */
217 	i810_freelist_put(dev, buf);
218 	DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
222 	buf_priv = buf->dev_private;
224 	d->request_idx = buf->idx;
225 	d->request_size = buf->total;
226 	d->virtual = buf_priv->virtual;
/* Tear down DMA state: disable the IRQ, unmap the ring, free the status
 * page (and point the hardware at a dummy address), release dev_private,
 * and unmap every buffer's kernel-side ioremap. */
231 int i810_dma_cleanup(drm_device_t *dev)
233 	drm_device_dma_t *dma = dev->dma;
236 	/* Make sure interrupts are disabled here because the uninstall ioctl
237 	* may not have been called from userspace and after dev_private
238 	* is freed, it's too late.
240 	if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
243 	if (dev->dev_private) {
245 	drm_i810_private_t *dev_priv =
246 	(drm_i810_private_t *) dev->dev_private;
248 	if (dev_priv->ring.virtual_start) {
249 	DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
250 	dev_priv->ring.Size, dev);
251 	if (dev_priv->hw_status_page) {
252 	pci_free_consistent(dev->pdev, PAGE_SIZE,
254 	dev_priv->hw_status_page,
255 	dev_priv->dma_status_page);
256 	/* Need to rewrite hardware status page */
257 	I810_WRITE(0x02080, 0x1ffff000);
259 	DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
261 	dev->dev_private = NULL;
/* Release the kernel-side mapping of each DMA buffer. */
263 	for (i = 0; i < dma->buf_count; i++) {
264 	drm_buf_t *buf = dma->buflist[ i ];
265 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
266 	if ( buf_priv->kernel_virtual && buf->total )
267 	DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
/* Busy-wait until at least n bytes of space are free in the LP ring.
 * The 3-second timeout is restarted whenever the head pointer moves, so
 * only a truly stuck engine is reported as a lockup. */
273 static int i810_wait_ring(drm_device_t *dev, int n)
275 	drm_i810_private_t *dev_priv = dev->dev_private;
276 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
279 	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
281 	end = jiffies + (HZ*3);
282 	while (ring->space < n) {
283 	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
/* Free space = head - (tail + 8), wrapped around the ring size. */
284 	ring->space = ring->head - (ring->tail+8);
285 	if (ring->space < 0) ring->space += ring->Size;
/* Head moved: hardware is making progress, so restart the timeout. */
287 	if (ring->head != last_head) {
288 	end = jiffies + (HZ*3);
289 	last_head = ring->head;
293 	if (time_before(end, jiffies)) {
294 	DRM_ERROR("space: %d wanted %d\n", ring->space, n);
295 	DRM_ERROR("lockup\n");
/* Resynchronize the software ring bookkeeping (head/tail/space) with the
 * hardware registers; called before emitting commands in case another
 * context has touched the ring since we last used it. */
305 static void i810_kernel_lost_context(drm_device_t *dev)
307 	drm_i810_private_t *dev_priv = dev->dev_private;
308 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
310 	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
311 	ring->tail = I810_READ(LP_RING + RING_TAIL);
312 	ring->space = ring->head - (ring->tail+8);
313 	if (ring->space < 0) ring->space += ring->Size;
/* Build the freelist: give each DMA buffer an in_use word inside the
 * hardware status page (so the GPU can flip it via STORE_DWORD_IDX),
 * mark every buffer FREE, and ioremap its backing pages for kernel use.
 * The status page is one 4K page, hence the 1019-buffer capacity limit
 * (1024 dwords minus the fixed header). */
316 static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
318 	drm_device_dma_t *dma = dev->dma;
320 	u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
323 	if (dma->buf_count > 1019) {
324 	/* Not enough space in the status page for the freelist */
328 	for (i = 0; i < dma->buf_count; i++) {
329 	drm_buf_t *buf = dma->buflist[ i ];
330 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
332 	buf_priv->in_use = hw_status++;
333 	buf_priv->my_use_idx = my_idx;
336 	*buf_priv->in_use = I810_BUF_FREE;
338 	buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
/* One-time DMA setup from the INIT ioctl: locate the SAREA, mmio and
 * buffer maps, ioremap the ring, record geometry/offsets from userspace,
 * allocate and program the hardware status page, then build the freelist.
 * Every failure path publishes dev_priv first so i810_dma_cleanup() can
 * unwind whatever was set up so far. */
344 static int i810_dma_initialize(drm_device_t *dev,
345 	drm_i810_private_t *dev_priv,
346 	drm_i810_init_t *init)
348 	struct list_head *list;
350 	memset(dev_priv, 0, sizeof(drm_i810_private_t));
/* The SAREA is the SHM map flagged as containing the hardware lock. */
352 	list_for_each(list, &dev->maplist->head) {
353 	drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
355 	r_list->map->type == _DRM_SHM &&
356 	r_list->map->flags & _DRM_CONTAINS_LOCK ) {
357 	dev_priv->sarea_map = r_list->map;
361 	if (!dev_priv->sarea_map) {
362 	dev->dev_private = (void *)dev_priv;
363 	i810_dma_cleanup(dev);
364 	DRM_ERROR("can not find sarea!\n");
367 	DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
368 	if (!dev_priv->mmio_map) {
369 	dev->dev_private = (void *)dev_priv;
370 	i810_dma_cleanup(dev);
371 	DRM_ERROR("can not find mmio map!\n");
374 	DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
375 	if (!dev_priv->buffer_map) {
376 	dev->dev_private = (void *)dev_priv;
377 	i810_dma_cleanup(dev);
378 	DRM_ERROR("can not find dma buffer map!\n");
382 	dev_priv->sarea_priv = (drm_i810_sarea_t *)
383 	((u8 *)dev_priv->sarea_map->handle +
384 	init->sarea_priv_offset);
386 	dev_priv->ring.Start = init->ring_start;
387 	dev_priv->ring.End = init->ring_end;
388 	dev_priv->ring.Size = init->ring_size;
390 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
392 	init->ring_size, dev);
394 	if (dev_priv->ring.virtual_start == NULL) {
395 	dev->dev_private = (void *) dev_priv;
396 	i810_dma_cleanup(dev);
397 	DRM_ERROR("can not ioremap virtual address for"
/* Ring size must be a power of two for this mask to be valid. */
402 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
404 	dev_priv->w = init->w;
405 	dev_priv->h = init->h;
406 	dev_priv->pitch = init->pitch;
407 	dev_priv->back_offset = init->back_offset;
408 	dev_priv->depth_offset = init->depth_offset;
409 	dev_priv->front_offset = init->front_offset;
411 	dev_priv->overlay_offset = init->overlay_offset;
412 	dev_priv->overlay_physical = init->overlay_physical;
/* Precomputed DESTBUFFER_INFO words: offset | pitch bits. */
414 	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
415 	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
416 	dev_priv->zi1 = init->depth_offset | init->pitch_bits;
418 	/* Program Hardware Status Page */
419 	dev_priv->hw_status_page =
420 	pci_alloc_consistent(dev->pdev, PAGE_SIZE,
421 	&dev_priv->dma_status_page);
422 	if (!dev_priv->hw_status_page) {
423 	dev->dev_private = (void *)dev_priv;
424 	i810_dma_cleanup(dev);
425 	DRM_ERROR("Can not allocate hardware status page\n");
428 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
429 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
/* 0x2080 is the hardware status page address register. */
431 	I810_WRITE(0x02080, dev_priv->dma_status_page);
432 	DRM_DEBUG("Enabled hardware status page\n");
434 	/* Now we need to init our freelist */
435 	if (i810_freelist_init(dev, dev_priv) != 0) {
436 	dev->dev_private = (void *)dev_priv;
437 	i810_dma_cleanup(dev);
438 	DRM_ERROR("Not enough space in the status page for"
442 	dev->dev_private = (void *)dev_priv;
447 /* i810 DRM version 1.1 used a smaller init structure with different
448 * ordering of values than is currently used (drm >= 1.2). There is
449 * no defined way to detect the XFree version to correct this problem,
450 * however by checking using this procedure we can detect the correct
453 * #1 Read the Smaller init structure from user-space
454 * #2 Verify the overlay_physical is a valid physical address, or NULL
455 * If it isn't then we have a v1.1 client. Fix up params.
456 * If it is, then we have a 1.2 client... get the rest of the data.
/* Read the INIT args from userspace, detecting whether the client uses
 * the old v1.1 layout or the v1.2 layout (see the comment block above):
 * the field that is overlay_physical in v1.2 holds other data in v1.1,
 * so an implausible "physical address" (0 or <= 4096) marks a v1.1
 * client whose fields must be shuffled into the v1.2 positions. */
458 int i810_dma_init_compat(drm_i810_init_t *init, unsigned long arg)
461 	/* Get v1.1 init data */
462 	if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
463 	sizeof(drm_i810_pre12_init_t))) {
467 	if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
469 	/* This is a v1.2 client, just get the v1.2 init data */
470 	DRM_INFO("Using POST v1.2 init.\n");
471 	if (copy_from_user(init, (drm_i810_init_t __user *)arg,
472 	sizeof(drm_i810_init_t))) {
477 	/* This is a v1.1 client, fix the params */
478 	DRM_INFO("Using PRE v1.2 init.\n");
/* Remap v1.1 field positions onto the v1.2 structure members. */
479 	init->pitch_bits = init->h;
480 	init->pitch = init->w;
481 	init->h = init->overlay_physical;
482 	init->w = init->overlay_offset;
483 	init->overlay_physical = 0;
484 	init->overlay_offset = 0;
/* DRM_IOCTL_I810_INIT entry point.  Reads just the function selector
 * first, then dispatches: legacy init (via the v1.1/v1.2 compat probe),
 * v1.4 init, or cleanup. */
490 int i810_dma_init(struct inode *inode, struct file *filp,
491 	unsigned int cmd, unsigned long arg)
493 	drm_file_t *priv = filp->private_data;
494 	drm_device_t *dev = priv->dev;
495 	drm_i810_private_t *dev_priv;
496 	drm_i810_init_t init;
499 	/* Get only the init func */
500 	if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
505 	/* This case is for backward compatibility. It
506 	* handles XFree 4.1.0 and 4.2.0, and has to
507 	* do some parameter checking as described below.
508 	* It will someday go away.
510 	retcode = i810_dma_init_compat(&init, arg);
514 	dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
516 	if (dev_priv == NULL)
518 	retcode = i810_dma_initialize(dev, dev_priv, &init);
522 	case I810_INIT_DMA_1_4:
523 	DRM_INFO("Using v1.4 init.\n");
524 	if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
525 	sizeof(drm_i810_init_t))) {
528 	dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
530 	if (dev_priv == NULL)
532 	retcode = i810_dma_initialize(dev, dev_priv, &init);
535 	case I810_CLEANUP_DMA:
536 	DRM_INFO("DMA Cleanup\n");
537 	retcode = i810_dma_cleanup(dev);
546 /* Most efficient way to verify state for the i810 is as it is
547 * emitted. Non-conformant state is silently dropped.
549 * Use 'volatile' & local var tmp to force the emitted values to be
550 * identical to the verified ones.
552 static void i810EmitContextVerified( drm_device_t *dev,
553 volatile unsigned int *code )
555 drm_i810_private_t *dev_priv = dev->dev_private;
560 BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
562 OUT_RING( GFX_OP_COLOR_FACTOR );
563 OUT_RING( code[I810_CTXREG_CF1] );
565 OUT_RING( GFX_OP_STIPPLE );
566 OUT_RING( code[I810_CTXREG_ST1] );
568 for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
571 if ((tmp & (7<<29)) == (3<<29) &&
572 (tmp & (0x1f<<24)) < (0x1d<<24))
577 else printk("constext state dropped!!!\n");
/* Emit client-supplied texture state, verified dword-by-dword like
 * i810EmitContextVerified: the MAP_INFO triple goes out with a known
 * opcode, and remaining words must decode as a safe GFX op or they are
 * dropped with a console note. */
586 static void i810EmitTexVerified( drm_device_t *dev,
587 	volatile unsigned int *code )
589 	drm_i810_private_t *dev_priv = dev->dev_private;
594 	BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
596 	OUT_RING( GFX_OP_MAP_INFO );
597 	OUT_RING( code[I810_TEXREG_MI1] );
598 	OUT_RING( code[I810_TEXREG_MI2] );
599 	OUT_RING( code[I810_TEXREG_MI3] );
601 	for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
/* Same opcode filter as the context path: GFX class, opcode < 0x1d. */
604 	if ((tmp & (7<<29)) == (3<<29) &&
605 	(tmp & (0x1f<<24)) < (0x1d<<24))
610 	else printk("texture state dropped!!!\n");
620 /* Need to do some additional checking when setting the dest buffer.
/* Emit destination-buffer state.  The DI1 word is only accepted if it
 * matches one of the two precomputed legal values (front or back buffer
 * descriptor from init); everything else about the destination is
 * emitted from trusted kernel-side values. */
622 static void i810EmitDestVerified( drm_device_t *dev,
623 	volatile unsigned int *code )
625 	drm_i810_private_t *dev_priv = dev->dev_private;
629 	BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
631 	tmp = code[I810_DESTREG_DI1];
/* Only the exact front/back descriptors computed at init are allowed. */
632 	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
633 	OUT_RING( CMD_OP_DESTBUFFER_INFO );
636 	DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
637 	tmp, dev_priv->front_di1, dev_priv->back_di1);
641 	OUT_RING( CMD_OP_Z_BUFFER_INFO );
642 	OUT_RING( dev_priv->zi1 );
644 	OUT_RING( GFX_OP_DESTBUFFER_VARS );
645 	OUT_RING( code[I810_DESTREG_DV1] );
647 	OUT_RING( GFX_OP_DRAWRECT_INFO );
648 	OUT_RING( code[I810_DESTREG_DR1] );
649 	OUT_RING( code[I810_DESTREG_DR2] );
650 	OUT_RING( code[I810_DESTREG_DR3] );
651 	OUT_RING( code[I810_DESTREG_DR4] );
/* Flush all dirty state categories from the SAREA to the ring, clearing
 * each dirty bit as its category is emitted. */
659 static void i810EmitState( drm_device_t *dev )
661 	drm_i810_private_t *dev_priv = dev->dev_private;
662 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
663 	unsigned int dirty = sarea_priv->dirty;
665 	DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
667 	if (dirty & I810_UPLOAD_BUFFERS) {
668 	i810EmitDestVerified( dev, sarea_priv->BufferState );
669 	sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
672 	if (dirty & I810_UPLOAD_CTX) {
673 	i810EmitContextVerified( dev, sarea_priv->ContextState );
674 	sarea_priv->dirty &= ~I810_UPLOAD_CTX;
677 	if (dirty & I810_UPLOAD_TEX0) {
678 	i810EmitTexVerified( dev, sarea_priv->TexState[0] );
679 	sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
682 	if (dirty & I810_UPLOAD_TEX1) {
683 	i810EmitTexVerified( dev, sarea_priv->TexState[1] );
684 	sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
/* Emit color-blit fills to clear the requested buffers (front/back/depth)
 * for each SAREA cliprect.  Rects are validated against the framebuffer
 * dimensions; invalid rects are skipped. */
692 static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
693 	unsigned int clear_color,
694 	unsigned int clear_zval )
696 	drm_i810_private_t *dev_priv = dev->dev_private;
697 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
698 	int nbox = sarea_priv->nbox;
699 	drm_clip_rect_t *pbox = sarea_priv->boxes;
700 	int pitch = dev_priv->pitch;
/* When page-flipped, "front" and "back" swap roles. */
705 	if ( dev_priv->current_page == 1 ) {
706 	unsigned int tmp = flags;
708 	flags &= ~(I810_FRONT | I810_BACK);
709 	if (tmp & I810_FRONT) flags |= I810_BACK;
710 	if (tmp & I810_BACK) flags |= I810_FRONT;
713 	i810_kernel_lost_context(dev);
715 	if (nbox > I810_NR_SAREA_CLIPRECTS)
716 	nbox = I810_NR_SAREA_CLIPRECTS;
718 	for (i = 0 ; i < nbox ; i++, pbox++) {
719 	unsigned int x = pbox->x1;
720 	unsigned int y = pbox->y1;
721 	unsigned int width = (pbox->x2 - x) * cpp;
722 	unsigned int height = pbox->y2 - y;
723 	unsigned int start = y * pitch + x * cpp;
/* Reject degenerate or out-of-bounds cliprects from the SAREA. */
725 	if (pbox->x1 > pbox->x2 ||
726 	pbox->y1 > pbox->y2 ||
727 	pbox->x2 > dev_priv->w ||
728 	pbox->y2 > dev_priv->h)
731 	if ( flags & I810_FRONT ) {
733 	OUT_RING( BR00_BITBLT_CLIENT |
734 	BR00_OP_COLOR_BLT | 0x3 );
735 	OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
736 	OUT_RING( (height << 16) | width );
738 	OUT_RING( clear_color );
743 	if ( flags & I810_BACK ) {
745 	OUT_RING( BR00_BITBLT_CLIENT |
746 	BR00_OP_COLOR_BLT | 0x3 );
747 	OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
748 	OUT_RING( (height << 16) | width );
749 	OUT_RING( dev_priv->back_offset + start );
750 	OUT_RING( clear_color );
755 	if ( flags & I810_DEPTH ) {
757 	OUT_RING( BR00_BITBLT_CLIENT |
758 	BR00_OP_COLOR_BLT | 0x3 );
759 	OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
760 	OUT_RING( (height << 16) | width );
761 	OUT_RING( dev_priv->depth_offset + start );
762 	OUT_RING( clear_zval );
/* Emit src-copy blits from back to front for each SAREA cliprect; the
 * source/destination offsets swap according to the current flip page. */
769 static void i810_dma_dispatch_swap( drm_device_t *dev )
771 	drm_i810_private_t *dev_priv = dev->dev_private;
772 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
773 	int nbox = sarea_priv->nbox;
774 	drm_clip_rect_t *pbox = sarea_priv->boxes;
775 	int pitch = dev_priv->pitch;
780 	DRM_DEBUG("swapbuffers\n");
782 	i810_kernel_lost_context(dev);
784 	if (nbox > I810_NR_SAREA_CLIPRECTS)
785 	nbox = I810_NR_SAREA_CLIPRECTS;
787 	for (i = 0 ; i < nbox; i++, pbox++)
789 	unsigned int w = pbox->x2 - pbox->x1;
790 	unsigned int h = pbox->y2 - pbox->y1;
791 	unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
792 	unsigned int start = dst;
/* Reject degenerate or out-of-bounds cliprects from the SAREA. */
794 	if (pbox->x1 > pbox->x2 ||
795 	pbox->y1 > pbox->y2 ||
796 	pbox->x2 > dev_priv->w ||
797 	pbox->y2 > dev_priv->h)
801 	OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
802 	OUT_RING( pitch | (0xCC << 16));
803 	OUT_RING( (h << 16) | (w * cpp));
/* Destination is whichever buffer is currently displayed... */
804 	if (dev_priv->current_page == 0)
805 	OUT_RING(dev_priv->front_offset + start);
807 	OUT_RING(dev_priv->back_offset + start);
/* ...and the source is the other one. */
809 	if (dev_priv->current_page == 0)
810 	OUT_RING(dev_priv->back_offset + start);
812 	OUT_RING(dev_priv->front_offset + start);
/* Dispatch a client vertex buffer as a protected batch buffer, once per
 * cliprect (scissored), then optionally hand the buffer back to hardware
 * with a STORE_DWORD_IDX that lets the GPU mark it FREE when done. */
818 static void i810_dma_dispatch_vertex(drm_device_t *dev,
823 	drm_i810_private_t *dev_priv = dev->dev_private;
824 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
825 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
826 	drm_clip_rect_t *box = sarea_priv->boxes;
827 	int nbox = sarea_priv->nbox;
828 	unsigned long address = (unsigned long)buf->bus_address;
/* Batch addresses are programmed relative to the AGP aperture base. */
829 	unsigned long start = address - dev->agp->base;
833 	i810_kernel_lost_context(dev);
835 	if (nbox > I810_NR_SAREA_CLIPRECTS)
836 	nbox = I810_NR_SAREA_CLIPRECTS;
841 	if (sarea_priv->dirty)
842 	i810EmitState( dev );
844 	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
845 	unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
/* Prepend the PRIMITIVE header; length field is dwords-2. */
847 	*(u32 *)buf_priv->kernel_virtual = ((GFX_OP_PRIMITIVE | prim | ((used/4)-2)));
/* NOTE(review): casting a pointer through (u32) truncates on 64-bit
 * kernels; should be (unsigned long) — confirm target arch. */
850 	*(u32 *)((u32)buf_priv->kernel_virtual + used) = 0;
854 	i810_unmap_buffer(buf);
861 	OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
863 	OUT_RING( GFX_OP_SCISSOR_INFO );
864 	OUT_RING( box[i].x1 | (box[i].y1<<16) );
865 	OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
870 	OUT_RING( CMD_OP_BATCH_BUFFER );
871 	OUT_RING( start | BB1_PROTECTED );
872 	OUT_RING( start + used - 4 );
876 	} while (++i < nbox);
/* Hand ownership CLIENT -> HARDWARE; the GPU will flip it to FREE via
 * the STORE_DWORD_IDX emitted below. */
882 	(void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
886 	OUT_RING( CMD_STORE_DWORD_IDX );
888 	OUT_RING( dev_priv->counter );
889 	OUT_RING( CMD_STORE_DWORD_IDX );
890 	OUT_RING( buf_priv->my_use_idx );
891 	OUT_RING( I810_BUF_FREE );
892 	OUT_RING( CMD_REPORT_HEAD );
/* Flip the display between front and back buffers by reprogramming the
 * front-buffer base, then wait for the plane flip event and publish the
 * new page in the SAREA. */
898 static void i810_dma_dispatch_flip( drm_device_t *dev )
900 	drm_i810_private_t *dev_priv = dev->dev_private;
901 	int pitch = dev_priv->pitch;
904 	DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
906 	dev_priv->current_page,
907 	dev_priv->sarea_priv->pf_current_page);
909 	i810_kernel_lost_context(dev);
912 	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
916 	BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
917 	/* On i815 at least ASYNC is buggy */
918 	/* pitch<<5 is from 11.2.8 p158,
919 	its the pitch / 8 then left shifted 8,
920 	so (pitch >> 3) << 8 */
921 	OUT_RING( CMD_OP_FRONTBUFFER_INFO | (pitch<<5) /*| ASYNC_FLIP */ );
922 	if ( dev_priv->current_page == 0 ) {
923 	OUT_RING( dev_priv->back_offset );
924 	dev_priv->current_page = 1;
926 	OUT_RING( dev_priv->front_offset );
927 	dev_priv->current_page = 0;
933 	OUT_RING( CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP );
937 	/* Increment the frame counter. The client-side 3D driver must
938 	* throttle the framerate by waiting for this value before
939 	* performing the swapbuffer ioctl.
941 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Drain the engine: flush caches, report head, then wait until the ring
 * is (almost) completely empty. */
945 void i810_dma_quiescent(drm_device_t *dev)
947 	drm_i810_private_t *dev_priv = dev->dev_private;
950 	/* printk("%s\n", __FUNCTION__); */
952 	i810_kernel_lost_context(dev);
955 	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
956 	OUT_RING( CMD_REPORT_HEAD );
/* Waiting for Size-8 free bytes == ring effectively empty. */
961 	i810_wait_ring( dev, dev_priv->ring.Size - 8 );
/* Wait for the engine to go idle, then reclaim any buffers the hardware
 * still owned (HARDWARE -> FREE); buffers still owned by clients are
 * left alone and just logged. */
964 static int i810_flush_queue(drm_device_t *dev)
966 	drm_i810_private_t *dev_priv = dev->dev_private;
967 	drm_device_dma_t *dma = dev->dma;
971 	/* printk("%s\n", __FUNCTION__); */
973 	i810_kernel_lost_context(dev);
976 	OUT_RING( CMD_REPORT_HEAD );
980 	i810_wait_ring( dev, dev_priv->ring.Size - 8 );
982 	for (i = 0; i < dma->buf_count; i++) {
983 	drm_buf_t *buf = dma->buflist[ i ];
984 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
986 	int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
989 	if (used == I810_BUF_HARDWARE)
990 	DRM_DEBUG("reclaimed from HARDWARE\n");
991 	if (used == I810_BUF_CLIENT)
992 	DRM_DEBUG("still on client\n");
998 /* Must be called with the lock held */
/* Reclaim every buffer owned by a departing client (matched by filp):
 * flush the queue, flip CLIENT -> FREE, and clear stale mapping state.
 * Caller must hold the hardware lock (see comment above). */
999 void i810_reclaim_buffers(struct file *filp)
1001 	drm_file_t *priv = filp->private_data;
1002 	drm_device_t *dev = priv->dev;
1003 	drm_device_dma_t *dma = dev->dma;
1007 	if (!dev->dev_private) return;
1008 	if (!dma->buflist) return;
1010 	i810_flush_queue(dev);
1012 	for (i = 0; i < dma->buf_count; i++) {
1013 	drm_buf_t *buf = dma->buflist[ i ];
1014 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* Only touch buffers belonging to this file descriptor. */
1016 	if (buf->filp == filp && buf_priv) {
1017 	int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1020 	if (used == I810_BUF_CLIENT)
1021 	DRM_DEBUG("reclaimed from client\n");
1022 	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
1023 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
/* DRM_IOCTL_I810_FLUSH: drain the DMA queue.  Requires the hardware
 * lock to be held by the caller. */
1028 int i810_flush_ioctl(struct inode *inode, struct file *filp,
1029 	unsigned int cmd, unsigned long arg)
1031 	drm_file_t *priv = filp->private_data;
1032 	drm_device_t *dev = priv->dev;
1034 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1035 	DRM_ERROR("i810_flush_ioctl called without lock held\n");
1039 	i810_flush_queue(dev);
1044 int i810_dma_vertex(struct inode *inode, struct file *filp,
1045 unsigned int cmd, unsigned long arg)
1047 drm_file_t *priv = filp->private_data;
1048 drm_device_t *dev = priv->dev;
1049 drm_device_dma_t *dma = dev->dma;
1050 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1051 u32 *hw_status = dev_priv->hw_status_page;
1052 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1053 dev_priv->sarea_priv;
1054 drm_i810_vertex_t vertex;
1056 if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
1059 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1060 DRM_ERROR("i810_dma_vertex called without lock held\n");
1064 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1065 vertex.idx, vertex.used, vertex.discard);
1067 if (vertex.idx < 0 || vertex.idx > dma->buf_count)
1070 i810_dma_dispatch_vertex( dev,
1071 dma->buflist[ vertex.idx ],
1072 vertex.discard, vertex.used );
1074 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
1075 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1076 sarea_priv->last_enqueue = dev_priv->counter-1;
1077 sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I810_CLEAR: clear the selected buffers through the blitter.
 * Requires the hardware lock and an initialized device. */
1084 int i810_clear_bufs(struct inode *inode, struct file *filp,
1085 	unsigned int cmd, unsigned long arg)
1087 	drm_file_t *priv = filp->private_data;
1088 	drm_device_t *dev = priv->dev;
1089 	drm_i810_clear_t clear;
1091 	if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
1094 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1095 	DRM_ERROR("i810_clear_bufs called without lock held\n");
1099 	/* GH: Someone's doing nasty things... */
1100 	if (!dev->dev_private) {
1104 	i810_dma_dispatch_clear( dev, clear.flags,
1106 	clear.clear_depth );
/* DRM_IOCTL_I810_SWAP: blit back buffer to front for all cliprects.
 * Requires the hardware lock. */
1110 int i810_swap_bufs(struct inode *inode, struct file *filp,
1111 	unsigned int cmd, unsigned long arg)
1113 	drm_file_t *priv = filp->private_data;
1114 	drm_device_t *dev = priv->dev;
1116 	DRM_DEBUG("i810_swap_bufs\n");
1118 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1119 	DRM_ERROR("i810_swap_buf called without lock held\n");
1123 	i810_dma_dispatch_swap( dev );
/* DRM_IOCTL_I810_GETAGE: refresh the SAREA last_dispatch value from the
 * hardware status page (driver counter at word 5). */
1127 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1130 	drm_file_t *priv = filp->private_data;
1131 	drm_device_t *dev = priv->dev;
1132 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1133 	u32 *hw_status = dev_priv->hw_status_page;
1134 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1135 	dev_priv->sarea_priv;
1137 	sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I810_GETBUF: hand a free, mapped DMA buffer to the caller.
 * Requires the hardware lock; result (idx/size/virtual/granted) is
 * copied back to userspace. */
1141 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1144 	drm_file_t *priv = filp->private_data;
1145 	drm_device_t *dev = priv->dev;
1148 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1149 	u32 *hw_status = dev_priv->hw_status_page;
1150 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1151 	dev_priv->sarea_priv;
1153 	if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d)))
1156 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1157 	DRM_ERROR("i810_dma called without lock held\n");
1163 	retcode = i810_dma_get_buffer(dev, &d, filp);
1165 	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1166 	current->pid, retcode, d.granted);
1168 	if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
1170 	sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I810_COPY: intentionally a no-op on this kernel branch. */
1175 int i810_copybuf(struct inode *inode,
1180 	/* Never copy - 2.4.x doesn't need it */
/* DRM_IOCTL_I810_DOCOPY: intentionally a no-op on this kernel branch. */
1184 int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1187 	/* Never copy - 2.4.x doesn't need it */
/* Dispatch a motion-compensation (XvMC) batch buffer.  Takes ownership
 * of the buffer (CLIENT -> HARDWARE via cmpxchg), runs it as a protected
 * batch, then has the GPU mark it FREE and record last_render. */
1191 static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
1192 	unsigned int last_render)
1194 	drm_i810_private_t *dev_priv = dev->dev_private;
1195 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1196 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
1197 	unsigned long address = (unsigned long)buf->bus_address;
/* Batch addresses are programmed relative to the AGP aperture base. */
1198 	unsigned long start = address - dev->agp->base;
1202 	i810_kernel_lost_context(dev);
1204 	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1206 	if (u != I810_BUF_CLIENT) {
1207 	DRM_DEBUG("MC found buffer that isn't mine!\n");
/* Mark all state dirty so the next vertex dispatch re-emits it. */
1213 	sarea_priv->dirty = 0x7f;
1215 	DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
1218 	dev_priv->counter++;
1219 	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1220 	DRM_DEBUG("i810_dma_dispatch_mc\n");
1221 	DRM_DEBUG("start : %lx\n", start);
1222 	DRM_DEBUG("used : %d\n", used);
1223 	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1225 	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
/* NOTE(review): casting a pointer through (u32) truncates on 64-bit
 * kernels; should be (unsigned long) — confirm target arch. */
1227 	*(u32 *)((u32)buf_priv->virtual + used) = 0;
1231 	i810_unmap_buffer(buf);
1234 	OUT_RING( CMD_OP_BATCH_BUFFER );
1235 	OUT_RING( start | BB1_PROTECTED );
1236 	OUT_RING( start + used - 4 );
/* GPU writes FREE into this buffer's status-page slot when done... */
1242 	OUT_RING( CMD_STORE_DWORD_IDX );
1243 	OUT_RING( buf_priv->my_use_idx );
1244 	OUT_RING( I810_BUF_FREE );
/* ...and records the caller-supplied last_render fence value. */
1247 	OUT_RING( CMD_STORE_DWORD_IDX );
1249 	OUT_RING( last_render );
/* DRM_IOCTL_I810_MC: dispatch an XvMC buffer.  Requires the hardware
 * lock and a valid buffer index (note the correct '>=' bounds check
 * here), then updates SAREA/statistics like the vertex path. */
1254 int i810_dma_mc(struct inode *inode, struct file *filp,
1255 	unsigned int cmd, unsigned long arg)
1257 	drm_file_t *priv = filp->private_data;
1258 	drm_device_t *dev = priv->dev;
1259 	drm_device_dma_t *dma = dev->dma;
1260 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1261 	u32 *hw_status = dev_priv->hw_status_page;
1262 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1263 	dev_priv->sarea_priv;
1266 	if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc)))
1270 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1271 	DRM_ERROR("i810_dma_mc called without lock held\n");
1275 	if (mc.idx >= dma->buf_count || mc.idx < 0)
1278 	i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
1281 	atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
1282 	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1283 	sarea_priv->last_enqueue = dev_priv->counter-1;
1284 	sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I810_RSTATUS: return the "Last Render" word (index 4) from
 * the hardware status page directly as the ioctl result. */
1289 int i810_rstatus(struct inode *inode, struct file *filp,
1290 	unsigned int cmd, unsigned long arg)
1292 	drm_file_t *priv = filp->private_data;
1293 	drm_device_t *dev = priv->dev;
1294 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1296 	return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
/* DRM_IOCTL_I810_OV0INFO: report the overlay register offset and its
 * physical address to userspace.
 * NOTE(review): only .offset and .physical are assigned before the
 * copy_to_user — if drm_i810_overlay_t has any other fields or padding,
 * uninitialized stack bytes leak to userspace; consider memset(&data, 0,
 * sizeof(data)) first.  Verify the struct layout. */
1299 int i810_ov0_info(struct inode *inode, struct file *filp,
1300 	unsigned int cmd, unsigned long arg)
1302 	drm_file_t *priv = filp->private_data;
1303 	drm_device_t *dev = priv->dev;
1304 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1305 	drm_i810_overlay_t data;
1307 	data.offset = dev_priv->overlay_offset;
1308 	data.physical = dev_priv->overlay_physical;
1309 	if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
/* DRM_IOCTL_I810_FSTATUS: read hardware register 0x30008 and return it
 * as the ioctl result.  Requires the hardware lock. */
1314 int i810_fstatus(struct inode *inode, struct file *filp,
1315 	unsigned int cmd, unsigned long arg)
1317 	drm_file_t *priv = filp->private_data;
1318 	drm_device_t *dev = priv->dev;
1319 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1321 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1322 	DRM_ERROR("i810_fstatus called without lock held\n");
1325 	return I810_READ(0x30008);
/* DRM_IOCTL_I810_OV0FLIP: kick the video overlay to load its new
 * register set (bit 31 = load request).  Requires the hardware lock. */
1328 int i810_ov0_flip(struct inode *inode, struct file *filp,
1329 	unsigned int cmd, unsigned long arg)
1331 	drm_file_t *priv = filp->private_data;
1332 	drm_device_t *dev = priv->dev;
1333 	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1335 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1336 	DRM_ERROR("i810_ov0_flip called without lock held\n");
1340 	//Tell the overlay to update
1341 	I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
1347 /* Not sure why this isn't set all the time:
/* Enable page flipping: start on page 0 and publish it in the SAREA. */
1349 static void i810_do_init_pageflip( drm_device_t *dev )
1351 	drm_i810_private_t *dev_priv = dev->dev_private;
1353 	DRM_DEBUG("%s\n", __FUNCTION__);
1354 	dev_priv->page_flipping = 1;
1355 	dev_priv->current_page = 0;
1356 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Disable page flipping; if we are currently showing page 1, flip back
 * to page 0 first so the display ends on the front buffer. */
1359 int i810_do_cleanup_pageflip( drm_device_t *dev )
1361 	drm_i810_private_t *dev_priv = dev->dev_private;
1363 	DRM_DEBUG("%s\n", __FUNCTION__);
1364 	if (dev_priv->current_page != 0)
1365 	i810_dma_dispatch_flip( dev );
1367 	dev_priv->page_flipping = 0;
/* DRM_IOCTL_I810_FLIP: page-flip the display, lazily enabling flip mode
 * on first use.  Requires the hardware lock. */
1371 int i810_flip_bufs(struct inode *inode, struct file *filp,
1372 	unsigned int cmd, unsigned long arg)
1374 	drm_file_t *priv = filp->private_data;
1375 	drm_device_t *dev = priv->dev;
1376 	drm_i810_private_t *dev_priv = dev->dev_private;
1378 	DRM_DEBUG("%s\n", __FUNCTION__);
1380 	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1381 	DRM_ERROR("i810_flip_buf called without lock held\n");
1385 	if (!dev_priv->page_flipping)
1386 	i810_do_init_pageflip( dev );
1388 	i810_dma_dispatch_flip( dev );