1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
40 #include <linux/pagemap.h>
42 #define I810_BUF_FREE 2
43 #define I810_BUF_CLIENT 1
44 #define I810_BUF_HARDWARE 0
46 #define I810_BUF_UNMAPPED 0
47 #define I810_BUF_MAPPED 1
49 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
50 #define down_write down
54 static inline void i810_print_status_page(drm_device_t *dev)
56 drm_device_dma_t *dma = dev->dma;
57 drm_i810_private_t *dev_priv = dev->dev_private;
58 u32 *temp = dev_priv->hw_status_page;
61 DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
62 DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
63 DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
64 DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
65 DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
66 DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
67 for(i = 6; i < dma->buf_count + 6; i++) {
68 DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
72 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
74 drm_device_dma_t *dma = dev->dma;
78 /* Linear search might not be the best solution */
80 for (i = 0; i < dma->buf_count; i++) {
81 drm_buf_t *buf = dma->buflist[ i ];
82 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
83 /* In use is already a pointer */
84 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
86 if (used == I810_BUF_FREE) {
93 /* This should only be called if the buffer is not sent to the hardware
94 * yet, the hardware updates in use for us once its on the ring buffer.
97 static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
99 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
102 /* In use is already a pointer */
103 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
104 if (used != I810_BUF_CLIENT) {
105 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
112 static struct file_operations i810_buffer_fops = {
115 .release = DRM(release),
117 .mmap = i810_mmap_buffers,
118 .fasync = DRM(fasync),
121 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
123 drm_file_t *priv = filp->private_data;
125 drm_i810_private_t *dev_priv;
127 drm_i810_buf_priv_t *buf_priv;
131 dev_priv = dev->dev_private;
132 buf = dev_priv->mmap_buffer;
133 buf_priv = buf->dev_private;
135 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
138 buf_priv->currently_mapped = I810_BUF_MAPPED;
141 if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
143 vma->vm_end - vma->vm_start,
144 vma->vm_page_prot)) return -EAGAIN;
148 static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
150 drm_file_t *priv = filp->private_data;
151 drm_device_t *dev = priv->dev;
152 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
153 drm_i810_private_t *dev_priv = dev->dev_private;
154 struct file_operations *old_fops;
157 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
160 down_write( ¤t->mm->mmap_sem );
161 old_fops = filp->f_op;
162 filp->f_op = &i810_buffer_fops;
163 dev_priv->mmap_buffer = buf;
164 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
165 PROT_READ|PROT_WRITE,
168 dev_priv->mmap_buffer = NULL;
169 filp->f_op = old_fops;
170 if ((unsigned long)buf_priv->virtual > -1024UL) {
172 DRM_ERROR("mmap error\n");
173 retcode = (signed int)buf_priv->virtual;
174 buf_priv->virtual = 0;
176 up_write( ¤t->mm->mmap_sem );
181 static int i810_unmap_buffer(drm_buf_t *buf)
183 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
186 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
189 down_write(¤t->mm->mmap_sem);
190 retcode = do_munmap(current->mm,
191 (unsigned long)buf_priv->virtual,
192 (size_t) buf->total);
193 up_write(¤t->mm->mmap_sem);
195 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
196 buf_priv->virtual = 0;
201 static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
205 drm_i810_buf_priv_t *buf_priv;
208 buf = i810_freelist_get(dev);
211 DRM_DEBUG("retcode=%d\n", retcode);
215 retcode = i810_map_buffer(buf, filp);
217 i810_freelist_put(dev, buf);
218 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
222 buf_priv = buf->dev_private;
224 d->request_idx = buf->idx;
225 d->request_size = buf->total;
226 d->virtual = buf_priv->virtual;
231 int i810_dma_cleanup(drm_device_t *dev)
233 drm_device_dma_t *dma = dev->dma;
236 /* Make sure interrupts are disabled here because the uninstall ioctl
237 * may not have been called from userspace and after dev_private
238 * is freed, it's too late.
240 if (dev->irq) DRM(irq_uninstall)(dev);
243 if (dev->dev_private) {
245 drm_i810_private_t *dev_priv =
246 (drm_i810_private_t *) dev->dev_private;
248 if (dev_priv->ring.virtual_start) {
249 DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
250 dev_priv->ring.Size, dev);
252 if (dev_priv->hw_status_page) {
253 pci_free_consistent(dev->pdev, PAGE_SIZE,
254 dev_priv->hw_status_page,
255 dev_priv->dma_status_page);
256 /* Need to rewrite hardware status page */
257 I810_WRITE(0x02080, 0x1ffff000);
259 DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
261 dev->dev_private = NULL;
263 for (i = 0; i < dma->buf_count; i++) {
264 drm_buf_t *buf = dma->buflist[ i ];
265 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
266 if ( buf_priv->kernel_virtual && buf->total )
267 DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
273 static int i810_wait_ring(drm_device_t *dev, int n)
275 drm_i810_private_t *dev_priv = dev->dev_private;
276 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
279 unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
281 end = jiffies + (HZ*3);
282 while (ring->space < n) {
283 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
284 ring->space = ring->head - (ring->tail+8);
285 if (ring->space < 0) ring->space += ring->Size;
287 if (ring->head != last_head) {
288 end = jiffies + (HZ*3);
289 last_head = ring->head;
293 if (time_before(end, jiffies)) {
294 DRM_ERROR("space: %d wanted %d\n", ring->space, n);
295 DRM_ERROR("lockup\n");
305 static void i810_kernel_lost_context(drm_device_t *dev)
307 drm_i810_private_t *dev_priv = dev->dev_private;
308 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
310 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
311 ring->tail = I810_READ(LP_RING + RING_TAIL);
312 ring->space = ring->head - (ring->tail+8);
313 if (ring->space < 0) ring->space += ring->Size;
316 static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
318 drm_device_dma_t *dma = dev->dma;
320 u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
323 if (dma->buf_count > 1019) {
324 /* Not enough space in the status page for the freelist */
328 for (i = 0; i < dma->buf_count; i++) {
329 drm_buf_t *buf = dma->buflist[ i ];
330 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
332 buf_priv->in_use = hw_status++;
333 buf_priv->my_use_idx = my_idx;
336 *buf_priv->in_use = I810_BUF_FREE;
338 buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
344 static int i810_dma_initialize(drm_device_t *dev,
345 drm_i810_private_t *dev_priv,
346 drm_i810_init_t *init)
348 struct list_head *list;
350 memset(dev_priv, 0, sizeof(drm_i810_private_t));
352 list_for_each(list, &dev->maplist->head) {
353 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
355 r_list->map->type == _DRM_SHM &&
356 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
357 dev_priv->sarea_map = r_list->map;
361 if (!dev_priv->sarea_map) {
362 dev->dev_private = (void *)dev_priv;
363 i810_dma_cleanup(dev);
364 DRM_ERROR("can not find sarea!\n");
367 DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
368 if (!dev_priv->mmio_map) {
369 dev->dev_private = (void *)dev_priv;
370 i810_dma_cleanup(dev);
371 DRM_ERROR("can not find mmio map!\n");
374 DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
375 if (!dev_priv->buffer_map) {
376 dev->dev_private = (void *)dev_priv;
377 i810_dma_cleanup(dev);
378 DRM_ERROR("can not find dma buffer map!\n");
382 dev_priv->sarea_priv = (drm_i810_sarea_t *)
383 ((u8 *)dev_priv->sarea_map->handle +
384 init->sarea_priv_offset);
386 dev_priv->ring.Start = init->ring_start;
387 dev_priv->ring.End = init->ring_end;
388 dev_priv->ring.Size = init->ring_size;
390 dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
392 init->ring_size, dev);
394 if (dev_priv->ring.virtual_start == NULL) {
395 dev->dev_private = (void *) dev_priv;
396 i810_dma_cleanup(dev);
397 DRM_ERROR("can not ioremap virtual address for"
402 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
404 dev_priv->w = init->w;
405 dev_priv->h = init->h;
406 dev_priv->pitch = init->pitch;
407 dev_priv->back_offset = init->back_offset;
408 dev_priv->depth_offset = init->depth_offset;
409 dev_priv->front_offset = init->front_offset;
411 dev_priv->overlay_offset = init->overlay_offset;
412 dev_priv->overlay_physical = init->overlay_physical;
414 dev_priv->front_di1 = init->front_offset | init->pitch_bits;
415 dev_priv->back_di1 = init->back_offset | init->pitch_bits;
416 dev_priv->zi1 = init->depth_offset | init->pitch_bits;
418 /* Program Hardware Status Page */
419 dev_priv->hw_status_page =
420 pci_alloc_consistent(dev->pdev, PAGE_SIZE,
421 &dev_priv->dma_status_page);
422 if (!dev_priv->hw_status_page) {
423 dev->dev_private = (void *)dev_priv;
424 i810_dma_cleanup(dev);
425 DRM_ERROR("Can not allocate hardware status page\n");
428 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
429 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
431 I810_WRITE(0x02080, dev_priv->dma_status_page);
432 DRM_DEBUG("Enabled hardware status page\n");
434 /* Now we need to init our freelist */
435 if (i810_freelist_init(dev, dev_priv) != 0) {
436 dev->dev_private = (void *)dev_priv;
437 i810_dma_cleanup(dev);
438 DRM_ERROR("Not enough space in the status page for"
442 dev->dev_private = (void *)dev_priv;
447 /* i810 DRM version 1.1 used a smaller init structure with different
448 * ordering of values than is currently used (drm >= 1.2). There is
449 * no defined way to detect the XFree version to correct this problem,
450 * however by checking using this procedure we can detect the correct
453 * #1 Read the Smaller init structure from user-space
454 * #2 Verify the overlay_physical is a valid physical address, or NULL
455 * If it isn't then we have a v1.1 client. Fix up params.
456 * If it is, then we have a 1.2 client... get the rest of the data.
458 int i810_dma_init_compat(drm_i810_init_t *init, unsigned long arg)
461 /* Get v1.1 init data */
462 if (copy_from_user(init, (drm_i810_pre12_init_t *)arg,
463 sizeof(drm_i810_pre12_init_t))) {
467 if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
469 /* This is a v1.2 client, just get the v1.2 init data */
470 DRM_INFO("Using POST v1.2 init.\n");
471 if (copy_from_user(init, (drm_i810_init_t *)arg,
472 sizeof(drm_i810_init_t))) {
477 /* This is a v1.1 client, fix the params */
478 DRM_INFO("Using PRE v1.2 init.\n");
479 init->pitch_bits = init->h;
480 init->pitch = init->w;
481 init->h = init->overlay_physical;
482 init->w = init->overlay_offset;
483 init->overlay_physical = 0;
484 init->overlay_offset = 0;
490 int i810_dma_init(struct inode *inode, struct file *filp,
491 unsigned int cmd, unsigned long arg)
493 drm_file_t *priv = filp->private_data;
494 drm_device_t *dev = priv->dev;
495 drm_i810_private_t *dev_priv;
496 drm_i810_init_t init;
499 /* Get only the init func */
500 if (copy_from_user(&init, (void *)arg, sizeof(drm_i810_init_func_t)))
505 /* This case is for backward compatibility. It
506 * handles XFree 4.1.0 and 4.2.0, and has to
507 * do some parameter checking as described below.
508 * It will someday go away.
510 retcode = i810_dma_init_compat(&init, arg);
514 dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
516 if (dev_priv == NULL)
518 retcode = i810_dma_initialize(dev, dev_priv, &init);
522 case I810_INIT_DMA_1_4:
523 DRM_INFO("Using v1.4 init.\n");
524 if (copy_from_user(&init, (drm_i810_init_t *)arg,
525 sizeof(drm_i810_init_t))) {
528 dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
530 if (dev_priv == NULL)
532 retcode = i810_dma_initialize(dev, dev_priv, &init);
535 case I810_CLEANUP_DMA:
536 DRM_INFO("DMA Cleanup\n");
537 retcode = i810_dma_cleanup(dev);
546 /* Most efficient way to verify state for the i810 is as it is
547 * emitted. Non-conformant state is silently dropped.
549 * Use 'volatile' & local var tmp to force the emitted values to be
550 * identical to the verified ones.
552 static void i810EmitContextVerified( drm_device_t *dev,
553 volatile unsigned int *code )
555 drm_i810_private_t *dev_priv = dev->dev_private;
560 BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
562 OUT_RING( GFX_OP_COLOR_FACTOR );
563 OUT_RING( code[I810_CTXREG_CF1] );
565 OUT_RING( GFX_OP_STIPPLE );
566 OUT_RING( code[I810_CTXREG_ST1] );
568 for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
571 if ((tmp & (7<<29)) == (3<<29) &&
572 (tmp & (0x1f<<24)) < (0x1d<<24))
577 else printk("constext state dropped!!!\n");
586 static void i810EmitTexVerified( drm_device_t *dev,
587 volatile unsigned int *code )
589 drm_i810_private_t *dev_priv = dev->dev_private;
594 BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
596 OUT_RING( GFX_OP_MAP_INFO );
597 OUT_RING( code[I810_TEXREG_MI1] );
598 OUT_RING( code[I810_TEXREG_MI2] );
599 OUT_RING( code[I810_TEXREG_MI3] );
601 for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
604 if ((tmp & (7<<29)) == (3<<29) &&
605 (tmp & (0x1f<<24)) < (0x1d<<24))
610 else printk("texture state dropped!!!\n");
620 /* Need to do some additional checking when setting the dest buffer.
622 static void i810EmitDestVerified( drm_device_t *dev,
623 volatile unsigned int *code )
625 drm_i810_private_t *dev_priv = dev->dev_private;
629 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
631 tmp = code[I810_DESTREG_DI1];
632 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
633 OUT_RING( CMD_OP_DESTBUFFER_INFO );
636 DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
637 tmp, dev_priv->front_di1, dev_priv->back_di1);
641 OUT_RING( CMD_OP_Z_BUFFER_INFO );
642 OUT_RING( dev_priv->zi1 );
644 OUT_RING( GFX_OP_DESTBUFFER_VARS );
645 OUT_RING( code[I810_DESTREG_DV1] );
647 OUT_RING( GFX_OP_DRAWRECT_INFO );
648 OUT_RING( code[I810_DESTREG_DR1] );
649 OUT_RING( code[I810_DESTREG_DR2] );
650 OUT_RING( code[I810_DESTREG_DR3] );
651 OUT_RING( code[I810_DESTREG_DR4] );
659 static void i810EmitState( drm_device_t *dev )
661 drm_i810_private_t *dev_priv = dev->dev_private;
662 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
663 unsigned int dirty = sarea_priv->dirty;
665 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
667 if (dirty & I810_UPLOAD_BUFFERS) {
668 i810EmitDestVerified( dev, sarea_priv->BufferState );
669 sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
672 if (dirty & I810_UPLOAD_CTX) {
673 i810EmitContextVerified( dev, sarea_priv->ContextState );
674 sarea_priv->dirty &= ~I810_UPLOAD_CTX;
677 if (dirty & I810_UPLOAD_TEX0) {
678 i810EmitTexVerified( dev, sarea_priv->TexState[0] );
679 sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
682 if (dirty & I810_UPLOAD_TEX1) {
683 i810EmitTexVerified( dev, sarea_priv->TexState[1] );
684 sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
692 static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
693 unsigned int clear_color,
694 unsigned int clear_zval )
696 drm_i810_private_t *dev_priv = dev->dev_private;
697 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
698 int nbox = sarea_priv->nbox;
699 drm_clip_rect_t *pbox = sarea_priv->boxes;
700 int pitch = dev_priv->pitch;
705 if ( dev_priv->current_page == 1 ) {
706 unsigned int tmp = flags;
708 flags &= ~(I810_FRONT | I810_BACK);
709 if (tmp & I810_FRONT) flags |= I810_BACK;
710 if (tmp & I810_BACK) flags |= I810_FRONT;
713 i810_kernel_lost_context(dev);
715 if (nbox > I810_NR_SAREA_CLIPRECTS)
716 nbox = I810_NR_SAREA_CLIPRECTS;
718 for (i = 0 ; i < nbox ; i++, pbox++) {
719 unsigned int x = pbox->x1;
720 unsigned int y = pbox->y1;
721 unsigned int width = (pbox->x2 - x) * cpp;
722 unsigned int height = pbox->y2 - y;
723 unsigned int start = y * pitch + x * cpp;
725 if (pbox->x1 > pbox->x2 ||
726 pbox->y1 > pbox->y2 ||
727 pbox->x2 > dev_priv->w ||
728 pbox->y2 > dev_priv->h)
731 if ( flags & I810_FRONT ) {
733 OUT_RING( BR00_BITBLT_CLIENT |
734 BR00_OP_COLOR_BLT | 0x3 );
735 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
736 OUT_RING( (height << 16) | width );
738 OUT_RING( clear_color );
743 if ( flags & I810_BACK ) {
745 OUT_RING( BR00_BITBLT_CLIENT |
746 BR00_OP_COLOR_BLT | 0x3 );
747 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
748 OUT_RING( (height << 16) | width );
749 OUT_RING( dev_priv->back_offset + start );
750 OUT_RING( clear_color );
755 if ( flags & I810_DEPTH ) {
757 OUT_RING( BR00_BITBLT_CLIENT |
758 BR00_OP_COLOR_BLT | 0x3 );
759 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
760 OUT_RING( (height << 16) | width );
761 OUT_RING( dev_priv->depth_offset + start );
762 OUT_RING( clear_zval );
769 static void i810_dma_dispatch_swap( drm_device_t *dev )
771 drm_i810_private_t *dev_priv = dev->dev_private;
772 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
773 int nbox = sarea_priv->nbox;
774 drm_clip_rect_t *pbox = sarea_priv->boxes;
775 int pitch = dev_priv->pitch;
780 DRM_DEBUG("swapbuffers\n");
782 i810_kernel_lost_context(dev);
784 if (nbox > I810_NR_SAREA_CLIPRECTS)
785 nbox = I810_NR_SAREA_CLIPRECTS;
787 for (i = 0 ; i < nbox; i++, pbox++)
789 unsigned int w = pbox->x2 - pbox->x1;
790 unsigned int h = pbox->y2 - pbox->y1;
791 unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
792 unsigned int start = dst;
794 if (pbox->x1 > pbox->x2 ||
795 pbox->y1 > pbox->y2 ||
796 pbox->x2 > dev_priv->w ||
797 pbox->y2 > dev_priv->h)
801 OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
802 OUT_RING( pitch | (0xCC << 16));
803 OUT_RING( (h << 16) | (w * cpp));
804 if (dev_priv->current_page == 0)
805 OUT_RING(dev_priv->front_offset + start);
807 OUT_RING(dev_priv->back_offset + start);
809 if (dev_priv->current_page == 0)
810 OUT_RING(dev_priv->back_offset + start);
812 OUT_RING(dev_priv->front_offset + start);
818 static void i810_dma_dispatch_vertex(drm_device_t *dev,
823 drm_i810_private_t *dev_priv = dev->dev_private;
824 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
825 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
826 drm_clip_rect_t *box = sarea_priv->boxes;
827 int nbox = sarea_priv->nbox;
828 unsigned long address = (unsigned long)buf->bus_address;
829 unsigned long start = address - dev->agp->base;
833 i810_kernel_lost_context(dev);
835 if (nbox > I810_NR_SAREA_CLIPRECTS)
836 nbox = I810_NR_SAREA_CLIPRECTS;
841 if (sarea_priv->dirty)
842 i810EmitState( dev );
844 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
845 unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
847 put_user((GFX_OP_PRIMITIVE | prim |
849 (u32 *)buf_priv->virtual);
853 (u32 *)((u32)buf_priv->virtual + used));
857 i810_unmap_buffer(buf);
864 OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
866 OUT_RING( GFX_OP_SCISSOR_INFO );
867 OUT_RING( box[i].x1 | (box[i].y1<<16) );
868 OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
873 OUT_RING( CMD_OP_BATCH_BUFFER );
874 OUT_RING( start | BB1_PROTECTED );
875 OUT_RING( start + used - 4 );
879 } while (++i < nbox);
885 (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
889 OUT_RING( CMD_STORE_DWORD_IDX );
891 OUT_RING( dev_priv->counter );
892 OUT_RING( CMD_STORE_DWORD_IDX );
893 OUT_RING( buf_priv->my_use_idx );
894 OUT_RING( I810_BUF_FREE );
895 OUT_RING( CMD_REPORT_HEAD );
901 static void i810_dma_dispatch_flip( drm_device_t *dev )
903 drm_i810_private_t *dev_priv = dev->dev_private;
904 int pitch = dev_priv->pitch;
907 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
909 dev_priv->current_page,
910 dev_priv->sarea_priv->pf_current_page);
912 i810_kernel_lost_context(dev);
915 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
919 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
920 /* On i815 at least ASYNC is buggy */
921 /* pitch<<5 is from 11.2.8 p158,
922 its the pitch / 8 then left shifted 8,
923 so (pitch >> 3) << 8 */
924 OUT_RING( CMD_OP_FRONTBUFFER_INFO | (pitch<<5) /*| ASYNC_FLIP */ );
925 if ( dev_priv->current_page == 0 ) {
926 OUT_RING( dev_priv->back_offset );
927 dev_priv->current_page = 1;
929 OUT_RING( dev_priv->front_offset );
930 dev_priv->current_page = 0;
936 OUT_RING( CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP );
940 /* Increment the frame counter. The client-side 3D driver must
941 * throttle the framerate by waiting for this value before
942 * performing the swapbuffer ioctl.
944 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
948 void i810_dma_quiescent(drm_device_t *dev)
950 drm_i810_private_t *dev_priv = dev->dev_private;
953 /* printk("%s\n", __FUNCTION__); */
955 i810_kernel_lost_context(dev);
958 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
959 OUT_RING( CMD_REPORT_HEAD );
964 i810_wait_ring( dev, dev_priv->ring.Size - 8 );
967 static int i810_flush_queue(drm_device_t *dev)
969 drm_i810_private_t *dev_priv = dev->dev_private;
970 drm_device_dma_t *dma = dev->dma;
974 /* printk("%s\n", __FUNCTION__); */
976 i810_kernel_lost_context(dev);
979 OUT_RING( CMD_REPORT_HEAD );
983 i810_wait_ring( dev, dev_priv->ring.Size - 8 );
985 for (i = 0; i < dma->buf_count; i++) {
986 drm_buf_t *buf = dma->buflist[ i ];
987 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
989 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
992 if (used == I810_BUF_HARDWARE)
993 DRM_DEBUG("reclaimed from HARDWARE\n");
994 if (used == I810_BUF_CLIENT)
995 DRM_DEBUG("still on client\n");
1001 /* Must be called with the lock held */
1002 void i810_reclaim_buffers(struct file *filp)
1004 drm_file_t *priv = filp->private_data;
1005 drm_device_t *dev = priv->dev;
1006 drm_device_dma_t *dma = dev->dma;
1010 if (!dev->dev_private) return;
1011 if (!dma->buflist) return;
1013 i810_flush_queue(dev);
1015 for (i = 0; i < dma->buf_count; i++) {
1016 drm_buf_t *buf = dma->buflist[ i ];
1017 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1019 if (buf->filp == filp && buf_priv) {
1020 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1023 if (used == I810_BUF_CLIENT)
1024 DRM_DEBUG("reclaimed from client\n");
1025 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
1026 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
1031 int i810_flush_ioctl(struct inode *inode, struct file *filp,
1032 unsigned int cmd, unsigned long arg)
1034 drm_file_t *priv = filp->private_data;
1035 drm_device_t *dev = priv->dev;
1037 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1038 DRM_ERROR("i810_flush_ioctl called without lock held\n");
1042 i810_flush_queue(dev);
1047 int i810_dma_vertex(struct inode *inode, struct file *filp,
1048 unsigned int cmd, unsigned long arg)
1050 drm_file_t *priv = filp->private_data;
1051 drm_device_t *dev = priv->dev;
1052 drm_device_dma_t *dma = dev->dma;
1053 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1054 u32 *hw_status = dev_priv->hw_status_page;
1055 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1056 dev_priv->sarea_priv;
1057 drm_i810_vertex_t vertex;
1059 if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
1062 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1063 DRM_ERROR("i810_dma_vertex called without lock held\n");
1067 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1068 vertex.idx, vertex.used, vertex.discard);
1070 if (vertex.idx < 0 || vertex.idx > dma->buf_count)
1073 i810_dma_dispatch_vertex( dev,
1074 dma->buflist[ vertex.idx ],
1075 vertex.discard, vertex.used );
1077 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
1078 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1079 sarea_priv->last_enqueue = dev_priv->counter-1;
1080 sarea_priv->last_dispatch = (int) hw_status[5];
1087 int i810_clear_bufs(struct inode *inode, struct file *filp,
1088 unsigned int cmd, unsigned long arg)
1090 drm_file_t *priv = filp->private_data;
1091 drm_device_t *dev = priv->dev;
1092 drm_i810_clear_t clear;
1094 if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
1097 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1098 DRM_ERROR("i810_clear_bufs called without lock held\n");
1102 /* GH: Someone's doing nasty things... */
1103 if (!dev->dev_private) {
1107 i810_dma_dispatch_clear( dev, clear.flags,
1109 clear.clear_depth );
1113 int i810_swap_bufs(struct inode *inode, struct file *filp,
1114 unsigned int cmd, unsigned long arg)
1116 drm_file_t *priv = filp->private_data;
1117 drm_device_t *dev = priv->dev;
1119 DRM_DEBUG("i810_swap_bufs\n");
1121 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1122 DRM_ERROR("i810_swap_buf called without lock held\n");
1126 i810_dma_dispatch_swap( dev );
1130 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1133 drm_file_t *priv = filp->private_data;
1134 drm_device_t *dev = priv->dev;
1135 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1136 u32 *hw_status = dev_priv->hw_status_page;
1137 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1138 dev_priv->sarea_priv;
1140 sarea_priv->last_dispatch = (int) hw_status[5];
1144 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1147 drm_file_t *priv = filp->private_data;
1148 drm_device_t *dev = priv->dev;
1151 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1152 u32 *hw_status = dev_priv->hw_status_page;
1153 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1154 dev_priv->sarea_priv;
1156 if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
1159 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1160 DRM_ERROR("i810_dma called without lock held\n");
1166 retcode = i810_dma_get_buffer(dev, &d, filp);
1168 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1169 current->pid, retcode, d.granted);
1171 if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
1173 sarea_priv->last_dispatch = (int) hw_status[5];
/* DRM_IOCTL_I810_COPY: intentionally a no-op on this kernel. */
int i810_copybuf(struct inode *inode,
		 struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
/* DRM_IOCTL_I810_DOCOPY: intentionally a no-op on this kernel. */
int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1194 static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
1195 unsigned int last_render)
1197 drm_i810_private_t *dev_priv = dev->dev_private;
1198 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1199 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
1200 unsigned long address = (unsigned long)buf->bus_address;
1201 unsigned long start = address - dev->agp->base;
1205 i810_kernel_lost_context(dev);
1207 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1209 if (u != I810_BUF_CLIENT) {
1210 DRM_DEBUG("MC found buffer that isn't mine!\n");
1216 sarea_priv->dirty = 0x7f;
1218 DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
1221 dev_priv->counter++;
1222 DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1223 DRM_DEBUG("i810_dma_dispatch_mc\n");
1224 DRM_DEBUG("start : %lx\n", start);
1225 DRM_DEBUG("used : %d\n", used);
1226 DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1228 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
1230 *(u32 *)((u32)buf_priv->virtual + used) = 0;
1234 i810_unmap_buffer(buf);
1237 OUT_RING( CMD_OP_BATCH_BUFFER );
1238 OUT_RING( start | BB1_PROTECTED );
1239 OUT_RING( start + used - 4 );
1245 OUT_RING( CMD_STORE_DWORD_IDX );
1246 OUT_RING( buf_priv->my_use_idx );
1247 OUT_RING( I810_BUF_FREE );
1250 OUT_RING( CMD_STORE_DWORD_IDX );
1252 OUT_RING( last_render );
1257 int i810_dma_mc(struct inode *inode, struct file *filp,
1258 unsigned int cmd, unsigned long arg)
1260 drm_file_t *priv = filp->private_data;
1261 drm_device_t *dev = priv->dev;
1262 drm_device_dma_t *dma = dev->dma;
1263 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1264 u32 *hw_status = dev_priv->hw_status_page;
1265 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1266 dev_priv->sarea_priv;
1269 if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
1273 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1274 DRM_ERROR("i810_dma_mc called without lock held\n");
1278 if (mc.idx >= dma->buf_count || mc.idx < 0)
1281 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
1284 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
1285 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1286 sarea_priv->last_enqueue = dev_priv->counter-1;
1287 sarea_priv->last_dispatch = (int) hw_status[5];
1292 int i810_rstatus(struct inode *inode, struct file *filp,
1293 unsigned int cmd, unsigned long arg)
1295 drm_file_t *priv = filp->private_data;
1296 drm_device_t *dev = priv->dev;
1297 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1299 return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
1302 int i810_ov0_info(struct inode *inode, struct file *filp,
1303 unsigned int cmd, unsigned long arg)
1305 drm_file_t *priv = filp->private_data;
1306 drm_device_t *dev = priv->dev;
1307 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1308 drm_i810_overlay_t data;
1310 data.offset = dev_priv->overlay_offset;
1311 data.physical = dev_priv->overlay_physical;
1312 if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
1317 int i810_fstatus(struct inode *inode, struct file *filp,
1318 unsigned int cmd, unsigned long arg)
1320 drm_file_t *priv = filp->private_data;
1321 drm_device_t *dev = priv->dev;
1322 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1324 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1325 DRM_ERROR("i810_fstatus called without lock held\n");
1328 return I810_READ(0x30008);
1331 int i810_ov0_flip(struct inode *inode, struct file *filp,
1332 unsigned int cmd, unsigned long arg)
1334 drm_file_t *priv = filp->private_data;
1335 drm_device_t *dev = priv->dev;
1336 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1338 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1339 DRM_ERROR("i810_ov0_flip called without lock held\n");
1343 //Tell the overlay to update
1344 I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
1350 /* Not sure why this isn't set all the time:
1352 static void i810_do_init_pageflip( drm_device_t *dev )
1354 drm_i810_private_t *dev_priv = dev->dev_private;
1356 DRM_DEBUG("%s\n", __FUNCTION__);
1357 dev_priv->page_flipping = 1;
1358 dev_priv->current_page = 0;
1359 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1362 int i810_do_cleanup_pageflip( drm_device_t *dev )
1364 drm_i810_private_t *dev_priv = dev->dev_private;
1366 DRM_DEBUG("%s\n", __FUNCTION__);
1367 if (dev_priv->current_page != 0)
1368 i810_dma_dispatch_flip( dev );
1370 dev_priv->page_flipping = 0;
1374 int i810_flip_bufs(struct inode *inode, struct file *filp,
1375 unsigned int cmd, unsigned long arg)
1377 drm_file_t *priv = filp->private_data;
1378 drm_device_t *dev = priv->dev;
1379 drm_i810_private_t *dev_priv = dev->dev_private;
1381 DRM_DEBUG("%s\n", __FUNCTION__);
1383 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1384 DRM_ERROR("i810_flip_buf called without lock held\n");
1388 if (!dev_priv->page_flipping)
1389 i810_do_init_pageflip( dev );
1391 i810_dma_dispatch_flip( dev );