1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/delay.h>
40 #include <linux/pagemap.h>
/* Ownership states for a DMA buffer, stored in the per-buffer in_use word
 * that lives on the hardware status page (see i810_freelist_init).  The
 * hardware itself writes I810_BUF_FREE back when a batch completes. */
42 #define I810_BUF_FREE 2
43 #define I810_BUF_CLIENT 1
44 #define I810_BUF_HARDWARE 0
/* Whether a buffer is currently mmap'ed into a client address space. */
46 #define I810_BUF_UNMAPPED 0
47 #define I810_BUF_MAPPED 1
/* Kernels <= 2.4.2 had a plain semaphore for mmap_sem (no rwsem),
 * so down_write falls back to down there. */
49 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
50 #define down_write down
/* Debug aid: dump the hardware status page.  Words 0-5 are fixed fields
 * (interrupt status, ring head pointers, last-render stamp, driver
 * counter); words 6..6+buf_count are the per-buffer in_use states. */
54 static inline void i810_print_status_page(drm_device_t *dev)
56 drm_device_dma_t *dma = dev->dma;
57 drm_i810_private_t *dev_priv = dev->dev_private;
58 u32 *temp = dev_priv->hw_status_page;
61 DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
62 DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
63 DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
64 DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
65 DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
66 DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
/* Buffer freelist state starts at word 6 of the status page. */
67 for(i = 6; i < dma->buf_count + 6; i++) {
68 DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
/* Claim a free DMA buffer for a client, or fail if none is available.
 * The FREE -> CLIENT transition is done with cmpxchg on the in_use word
 * so it races safely against the hardware marking buffers free. */
72 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
74 drm_device_dma_t *dma = dev->dma;
78 /* Linear search might not be the best solution */
80 for (i = 0; i < dma->buf_count; i++) {
81 drm_buf_t *buf = dma->buflist[ i ];
82 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
83 /* In use is already a pointer */
/* Atomically move FREE -> CLIENT; only the winner gets the buffer. */
84 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
86 if (used == I810_BUF_FREE) {
93 /* This should only be called if the buffer is not sent to the hardware
94 * yet, the hardware updates in use for us once its on the ring buffer.
97 static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
99 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
102 /* In use is already a pointer */
/* Atomically return CLIENT -> FREE; anything else means the caller
 * tried to free a buffer it did not own. */
103 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
104 if (used != I810_BUF_CLIENT) {
105 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
/* file_operations temporarily swapped into filp->f_op by i810_map_buffer
 * so that the do_mmap there hits i810_mmap_buffers instead of the
 * generic DRM mmap handler. */
112 static struct file_operations i810_buffer_fops = {
115 .release = DRM(release),
117 .mmap = i810_mmap_buffers,
118 .fasync = DRM(fasync),
/* mmap handler used only through i810_buffer_fops: maps the buffer that
 * i810_map_buffer stashed in dev_priv->mmap_buffer into the caller's
 * address space with remap_pfn_range, and marks it MAPPED. */
121 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
123 drm_file_t *priv = filp->private_data;
125 drm_i810_private_t *dev_priv;
127 drm_i810_buf_priv_t *buf_priv;
131 dev_priv = dev->dev_private;
/* The buffer to map was selected by i810_map_buffer just before do_mmap. */
132 buf = dev_priv->mmap_buffer;
133 buf_priv = buf->dev_private;
/* Device memory: not cacheable state to copy on fork. */
135 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
138 buf_priv->currently_mapped = I810_BUF_MAPPED;
141 if (remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
142 VM_OFFSET(vma) >> PAGE_SHIFT,
143 vma->vm_end - vma->vm_start,
144 vma->vm_page_prot)) return -EAGAIN;
/* Map a DMA buffer into the calling process's address space.  Temporarily
 * swaps filp->f_op for i810_buffer_fops and records the buffer in
 * dev_priv->mmap_buffer so the do_mmap below lands in i810_mmap_buffers.
 * Returns 0 on success or a negative errno from do_mmap.
 *
 * Fix: "&current" had been corrupted to the HTML entity residue
 * "&curren;t" (rendered as a currency sign) in both mmap_sem calls;
 * restored the real expression so the code compiles. */
148 static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
150 drm_file_t *priv = filp->private_data;
151 drm_device_t *dev = priv->dev;
152 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
153 drm_i810_private_t *dev_priv = dev->dev_private;
154 struct file_operations *old_fops;
/* Already mapped: nothing to do. */
157 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
/* Hold mmap_sem for writing around do_mmap. */
160 down_write( &current->mm->mmap_sem );
161 old_fops = filp->f_op;
162 filp->f_op = &i810_buffer_fops;
163 dev_priv->mmap_buffer = buf;
164 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
165 PROT_READ|PROT_WRITE,
168 dev_priv->mmap_buffer = NULL;
169 filp->f_op = old_fops;
/* do_mmap returns a small negative errno encoded in the pointer. */
170 if ((unsigned long)buf_priv->virtual > -1024UL) {
172 DRM_ERROR("mmap error\n");
173 retcode = (signed int)buf_priv->virtual;
174 buf_priv->virtual = NULL;
176 up_write( &current->mm->mmap_sem );
/* Undo i810_map_buffer: munmap the buffer from the current process and
 * mark it UNMAPPED.  Returns the do_munmap result (0 on success).
 *
 * Fix: "&current" had been corrupted to HTML-entity residue "&curren;t"
 * in both mmap_sem calls; restored the real expression. */
181 static int i810_unmap_buffer(drm_buf_t *buf)
183 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* Not mapped: nothing to tear down. */
186 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
189 down_write(&current->mm->mmap_sem);
190 retcode = do_munmap(current->mm,
191 (unsigned long)buf_priv->virtual,
192 (size_t) buf->total);
193 up_write(&current->mm->mmap_sem);
195 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
196 buf_priv->virtual = NULL;
/* Grab a free buffer, map it into the client, and fill in the request
 * descriptor (idx/size/virtual) handed back to user space.  On map
 * failure the buffer is returned to the freelist. */
201 static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
205 drm_i810_buf_priv_t *buf_priv;
208 buf = i810_freelist_get(dev);
211 DRM_DEBUG("retcode=%d\n", retcode);
215 retcode = i810_map_buffer(buf, filp);
/* Mapping failed: give the buffer back so it is not leaked. */
217 i810_freelist_put(dev, buf);
218 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
222 buf_priv = buf->dev_private;
224 d->request_idx = buf->idx;
225 d->request_size = buf->total;
226 d->virtual = buf_priv->virtual;
/* Tear down everything i810_dma_initialize set up: IRQ, ring mapping,
 * hardware status page, the private struct, and the per-buffer kernel
 * mappings.  Safe to call on a partially-initialized dev_private. */
231 int i810_dma_cleanup(drm_device_t *dev)
233 drm_device_dma_t *dma = dev->dma;
235 /* Make sure interrupts are disabled here because the uninstall ioctl
236 * may not have been called from userspace and after dev_private
237 * is freed, it's too late.
239 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
240 DRM(irq_uninstall)(dev);
242 if (dev->dev_private) {
244 drm_i810_private_t *dev_priv =
245 (drm_i810_private_t *) dev->dev_private;
247 if (dev_priv->ring.virtual_start) {
248 DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
249 dev_priv->ring.Size, dev);
251 if (dev_priv->hw_status_page) {
252 pci_free_consistent(dev->pdev, PAGE_SIZE,
253 dev_priv->hw_status_page,
254 dev_priv->dma_status_page);
255 /* Need to rewrite hardware status page */
/* Point the hardware back at a harmless address before the page
 * backing it is freed. */
256 I810_WRITE(0x02080, 0x1ffff000);
258 DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
260 dev->dev_private = NULL;
/* Drop the kernel-side ioremap of each DMA buffer. */
262 for (i = 0; i < dma->buf_count; i++) {
263 drm_buf_t *buf = dma->buflist[ i ];
264 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
265 if ( buf_priv->kernel_virtual && buf->total )
266 DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
/* Busy-wait until at least n bytes are free in the low-priority ring.
 * The 3-second timeout is restarted whenever the head pointer makes
 * progress, so only a genuinely stuck ring reports a lockup. */
272 static int i810_wait_ring(drm_device_t *dev, int n)
274 drm_i810_private_t *dev_priv = dev->dev_private;
275 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
278 unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
280 end = jiffies + (HZ*3);
281 while (ring->space < n) {
282 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
/* Free space is head minus tail (with an 8-byte guard), modulo ring size. */
283 ring->space = ring->head - (ring->tail+8);
284 if (ring->space < 0) ring->space += ring->Size;
286 if (ring->head != last_head) {
/* Head moved: hardware is alive, restart the timeout window. */
287 end = jiffies + (HZ*3);
288 last_head = ring->head;
292 if (time_before(end, jiffies)) {
293 DRM_ERROR("space: %d wanted %d\n", ring->space, n);
294 DRM_ERROR("lockup\n");
/* Resynchronize the software ring bookkeeping (head/tail/space) with the
 * hardware registers; called before the kernel emits ring commands since
 * user space may have advanced the ring in the meantime. */
304 static void i810_kernel_lost_context(drm_device_t *dev)
306 drm_i810_private_t *dev_priv = dev->dev_private;
307 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
309 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
310 ring->tail = I810_READ(LP_RING + RING_TAIL);
311 ring->space = ring->head - (ring->tail+8);
312 if (ring->space < 0) ring->space += ring->Size;
/* Initialize the buffer freelist: each buffer's in_use word is a slot on
 * the hardware status page (starting at my_idx), set to FREE, and each
 * buffer gets a kernel-side ioremap for CPU access.  Fails when there
 * are more buffers than status-page slots. */
315 static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
317 drm_device_dma_t *dma = dev->dma;
319 u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
/* 1019 = remaining u32 slots in the 4K status page after the fixed fields. */
322 if (dma->buf_count > 1019) {
323 /* Not enough space in the status page for the freelist */
327 for (i = 0; i < dma->buf_count; i++) {
328 drm_buf_t *buf = dma->buflist[ i ];
329 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
331 buf_priv->in_use = hw_status++;
332 buf_priv->my_use_idx = my_idx;
335 *buf_priv->in_use = I810_BUF_FREE;
337 buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
/* One-time DMA setup from the init ioctl: locate the SAREA / MMIO /
 * buffer maps, ioremap the ring, record geometry and buffer offsets,
 * allocate + program the hardware status page, and build the freelist.
 * Any failure calls i810_dma_cleanup (after stashing dev_priv in
 * dev->dev_private so cleanup can free it) and returns an error. */
343 static int i810_dma_initialize(drm_device_t *dev,
344 drm_i810_private_t *dev_priv,
345 drm_i810_init_t *init)
347 struct list_head *list;
349 memset(dev_priv, 0, sizeof(drm_i810_private_t));
/* The SAREA is the SHM map flagged as containing the hardware lock. */
351 list_for_each(list, &dev->maplist->head) {
352 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
354 r_list->map->type == _DRM_SHM &&
355 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
356 dev_priv->sarea_map = r_list->map;
360 if (!dev_priv->sarea_map) {
361 dev->dev_private = (void *)dev_priv;
362 i810_dma_cleanup(dev);
363 DRM_ERROR("can not find sarea!\n");
366 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
367 if (!dev_priv->mmio_map) {
368 dev->dev_private = (void *)dev_priv;
369 i810_dma_cleanup(dev);
370 DRM_ERROR("can not find mmio map!\n");
373 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
374 if (!dev->agp_buffer_map) {
375 dev->dev_private = (void *)dev_priv;
376 i810_dma_cleanup(dev);
377 DRM_ERROR("can not find dma buffer map!\n");
/* Driver-private portion lives inside the SAREA at a caller-given offset. */
381 dev_priv->sarea_priv = (drm_i810_sarea_t *)
382 ((u8 *)dev_priv->sarea_map->handle +
383 init->sarea_priv_offset);
385 dev_priv->ring.Start = init->ring_start;
386 dev_priv->ring.End = init->ring_end;
387 dev_priv->ring.Size = init->ring_size;
389 dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
391 init->ring_size, dev);
393 if (dev_priv->ring.virtual_start == NULL) {
394 dev->dev_private = (void *) dev_priv;
395 i810_dma_cleanup(dev);
396 DRM_ERROR("can not ioremap virtual address for"
/* Ring size is a power of two, so size-1 works as a tail mask. */
401 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
403 dev_priv->w = init->w;
404 dev_priv->h = init->h;
405 dev_priv->pitch = init->pitch;
406 dev_priv->back_offset = init->back_offset;
407 dev_priv->depth_offset = init->depth_offset;
408 dev_priv->front_offset = init->front_offset;
410 dev_priv->overlay_offset = init->overlay_offset;
411 dev_priv->overlay_physical = init->overlay_physical;
/* Precomputed DESTBUFFER_INFO words: offset | pitch bits. */
413 dev_priv->front_di1 = init->front_offset | init->pitch_bits;
414 dev_priv->back_di1 = init->back_offset | init->pitch_bits;
415 dev_priv->zi1 = init->depth_offset | init->pitch_bits;
417 /* Program Hardware Status Page */
418 dev_priv->hw_status_page =
419 pci_alloc_consistent(dev->pdev, PAGE_SIZE,
420 &dev_priv->dma_status_page);
421 if (!dev_priv->hw_status_page) {
422 dev->dev_private = (void *)dev_priv;
423 i810_dma_cleanup(dev);
424 DRM_ERROR("Can not allocate hardware status page\n");
427 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
428 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
/* 0x02080 = HWS_PGA: tell the chip where the status page lives. */
430 I810_WRITE(0x02080, dev_priv->dma_status_page);
431 DRM_DEBUG("Enabled hardware status page\n");
433 /* Now we need to init our freelist */
434 if (i810_freelist_init(dev, dev_priv) != 0) {
435 dev->dev_private = (void *)dev_priv;
436 i810_dma_cleanup(dev);
437 DRM_ERROR("Not enough space in the status page for"
441 dev->dev_private = (void *)dev_priv;
446 /* i810 DRM version 1.1 used a smaller init structure with different
447 * ordering of values than is currently used (drm >= 1.2). There is
448 * no defined way to detect the XFree version to correct this problem,
449 * however by checking using this procedure we can detect the correct
452 * #1 Read the Smaller init structure from user-space
453 * #2 Verify the overlay_physical is a valid physical address, or NULL
454 * If it isn't then we have a v1.1 client. Fix up params.
455 * If it is, then we have a 1.2 client... get the rest of the data.
457 int i810_dma_init_compat(drm_i810_init_t *init, unsigned long arg)
460 /* Get v1.1 init data */
461 if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
462 sizeof(drm_i810_pre12_init_t))) {
/* Heuristic: in the v1.1 layout this field holds something other than
 * a small/NULL physical address, which identifies the client version. */
466 if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
468 /* This is a v1.2 client, just get the v1.2 init data */
469 DRM_INFO("Using POST v1.2 init.\n");
470 if (copy_from_user(init, (drm_i810_init_t __user *)arg,
471 sizeof(drm_i810_init_t))) {
476 /* This is a v1.1 client, fix the params */
477 DRM_INFO("Using PRE v1.2 init.\n");
/* Shuffle the misaligned v1.1 fields into their v1.2 positions. */
478 init->pitch_bits = init->h;
479 init->pitch = init->w;
480 init->h = init->overlay_physical;
481 init->w = init->overlay_offset;
482 init->overlay_physical = 0;
483 init->overlay_offset = 0;
/* DMA init/cleanup ioctl entry point.  Reads only the function selector
 * first, then dispatches: legacy (pre-1.4) init via the compat shim,
 * v1.4 init directly, or cleanup. */
489 int i810_dma_init(struct inode *inode, struct file *filp,
490 unsigned int cmd, unsigned long arg)
492 drm_file_t *priv = filp->private_data;
493 drm_device_t *dev = priv->dev;
494 drm_i810_private_t *dev_priv;
495 drm_i810_init_t init;
498 /* Get only the init func */
499 if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
504 /* This case is for backward compatibility. It
505 * handles XFree 4.1.0 and 4.2.0, and has to
506 * do some parameter checking as described below.
507 * It will someday go away.
509 retcode = i810_dma_init_compat(&init, arg);
513 dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
515 if (dev_priv == NULL)
517 retcode = i810_dma_initialize(dev, dev_priv, &init);
521 case I810_INIT_DMA_1_4:
522 DRM_INFO("Using v1.4 init.\n");
523 if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
524 sizeof(drm_i810_init_t))) {
527 dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
529 if (dev_priv == NULL)
531 retcode = i810_dma_initialize(dev, dev_priv, &init);
534 case I810_CLEANUP_DMA:
535 DRM_INFO("DMA Cleanup\n");
536 retcode = i810_dma_cleanup(dev);
545 /* Most efficient way to verify state for the i810 is as it is
546 * emitted. Non-conformant state is silently dropped.
548 * Use 'volatile' & local var tmp to force the emitted values to be
549 * identical to the verified ones.
551 static void i810EmitContextVerified( drm_device_t *dev,
552 volatile unsigned int *code )
554 drm_i810_private_t *dev_priv = dev->dev_private;
559 BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
561 OUT_RING( GFX_OP_COLOR_FACTOR );
562 OUT_RING( code[I810_CTXREG_CF1] );
564 OUT_RING( GFX_OP_STIPPLE );
565 OUT_RING( code[I810_CTXREG_ST1] );
567 for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
570 if ((tmp & (7<<29)) == (3<<29) &&
571 (tmp & (0x1f<<24)) < (0x1d<<24))
576 else printk("constext state dropped!!!\n");
/* Emit client-supplied texture state, verifying each word as it goes to
 * the ring.  Same scheme as i810EmitContextVerified: only GFX_OP words
 * with opcode below 0x1d are emitted; others are silently dropped. */
585 static void i810EmitTexVerified( drm_device_t *dev,
586 volatile unsigned int *code )
588 drm_i810_private_t *dev_priv = dev->dev_private;
593 BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
595 OUT_RING( GFX_OP_MAP_INFO );
596 OUT_RING( code[I810_TEXREG_MI1] );
597 OUT_RING( code[I810_TEXREG_MI2] );
598 OUT_RING( code[I810_TEXREG_MI3] );
600 for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
603 if ((tmp & (7<<29)) == (3<<29) &&
604 (tmp & (0x1f<<24)) < (0x1d<<24))
609 else printk("texture state dropped!!!\n");
619 /* Need to do some additional checking when setting the dest buffer.
621 static void i810EmitDestVerified( drm_device_t *dev,
622 volatile unsigned int *code )
624 drm_i810_private_t *dev_priv = dev->dev_private;
628 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
/* Only the precomputed front/back destination words are allowed; a
 * client cannot point the destination at arbitrary memory. */
630 tmp = code[I810_DESTREG_DI1];
631 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
632 OUT_RING( CMD_OP_DESTBUFFER_INFO );
635 DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
636 tmp, dev_priv->front_di1, dev_priv->back_di1);
/* Depth buffer word is always the kernel's own zi1, never the client's. */
640 OUT_RING( CMD_OP_Z_BUFFER_INFO );
641 OUT_RING( dev_priv->zi1 );
643 OUT_RING( GFX_OP_DESTBUFFER_VARS );
644 OUT_RING( code[I810_DESTREG_DV1] );
646 OUT_RING( GFX_OP_DRAWRECT_INFO );
647 OUT_RING( code[I810_DESTREG_DR1] );
648 OUT_RING( code[I810_DESTREG_DR2] );
649 OUT_RING( code[I810_DESTREG_DR3] );
650 OUT_RING( code[I810_DESTREG_DR4] );
/* Emit whatever state the SAREA dirty mask says has changed (dest
 * buffers, context, texture units 0/1), clearing each bit as it is
 * uploaded. */
658 static void i810EmitState( drm_device_t *dev )
660 drm_i810_private_t *dev_priv = dev->dev_private;
661 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
662 unsigned int dirty = sarea_priv->dirty;
664 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
666 if (dirty & I810_UPLOAD_BUFFERS) {
667 i810EmitDestVerified( dev, sarea_priv->BufferState );
668 sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
671 if (dirty & I810_UPLOAD_CTX) {
672 i810EmitContextVerified( dev, sarea_priv->ContextState );
673 sarea_priv->dirty &= ~I810_UPLOAD_CTX;
676 if (dirty & I810_UPLOAD_TEX0) {
677 i810EmitTexVerified( dev, sarea_priv->TexState[0] );
678 sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
681 if (dirty & I810_UPLOAD_TEX1) {
682 i810EmitTexVerified( dev, sarea_priv->TexState[1] );
683 sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
/* Emit color-blit fills clearing the requested buffers (front/back/
 * depth) over each SAREA cliprect.  Cliprects outside the framebuffer
 * bounds are skipped. */
691 static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
692 unsigned int clear_color,
693 unsigned int clear_zval )
695 drm_i810_private_t *dev_priv = dev->dev_private;
696 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
697 int nbox = sarea_priv->nbox;
698 drm_clip_rect_t *pbox = sarea_priv->boxes;
699 int pitch = dev_priv->pitch;
/* While page-flipped, "front" and "back" swap meanings. */
704 if ( dev_priv->current_page == 1 ) {
705 unsigned int tmp = flags;
707 flags &= ~(I810_FRONT | I810_BACK);
708 if (tmp & I810_FRONT) flags |= I810_BACK;
709 if (tmp & I810_BACK) flags |= I810_FRONT;
712 i810_kernel_lost_context(dev);
714 if (nbox > I810_NR_SAREA_CLIPRECTS)
715 nbox = I810_NR_SAREA_CLIPRECTS;
717 for (i = 0 ; i < nbox ; i++, pbox++) {
718 unsigned int x = pbox->x1;
719 unsigned int y = pbox->y1;
720 unsigned int width = (pbox->x2 - x) * cpp;
721 unsigned int height = pbox->y2 - y;
722 unsigned int start = y * pitch + x * cpp;
/* Reject malformed or out-of-bounds client cliprects. */
724 if (pbox->x1 > pbox->x2 ||
725 pbox->y1 > pbox->y2 ||
726 pbox->x2 > dev_priv->w ||
727 pbox->y2 > dev_priv->h)
/* 0xF0 = PAT copy ROP; BR13_SOLID_PATTERN fills with the color word. */
730 if ( flags & I810_FRONT ) {
732 OUT_RING( BR00_BITBLT_CLIENT |
733 BR00_OP_COLOR_BLT | 0x3 );
734 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
735 OUT_RING( (height << 16) | width );
737 OUT_RING( clear_color );
742 if ( flags & I810_BACK ) {
744 OUT_RING( BR00_BITBLT_CLIENT |
745 BR00_OP_COLOR_BLT | 0x3 );
746 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
747 OUT_RING( (height << 16) | width );
748 OUT_RING( dev_priv->back_offset + start );
749 OUT_RING( clear_color );
754 if ( flags & I810_DEPTH ) {
756 OUT_RING( BR00_BITBLT_CLIENT |
757 BR00_OP_COLOR_BLT | 0x3 );
758 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
759 OUT_RING( (height << 16) | width );
760 OUT_RING( dev_priv->depth_offset + start );
761 OUT_RING( clear_zval );
/* Emit SRC_COPY blits copying the back buffer to the front (honoring the
 * current flip state) over each valid SAREA cliprect. */
768 static void i810_dma_dispatch_swap( drm_device_t *dev )
770 drm_i810_private_t *dev_priv = dev->dev_private;
771 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
772 int nbox = sarea_priv->nbox;
773 drm_clip_rect_t *pbox = sarea_priv->boxes;
774 int pitch = dev_priv->pitch;
779 DRM_DEBUG("swapbuffers\n");
781 i810_kernel_lost_context(dev);
783 if (nbox > I810_NR_SAREA_CLIPRECTS)
784 nbox = I810_NR_SAREA_CLIPRECTS;
786 for (i = 0 ; i < nbox; i++, pbox++)
788 unsigned int w = pbox->x2 - pbox->x1;
789 unsigned int h = pbox->y2 - pbox->y1;
790 unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
791 unsigned int start = dst;
/* Reject malformed or out-of-bounds client cliprects. */
793 if (pbox->x1 > pbox->x2 ||
794 pbox->y1 > pbox->y2 ||
795 pbox->x2 > dev_priv->w ||
796 pbox->y2 > dev_priv->h)
/* 0xCC = straight source-copy ROP. */
800 OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
801 OUT_RING( pitch | (0xCC << 16));
802 OUT_RING( (h << 16) | (w * cpp));
/* Destination: whichever buffer is currently being displayed. */
803 if (dev_priv->current_page == 0)
804 OUT_RING(dev_priv->front_offset + start);
806 OUT_RING(dev_priv->back_offset + start);
/* Source: the other buffer. */
808 if (dev_priv->current_page == 0)
809 OUT_RING(dev_priv->back_offset + start);
811 OUT_RING(dev_priv->front_offset + start);
/* Dispatch a client vertex buffer: emit any dirty state, patch the
 * primitive header and terminator into the (still mapped) buffer, then
 * run it as a protected batch once per cliprect.  Afterwards the buffer
 * is either handed to the hardware (which frees it via the status page)
 * or reclaimed here when the client asked to discard it.
 *
 * Fix: kernel_virtual was cast through (u32) before pointer arithmetic,
 * which truncates the address on 64-bit kernels; use unsigned long. */
817 static void i810_dma_dispatch_vertex(drm_device_t *dev,
822 drm_i810_private_t *dev_priv = dev->dev_private;
823 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
824 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
825 drm_clip_rect_t *box = sarea_priv->boxes;
826 int nbox = sarea_priv->nbox;
827 unsigned long address = (unsigned long)buf->bus_address;
828 unsigned long start = address - dev->agp->base;
832 i810_kernel_lost_context(dev);
834 if (nbox > I810_NR_SAREA_CLIPRECTS)
835 nbox = I810_NR_SAREA_CLIPRECTS;
840 if (sarea_priv->dirty)
841 i810EmitState( dev );
843 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
844 unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
/* First dword becomes the primitive command; length is in dwords - 2. */
846 *(u32 *)buf_priv->kernel_virtual = ((GFX_OP_PRIMITIVE | prim | ((used/4)-2)));
/* Terminate the batch with a zero dword (was a 32-bit pointer cast). */
849 *(u32 *)((unsigned long)buf_priv->kernel_virtual + used) = 0;
853 i810_unmap_buffer(buf);
/* Per cliprect: program the scissor, then run the batch protected. */
860 OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
862 OUT_RING( GFX_OP_SCISSOR_INFO );
863 OUT_RING( box[i].x1 | (box[i].y1<<16) );
864 OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
869 OUT_RING( CMD_OP_BATCH_BUFFER );
870 OUT_RING( start | BB1_PROTECTED );
871 OUT_RING( start + used - 4 );
875 } while (++i < nbox);
/* Hand CLIENT -> HARDWARE; the ring commands below make the hardware
 * write BUF_FREE to this buffer's status-page slot when done. */
881 (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
885 OUT_RING( CMD_STORE_DWORD_IDX );
887 OUT_RING( dev_priv->counter );
888 OUT_RING( CMD_STORE_DWORD_IDX );
889 OUT_RING( buf_priv->my_use_idx );
890 OUT_RING( I810_BUF_FREE );
891 OUT_RING( CMD_REPORT_HEAD );
/* Emit a page flip: flush, point FRONTBUFFER_INFO at the other buffer,
 * wait for the flip event, and publish the new current page in the
 * SAREA so clients can throttle on it. */
897 static void i810_dma_dispatch_flip( drm_device_t *dev )
899 drm_i810_private_t *dev_priv = dev->dev_private;
900 int pitch = dev_priv->pitch;
903 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
905 dev_priv->current_page,
906 dev_priv->sarea_priv->pf_current_page);
908 i810_kernel_lost_context(dev);
911 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
915 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
916 /* On i815 at least ASYNC is buggy */
917 /* pitch<<5 is from 11.2.8 p158,
918 its the pitch / 8 then left shifted 8,
919 so (pitch >> 3) << 8 */
920 OUT_RING( CMD_OP_FRONTBUFFER_INFO | (pitch<<5) /*| ASYNC_FLIP */ );
921 if ( dev_priv->current_page == 0 ) {
922 OUT_RING( dev_priv->back_offset );
923 dev_priv->current_page = 1;
925 OUT_RING( dev_priv->front_offset );
926 dev_priv->current_page = 0;
932 OUT_RING( CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP );
936 /* Increment the frame counter. The client-side 3D driver must
937 * throttle the framerate by waiting for this value before
938 * performing the swapbuffer ioctl.
940 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Flush caches, report the head pointer, then wait until the ring is
 * (almost) completely drained — i.e. the hardware is idle. */
944 void i810_dma_quiescent(drm_device_t *dev)
946 drm_i810_private_t *dev_priv = dev->dev_private;
949 /* printk("%s\n", __FUNCTION__); */
951 i810_kernel_lost_context(dev);
954 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
955 OUT_RING( CMD_REPORT_HEAD );
/* Waiting for Size - 8 free bytes == waiting for an empty ring. */
960 i810_wait_ring( dev, dev_priv->ring.Size - 8 );
/* Drain the ring, then reclaim every buffer the hardware has finished
 * with (HARDWARE -> FREE).  Buffers still owned by clients are left
 * alone. */
963 static int i810_flush_queue(drm_device_t *dev)
965 drm_i810_private_t *dev_priv = dev->dev_private;
966 drm_device_dma_t *dma = dev->dma;
970 /* printk("%s\n", __FUNCTION__); */
972 i810_kernel_lost_context(dev);
975 OUT_RING( CMD_REPORT_HEAD );
/* Ring fully drained at this point; hardware owns nothing. */
979 i810_wait_ring( dev, dev_priv->ring.Size - 8 );
981 for (i = 0; i < dma->buf_count; i++) {
982 drm_buf_t *buf = dma->buflist[ i ];
983 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
985 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
988 if (used == I810_BUF_HARDWARE)
989 DRM_DEBUG("reclaimed from HARDWARE\n");
990 if (used == I810_BUF_CLIENT)
991 DRM_DEBUG("still on client\n");
997 /* Must be called with the lock held */
/* Reclaim all buffers owned by a departing file handle: flush the
 * queue, then force each of that client's buffers back to FREE and
 * mark them unmapped. */
998 void i810_reclaim_buffers(struct file *filp)
1000 drm_file_t *priv = filp->private_data;
1001 drm_device_t *dev = priv->dev;
1002 drm_device_dma_t *dma = dev->dma;
/* DMA may never have been initialized on this device. */
1006 if (!dev->dev_private) return;
1007 if (!dma->buflist) return;
1009 i810_flush_queue(dev);
1011 for (i = 0; i < dma->buf_count; i++) {
1012 drm_buf_t *buf = dma->buflist[ i ];
1013 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* Only touch buffers that belong to this file handle. */
1015 if (buf->filp == filp && buf_priv) {
1016 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1019 if (used == I810_BUF_CLIENT)
1020 DRM_DEBUG("reclaimed from client\n");
1021 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
1022 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
/* ioctl: flush the DMA queue.  Requires the hardware lock. */
1027 int i810_flush_ioctl(struct inode *inode, struct file *filp,
1028 unsigned int cmd, unsigned long arg)
1030 drm_file_t *priv = filp->private_data;
1031 drm_device_t *dev = priv->dev;
1033 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1034 DRM_ERROR("i810_flush_ioctl called without lock held\n");
1038 i810_flush_queue(dev);
/* ioctl: dispatch a client vertex buffer.  Copies the request in,
 * validates the lock and the buffer index, dispatches, and updates the
 * usage counters and SAREA dispatch stamp.
 *
 * Fix: the index bounds check used '>' which let vertex.idx ==
 * dma->buf_count through, indexing one past the end of buflist;
 * use '>=' (matching the check in i810_dma_mc). */
1043 int i810_dma_vertex(struct inode *inode, struct file *filp,
1044 unsigned int cmd, unsigned long arg)
1046 drm_file_t *priv = filp->private_data;
1047 drm_device_t *dev = priv->dev;
1048 drm_device_dma_t *dma = dev->dma;
1049 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1050 u32 *hw_status = dev_priv->hw_status_page;
1051 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1052 dev_priv->sarea_priv;
1053 drm_i810_vertex_t vertex;
1055 if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
1058 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1059 DRM_ERROR("i810_dma_vertex called without lock held\n");
1063 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1064 vertex.idx, vertex.used, vertex.discard);
/* Reject out-of-range buffer indices (was '>', an off-by-one). */
1066 if (vertex.idx < 0 || vertex.idx >= dma->buf_count)
1069 i810_dma_dispatch_vertex( dev,
1070 dma->buflist[ vertex.idx ],
1071 vertex.discard, vertex.used );
1073 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
1074 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1075 sarea_priv->last_enqueue = dev_priv->counter-1;
/* Word 5 of the status page is the hardware-updated dispatch counter. */
1076 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: clear the requested buffers with the given color/depth values.
 * Requires the hardware lock and an initialized device. */
1083 int i810_clear_bufs(struct inode *inode, struct file *filp,
1084 unsigned int cmd, unsigned long arg)
1086 drm_file_t *priv = filp->private_data;
1087 drm_device_t *dev = priv->dev;
1088 drm_i810_clear_t clear;
1090 if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
1093 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1094 DRM_ERROR("i810_clear_bufs called without lock held\n");
1098 /* GH: Someone's doing nasty things... */
1099 if (!dev->dev_private) {
1103 i810_dma_dispatch_clear( dev, clear.flags,
1105 clear.clear_depth );
/* ioctl: blit the back buffer to the front over the SAREA cliprects.
 * Requires the hardware lock. */
1109 int i810_swap_bufs(struct inode *inode, struct file *filp,
1110 unsigned int cmd, unsigned long arg)
1112 drm_file_t *priv = filp->private_data;
1113 drm_device_t *dev = priv->dev;
1115 DRM_DEBUG("i810_swap_bufs\n");
1117 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1118 DRM_ERROR("i810_swap_buf called without lock held\n");
1122 i810_dma_dispatch_swap( dev );
/* ioctl: refresh the SAREA last_dispatch stamp from the hardware status
 * page (word 5 is the hardware-updated dispatch counter). */
1126 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1129 drm_file_t *priv = filp->private_data;
1130 drm_device_t *dev = priv->dev;
1131 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1132 u32 *hw_status = dev_priv->hw_status_page;
1133 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1134 dev_priv->sarea_priv;
1136 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: hand a free, mapped DMA buffer to the client.  Copies the
 * request in, requires the lock, fills in the grant via
 * i810_dma_get_buffer, and copies the descriptor back out. */
1140 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1143 drm_file_t *priv = filp->private_data;
1144 drm_device_t *dev = priv->dev;
1147 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1148 u32 *hw_status = dev_priv->hw_status_page;
1149 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1150 dev_priv->sarea_priv;
1152 if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d)))
1155 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1156 DRM_ERROR("i810_dma called without lock held\n");
1162 retcode = i810_dma_get_buffer(dev, &d, filp);
1164 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1165 current->pid, retcode, d.granted);
/* NOTE(review): the cast says drm_dma_t but d is drm_i810_dma_t —
 * the copy size is sizeof(d), so behavior looks right; verify. */
1167 if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
1169 sarea_priv->last_dispatch = (int) hw_status[5];
/* Legacy no-op ioctls kept for interface compatibility: the copy path
 * is not needed on 2.4.x and later. */
1174 int i810_copybuf(struct inode *inode,
1179 /* Never copy - 2.4.x doesn't need it */
1183 int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1186 /* Never copy - 2.4.x doesn't need it */
/* Dispatch a motion-compensation buffer as a protected batch, then have
 * the hardware free the buffer via its status-page slot and store the
 * caller-provided last_render stamp.
 *
 * Fix: buf_priv->virtual was cast through (u32) before pointer
 * arithmetic, truncating the address on 64-bit kernels; use
 * unsigned long. */
1190 static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
1191 unsigned int last_render)
1193 drm_i810_private_t *dev_priv = dev->dev_private;
1194 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1195 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
1196 unsigned long address = (unsigned long)buf->bus_address;
1197 unsigned long start = address - dev->agp->base;
1201 i810_kernel_lost_context(dev);
/* Take ownership CLIENT -> HARDWARE; bail if the client didn't own it. */
1203 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1205 if (u != I810_BUF_CLIENT) {
1206 DRM_DEBUG("MC found buffer that isn't mine!\n");
/* Force a full state re-emit on the next vertex dispatch. */
1212 sarea_priv->dirty = 0x7f;
1214 DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
1217 dev_priv->counter++;
1218 DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1219 DRM_DEBUG("i810_dma_dispatch_mc\n");
1220 DRM_DEBUG("start : %lx\n", start);
1221 DRM_DEBUG("used : %d\n", used);
1222 DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1224 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
/* Terminate the batch with a zero dword (was a 32-bit pointer cast). */
1226 *(u32 *)((unsigned long)buf_priv->virtual + used) = 0;
1230 i810_unmap_buffer(buf);
1233 OUT_RING( CMD_OP_BATCH_BUFFER );
1234 OUT_RING( start | BB1_PROTECTED );
1235 OUT_RING( start + used - 4 );
/* Hardware writes BUF_FREE to this buffer's status-page slot when done. */
1241 OUT_RING( CMD_STORE_DWORD_IDX );
1242 OUT_RING( buf_priv->my_use_idx );
1243 OUT_RING( I810_BUF_FREE );
1246 OUT_RING( CMD_STORE_DWORD_IDX );
1248 OUT_RING( last_render );
/* ioctl: dispatch a motion-compensation buffer.  Validates the lock and
 * index (note: uses '>=' here, correctly), dispatches, and updates the
 * usage counters and SAREA dispatch stamp. */
1253 int i810_dma_mc(struct inode *inode, struct file *filp,
1254 unsigned int cmd, unsigned long arg)
1256 drm_file_t *priv = filp->private_data;
1257 drm_device_t *dev = priv->dev;
1258 drm_device_dma_t *dma = dev->dma;
1259 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1260 u32 *hw_status = dev_priv->hw_status_page;
1261 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1262 dev_priv->sarea_priv;
1265 if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc)))
1269 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1270 DRM_ERROR("i810_dma_mc called without lock held\n");
1274 if (mc.idx >= dma->buf_count || mc.idx < 0)
1277 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
1280 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
1281 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1282 sarea_priv->last_enqueue = dev_priv->counter-1;
1283 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: return the last-render stamp (word 4 of the hardware status
 * page) directly as the ioctl return value. */
1288 int i810_rstatus(struct inode *inode, struct file *filp,
1289 unsigned int cmd, unsigned long arg)
1291 drm_file_t *priv = filp->private_data;
1292 drm_device_t *dev = priv->dev;
1293 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1295 return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
/* ioctl: report the overlay register offset and physical address to
 * user space. */
1298 int i810_ov0_info(struct inode *inode, struct file *filp,
1299 unsigned int cmd, unsigned long arg)
1301 drm_file_t *priv = filp->private_data;
1302 drm_device_t *dev = priv->dev;
1303 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1304 drm_i810_overlay_t data;
1306 data.offset = dev_priv->overlay_offset;
1307 data.physical = dev_priv->overlay_physical;
1308 if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
/* ioctl: read and return the flip status register (0x30008).  Requires
 * the hardware lock. */
1313 int i810_fstatus(struct inode *inode, struct file *filp,
1314 unsigned int cmd, unsigned long arg)
1316 drm_file_t *priv = filp->private_data;
1317 drm_device_t *dev = priv->dev;
1318 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1320 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1321 DRM_ERROR("i810_fstatus called without lock held\n");
1324 return I810_READ(0x30008);
/* ioctl: kick the hardware overlay to re-read its register block.
 * Requires the hardware lock. */
1327 int i810_ov0_flip(struct inode *inode, struct file *filp,
1328 unsigned int cmd, unsigned long arg)
1330 drm_file_t *priv = filp->private_data;
1331 drm_device_t *dev = priv->dev;
1332 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1334 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1335 DRM_ERROR("i810_ov0_flip called without lock held\n");
1339 //Tell the overlay to update
/* Bit 31 signals the overlay to reload from the given physical address. */
1340 I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
1346 /* Not sure why this isn't set all the time:
/* Enable page-flipping mode, starting on page 0, and publish that in
 * the SAREA. */
1348 static void i810_do_init_pageflip( drm_device_t *dev )
1350 drm_i810_private_t *dev_priv = dev->dev_private;
1352 DRM_DEBUG("%s\n", __FUNCTION__);
1353 dev_priv->page_flipping = 1;
1354 dev_priv->current_page = 0;
1355 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* Disable page-flipping; flip back to page 0 first if we are currently
 * displaying page 1. */
1358 int i810_do_cleanup_pageflip( drm_device_t *dev )
1360 drm_i810_private_t *dev_priv = dev->dev_private;
1362 DRM_DEBUG("%s\n", __FUNCTION__);
1363 if (dev_priv->current_page != 0)
1364 i810_dma_dispatch_flip( dev );
1366 dev_priv->page_flipping = 0;
/* ioctl: perform a page flip, lazily enabling page-flipping mode on
 * first use.  Requires the hardware lock. */
1370 int i810_flip_bufs(struct inode *inode, struct file *filp,
1371 unsigned int cmd, unsigned long arg)
1373 drm_file_t *priv = filp->private_data;
1374 drm_device_t *dev = priv->dev;
1375 drm_i810_private_t *dev_priv = dev->dev_private;
1377 DRM_DEBUG("%s\n", __FUNCTION__);
1379 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1380 DRM_ERROR("i810_flip_buf called without lock held\n");
1384 if (!dev_priv->page_flipping)
1385 i810_do_init_pageflip( dev );
1387 i810_dma_dispatch_flip( dev );
/* Thin adapters plugged into the DRM core's driver function table
 * (see i810_driver_register_fns). */
1391 static void i810_driver_pretakedown(drm_device_t *dev)
1393 i810_dma_cleanup( dev );
1396 static void i810_driver_release(drm_device_t *dev, struct file *filp)
1398 i810_reclaim_buffers(filp);
1401 static int i810_driver_dma_quiescent(drm_device_t *dev)
1403 i810_dma_quiescent( dev );
1407 void i810_driver_register_fns(drm_device_t *dev)
1409 dev->driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE;
1410 dev->dev_priv_size = sizeof(drm_i810_buf_priv_t);
1411 dev->fn_tbl.pretakedown = i810_driver_pretakedown;
1412 dev->fn_tbl.release = i810_driver_release;
1413 dev->fn_tbl.dma_quiescent = i810_driver_dma_quiescent;
1414 dev->fn_tbl.reclaim_buffers = i810_reclaim_buffers;
1417 dev->types[6] = _DRM_STAT_IRQ;
1418 dev->types[7] = _DRM_STAT_PRIMARY;
1419 dev->types[8] = _DRM_STAT_SECONDARY;
1420 dev->types[9] = _DRM_STAT_DMA;