/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        int i;

        for (i = 0; i < 10000; i++) {
                ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
                if (ring->space >= n)
                        return 0;

                dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

                if (ring->head != last_head)
                        i = 0;

                last_head = ring->head;
        }

        return DRM_ERR(EBUSY);
}

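/* Re-read the ring head and tail pointers from the hardware and recompute
 * the free space in the driver's software copy of the ring state.  Flags
 * the RING_EMPTY performance box when the ring has drained.
 */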
void i915_kernel_lost_context(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

        ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->Size;

        if (ring->head == ring->tail)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

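/* Tear down the DMA state set up by i915_initialize(): uninstall the IRQ
 * handler if one is installed, unmap the ring buffer, free the hardware
 * status page, and release the driver-private structure.
 */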
static int i915_dma_cleanup(drm_device_t * dev)
{
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq)
                drm_irq_uninstall(dev);

        if (dev->dev_private) {
                drm_i915_private_t *dev_priv =
                    (drm_i915_private_t *) dev->dev_private;

                if (dev_priv->ring.virtual_start) {
                        drm_core_ioremapfree(&dev_priv->ring.map, dev);
                }

                if (dev_priv->status_page_dmah) {
                        drm_pci_free(dev, dev_priv->status_page_dmah);
                        /* Need to rewrite hardware status page */
                        I915_WRITE(0x02080, 0x1ffff000);
                }

                drm_free(dev->dev_private, sizeof(drm_i915_private_t),
                         DRM_MEM_DRIVER);

                dev->dev_private = NULL;
        }

        return 0;
}

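/* One-time DMA setup, driven by the I915_INIT_DMA ioctl path: locate the
 * SAREA and MMIO map, map the ring buffer supplied by userspace, allocate
 * a PCI-coherent hardware status page and point the hardware at it.  On
 * any failure the partially built state is torn down via i915_dma_cleanup().
 */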
static int i915_initialize(drm_device_t * dev,
                           drm_i915_private_t * dev_priv,
                           drm_i915_init_t * init)
{
        memset(dev_priv, 0, sizeof(drm_i915_private_t));

        DRM_GETSAREA();
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                return DRM_ERR(EINVAL);
        }

        dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
        if (!dev_priv->mmio_map) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("can not find mmio map!\n");
                return DRM_ERR(EINVAL);
        }

        dev_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

        dev_priv->ring.Start = init->ring_start;
        dev_priv->ring.End = init->ring_end;
        dev_priv->ring.Size = init->ring_size;
        dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

        dev_priv->ring.map.offset = init->ring_start;
        dev_priv->ring.map.size = init->ring_size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

        /* We are using separate values as placeholders for mechanisms for
         * private backbuffer/depthbuffer usage.
         */
        dev_priv->use_mi_batchbuffer_start = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        /* Program Hardware Status Page */
        dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
                                                   0xffffffff);

        if (!dev_priv->status_page_dmah) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("Can not allocate hardware status page\n");
                return DRM_ERR(ENOMEM);
        }
        dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        I915_WRITE(0x02080, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        dev->dev_private = (void *)dev_priv;

        return 0;
}

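/* Re-validate existing DMA state for the I915_RESUME_DMA path: check that
 * the SAREA, MMIO map, ring mapping and hardware status page set up by
 * i915_initialize() are still present, then re-point the hardware at the
 * status page.
 */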
static int i915_dma_resume(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        DRM_DEBUG("%s\n", __FUNCTION__);

        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                return DRM_ERR(EINVAL);
        }

        if (!dev_priv->mmio_map) {
                DRM_ERROR("can not find mmio map!\n");
                return DRM_ERR(EINVAL);
        }

        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        /* Program Hardware Status Page */
        if (!dev_priv->hw_status_page) {
                DRM_ERROR("Can not find hardware status page\n");
                return DRM_ERR(EINVAL);
        }
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        I915_WRITE(0x02080, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}

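/* DRM_I915_INIT ioctl: dispatch to initialize, clean up or resume the DMA
 * engine according to init.func.
 */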
static int i915_dma_init(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv;
        drm_i915_init_t init;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
                                 sizeof(init));

        switch (init.func) {
        case I915_INIT_DMA:
                dev_priv = drm_alloc(sizeof(drm_i915_private_t),
                                     DRM_MEM_DRIVER);
                if (dev_priv == NULL)
                        return DRM_ERR(ENOMEM);
                retcode = i915_initialize(dev, dev_priv, &init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}

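/* Copy a user-supplied command buffer into the ring one instruction at a
 * time, rejecting the whole buffer if validate_cmd() flags an instruction
 * or if an instruction's length would run past the end of the buffer.
 */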
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;

        if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
                return DRM_ERR(EINVAL);

        BEGIN_LP_RING(((dwords+1)&~1));

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return DRM_ERR(EINVAL);

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return DRM_ERR(EINVAL);

                OUT_RING(cmd);

                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return DRM_ERR(EINVAL);
                        }
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

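/* Copy cliprect i from userspace, sanity-check it and emit a
 * GFX_OP_DRAWRECT_INFO packet so subsequent rendering is clipped to it.
 */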
static int i915_emit_box(drm_device_t * dev,
                         drm_clip_rect_t __user * boxes,
                         int i, int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_clip_rect_t box;
        RING_LOCALS;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return DRM_ERR(EFAULT);
        }

        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return DRM_ERR(EINVAL);
        }

        BEGIN_LP_RING(6);
        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(DR1);
        OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
        OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
        OUT_RING(DR4);
        OUT_RING(0);
        ADVANCE_LP_RING();

        return 0;
}

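/* Store an incrementing counter value into the hardware status page with
 * CMD_STORE_DWORD_IDX; the value is read back later (READ_BREADCRUMB,
 * hw_status[5]) to tell how far the hardware has progressed.
 */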
static void i915_emit_breadcrumb(drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
}

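/* Dispatch a validated command buffer: for each cliprect (or once if there
 * are none) emit the drawing rectangle followed by the user's commands,
 * then emit a breadcrumb.
 */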
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
                                   drm_i915_cmdbuffer_t * cmd)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return DRM_ERR(EINVAL);
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cmd->cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

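/* Dispatch a hardware batchbuffer: for each cliprect (or once if there are
 * none) emit the drawing rectangle and then point the hardware at the
 * batch, either with MI_BATCH_BUFFER_START or with an MI_BATCH_BUFFER
 * packet carrying explicit start and end addresses.  The start address is
 * ORed with MI_BATCH_NON_SECURE so the hardware applies its non-secure
 * command restrictions.
 */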
static int i915_dispatch_batchbuffer(drm_device_t * dev,
                                     drm_i915_batchbuffer_t * batch)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_clip_rect_t __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return DRM_ERR(EINVAL);
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, boxes, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (dev_priv->use_mi_batchbuffer_start) {
                        BEGIN_LP_RING(2);
                        OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        ADVANCE_LP_RING();
                } else {
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                }
        }

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        i915_emit_breadcrumb(dev);

        return 0;
}

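/* Queue an asynchronous flip between the front and back buffers: flush the
 * map cache, emit CMD_OP_DISPLAYBUFFER_INFO with the new display base,
 * wait for the plane flip to complete, emit a breadcrumb store, and record
 * the new current page in the SAREA.
 */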
static int i915_dispatch_flip(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __FUNCTION__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        BEGIN_LP_RING(2);
        OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}

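/* Wait for the ring to drain completely (all but 8 bytes free), i.e. for
 * all previously queued commands to be fetched by the hardware.
 */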
static int i915_quiescent(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        LOCK_TEST_WITH_RETURN(dev, filp);

        return i915_quiescent(dev);
}

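/* DRM_I915_BATCHBUFFER ioctl: copy in the request, verify the cliprect
 * array is readable, dispatch the batchbuffer and report the hardware's
 * last-dispatched breadcrumb back through the SAREA.
 */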
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_batchbuffer_t batch;
        int ret;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
                                 sizeof(batch));

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch.start, batch.used, batch.num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
                                                       batch.num_cliprects *
                                                       sizeof(drm_clip_rect_t)))
                return DRM_ERR(EFAULT);

        ret = i915_dispatch_batchbuffer(dev, &batch);

        sarea_priv->last_dispatch = (int)hw_status[5];
        return ret;
}

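/* DRM_I915_CMDBUFFER ioctl: like i915_batchbuffer(), but the commands are
 * copied through and validated by the kernel rather than executed from a
 * user batchbuffer.
 */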
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_cmdbuffer_t cmdbuf;
        int ret;

        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
                                 sizeof(cmdbuf));

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (cmdbuf.num_cliprects &&
            DRM_VERIFYAREA_READ(cmdbuf.cliprects,
                                cmdbuf.num_cliprects *
                                sizeof(drm_clip_rect_t))) {
                DRM_ERROR("Fault accessing cliprects\n");
                return DRM_ERR(EFAULT);
        }

        ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                return ret;
        }

        sarea_priv->last_dispatch = (int)hw_status[5];
        return 0;
}

static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        DRM_DEBUG("%s\n", __FUNCTION__);

        LOCK_TEST_WITH_RETURN(dev, filp);

        return i915_dispatch_flip(dev);
}

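/* DRM_I915_GETPARAM ioctl: return a driver parameter (IRQ availability,
 * batchbuffer permission, or the last dispatched breadcrumb) to userspace.
 */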
static int i915_getparam(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t param;
        int value;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
                                 sizeof(param));

        switch (param.param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        default:
                DRM_ERROR("Unknown parameter %d\n", param.param);
                return DRM_ERR(EINVAL);
        }

        if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

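/* DRM_I915_SETPARAM ioctl (master/root only, see the ioctl table below):
 * set driver tunables such as use of MI_BATCH_BUFFER_START, the texture
 * LRU log granularity, and whether the batchbuffer ioctl is allowed.
 */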
static int i915_setparam(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t param;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
                                 sizeof(param));

        switch (param.param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                dev_priv->use_mi_batchbuffer_start = param.value;
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param.value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param.value;
                break;
        default:
                DRM_ERROR("unknown parameter %d\n", param.param);
                return DRM_ERR(EINVAL);
        }

        return 0;
}

int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                i915_mem_takedown(&(dev_priv->agp_heap));
        }
        i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                i915_mem_release(dev, filp, dev_priv->agp_heap);
        }
}

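/* Ioctl dispatch table.  Init, setparam, heap init and heap destroy are
 * restricted to the authenticated master/root; the rest only require DRM
 * authentication.
 */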
drm_ioctl_desc_t i915_ioctls[] = {
        [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
        return 1;
}