/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

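/*
 * Kick off one DMA transfer.  The sequencing here is inferred from the
 * register names rather than from GLINT/GMX documentation: wait for two
 * free slots in the input FIFO, latch the transfer address, wait for
 * the DMA command engine to report ready (status 4), then write the
 * length in 32-bit words, which starts the transfer.
 */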
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
                                      unsigned long length)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        mb();
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE(GAMMA_DMAADDRESS, address);

        while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
                cpu_relax();

        GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

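/*
 * Wait for a single-rasterizer card to go quiescent: drain the pending
 * DMA count, then (presumably) set FilterMode to pass sync tags, issue
 * a Sync, and poll the output FIFO until the sync tag comes back.
 */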
void gamma_dma_quiescent_single(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

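/*
 * Dual-MX variant of the quiescence wait: broadcast the FilterMode/Sync
 * writes to both rasterizers and read the sync tag back from each; the
 * 0x10000 offset appears to address the second MX's register space.
 */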
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        /* Read from first MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

        /* Read from second MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
                        cpu_relax();
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

void gamma_dma_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        while (GAMMA_READ(GAMMA_DMACOUNT))
                cpu_relax();
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        return (!GAMMA_READ(GAMMA_DMACOUNT));
}

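/*
 * Interrupt handler.  The three register writes below appear to re-arm
 * the delay timer and acknowledge the command/general interrupt flags.
 * If the DMA engine has drained, free the buffer that just completed
 * and schedule the bottom half to dispatch the next one.
 */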
irqreturn_t gamma_dma_service( DRM_IRQ_ARGS )
{
        drm_device_t     *dev = (drm_device_t *)arg;
        drm_device_dma_t *dma = dev->dma;
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        /* FIXME: should check whether we're actually interested in the interrupt? */
        atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* presumably ~0.05s */
        GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
        GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
        if (gamma_dma_is_ready(dev)) {
                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag))
                        return IRQ_HANDLED;
                if (dma->this_buffer) {
                        gamma_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                /* Dispatch new buffer */
                schedule_work(&dev->work);
        }
        return IRQ_HANDLED;
}

/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
        unsigned long    address;
        unsigned long    length;
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;

        if (test_and_set_bit(0, &dev->dma_flag))
                return -EBUSY;

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf     = dma->next_buffer;
        /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
        /* So we pass the buffer index value into the physical page offset */
        address = buf->idx << 12;
        length  = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!gamma_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, current->pid);
                }
        } else {
                if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
                                                DRM_KERNEL_CONTEXT)) {
                        clear_bit(0, &dev->dma_flag);
                        return -EBUSY;
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                /* PRE: dev->last_context != buf->context */
                if (DRM(context_switch)(dev, dev->last_context,
                                        buf->context)) {
                        DRM(clear_next_buffer)(dev);
                        DRM(free_buffer)(dev, buf);
                }
                retcode = -EBUSY;
                goto cleanup;

                /* POST: we will wait for the context switch and will
                   dispatch on a later call when dev->last_context ==
                   buf->context.  NOTE WE HOLD THE LOCK THROUGHOUT THIS
                   TIME! */
        }

        gamma_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;

        /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
        address = buf->idx << 12;

        gamma_dma_dispatch(dev, address, length);
        gamma_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
        atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:
        clear_bit(0, &dev->dma_flag);

        return retcode;
}

static void gamma_dma_timer_bh(unsigned long dev)
{
        gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(void *dev)
{
        gamma_dma_schedule(dev, 0);
}

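/*
 * Scheduler: pull buffers off the wait queues and hand them to
 * gamma_do_dma().  dev->interrupt_flag serves as a non-reentrancy
 * guard; the expire counter bounds how many times we loop back when a
 * schedule was missed or more work arrived while the engine was ready.
 */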
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
        int              next;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                /* Not reentrant */
                atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
                return -EBUSY;
        }
        missed = atomic_read(&dev->counts[10]);

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return -EBUSY;
        }
        if (dma->next_buffer) {
                /* Unsent buffer that was previously selected, but that
                   couldn't be sent because the lock could not be
                   obtained or the DMA engine wasn't ready.  Try
                   again. */
                if (!(retcode = gamma_do_dma(dev, locked)))
                        ++processed;
        } else {
                do {
                        next = gamma_select_queue(dev, gamma_dma_timer_bh);
                        if (next >= 0) {
                                q   = dev->queuelist[next];
                                buf = gamma_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue  = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        gamma_clear_next_buffer(dev);
                                        gamma_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = gamma_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dev->counts[10])) {
                        if (gamma_dma_is_ready(dev))
                                goto again;
                }
                if (processed && gamma_dma_is_ready(dev)) {
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

        return retcode;
}

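/*
 * Priority (out-of-band) DMA: dispatch the caller's buffers directly,
 * bypassing the scheduler.  Interrupt handling is turned off for the
 * duration, and unless _DRM_DMA_WHILE_LOCKED was requested the hardware
 * lock is taken here and freed again in the cleanup path.
 */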
static int gamma_dma_priority(struct file *filp,
                              drm_device_t *dev, drm_dma_t *d)
{
        unsigned long     address;
        unsigned long     length;
        int               must_free = 0;
        int               retcode   = 0;
        int               i;
        int               idx;
        drm_buf_t         *buf;
        drm_buf_t         *last_buf = NULL;
        drm_device_dma_t  *dma      = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

        /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current))
                        return -EINTR;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!gamma_lock_take(&dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT)) {
                        schedule();
                        if (signal_pending(current)) {
                                clear_bit(0, &dev->interrupt_flag);
                                return -EINTR;
                        }
                }
                ++must_free;
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[idx];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d using buffer not owned\n",
                                  current->pid);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using buffer on list %d\n",
                                  current->pid, buf->list);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                /* This isn't a race condition on buf->list, since our
                   concern is the buffer reclaim during the time the
                   process closes the /dev/drm? handle, so it can't
                   also be doing DMA. */
                buf->list         = DRM_LIST_PRIO;
                buf->used         = d->send_sizes[i];
                buf->context      = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address           = (unsigned long)buf->address;
                length            = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        add_wait_queue(&dev->context_wait, &entry);
                        current->state = TASK_INTERRUPTIBLE;
                        /* PRE: dev->last_context != buf->context */
                        DRM(context_switch)(dev, dev->last_context,
                                            buf->context);
                        /* POST: we will wait for the context switch and
                           will dispatch on a later call when
                           dev->last_context == buf->context.  NOTE WE
                           HOLD THE LOCK THROUGHOUT THIS TIME! */
                        schedule();
                        current->state = TASK_RUNNING;
                        remove_wait_queue(&dev->context_wait, &entry);
                        if (signal_pending(current)) {
                                retcode = -EINTR;
                                goto cleanup;
                        }
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

                gamma_dma_dispatch(dev, address, length);
                atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
                atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

                if (last_buf) {
                        gamma_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }

cleanup:
        if (last_buf) {
                gamma_dma_ready(dev);
                gamma_free_buffer(dev, last_buf);
        }

        if (must_free && !dev->context_flag) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return retcode;
}

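/*
 * Queue buffers through the scheduler.  With _DRM_DMA_BLOCK set, sleep
 * on the last buffer's wait queue until it has been dispatched and
 * completed, or a signal arrives.
 */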
static int gamma_dma_send_buffers(struct file *filp,
                                  drm_device_t *dev, drm_dma_t *d)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_buf_t         *last_buf = NULL;
        int               retcode   = 0;
        drm_device_dma_t  *dma      = dev->dma;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[d->send_indices[d->send_count-1]];
                add_wait_queue(&last_buf->dma_wait, &entry);
        }

        if ((retcode = gamma_dma_enqueue(filp, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        remove_wait_queue(&last_buf->dma_wait, &entry);
                return retcode;
        }

        gamma_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", current->pid);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!last_buf->waiting && !last_buf->pending)
                                break; /* finished */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -EINTR; /* Can't restart */
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                DRM_DEBUG("%d running\n", current->pid);
                remove_wait_queue(&last_buf->dma_wait, &entry);
                if (!retcode
                    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
                        if (!waitqueue_active(&last_buf->dma_wait)) {
                                gamma_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  (long)DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  current->pid);
                }
        }
        return retcode;
}

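/*
 * ioctl entry point for DMA requests: validate the userspace counts,
 * send any buffers the client passed in (priority or scheduled), then
 * try to grant fresh buffers back to the caller.
 */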
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t        *priv     = filp->private_data;
        drm_device_t      *dev      = priv->dev;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         d;

        if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
                return -EFAULT;

        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          current->pid, d.send_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.send_count) {
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = gamma_dma_priority(filp, dev, &d);
                else
                        retcode = gamma_dma_send_buffers(filp, dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = gamma_dma_get_buffers(filp, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
                return -EFAULT;

        return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

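/*
 * Set up driver-private state and the page table used by the DMA
 * engine; the table itself lives in the buffer at index
 * GLINT_DRI_BUF_COUNT (see the loop below).
 */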
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
        drm_gamma_private_t *dev_priv;
        drm_device_dma_t    *dma = dev->dma;
        drm_buf_t           *buf;
        int i;
        struct list_head    *list;
        unsigned long       *pgt;

        DRM_DEBUG( "%s\n", __FUNCTION__ );

        dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
                               DRM_MEM_DRIVER );
        if ( !dev_priv )
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

        dev_priv->num_rast = init->num_rast;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
                if( r_list->map &&
                    r_list->map->type == _DRM_SHM &&
                    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
                        dev_priv->sarea = r_list->map;
                        break;
                }
        }

        DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
        DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
        DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
        DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );

        dev_priv->sarea_priv = (drm_gamma_sarea_t *)
                ((u8 *)dev_priv->sarea->handle +
                 init->sarea_priv_offset);
        if (init->pcimode) {
                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = virt_to_phys((void*)buf->address) | 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
        } else {
                DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );

                DRM_IOREMAP( dev_priv->buffers, dev );

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;

                for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
                        buf = dma->buflist[i];
                        *pgt = (unsigned long)buf->address + 0x07;
                        pgt++;
                }

                buf = dma->buflist[GLINT_DRI_BUF_COUNT];

                while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
                        cpu_relax();
                GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
        }
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();
        GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
        GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

        return 0;
}

int gamma_do_cleanup_dma( drm_device_t *dev )
{
        DRM_DEBUG( "%s\n", __FUNCTION__ );

#if _HAVE_DMA_IRQ
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if ( dev->irq ) DRM(irq_uninstall)(dev);
#endif

        if ( dev->dev_private ) {
                drm_gamma_private_t *dev_priv = dev->dev_private;

                if ( dev_priv->buffers != NULL )
                        DRM_IOREMAPFREE( dev_priv->buffers, dev );

                DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
                           DRM_MEM_DRIVER );
                dev->dev_private = NULL;
        }

        return 0;
}

int gamma_dma_init( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_init_t init;

        LOCK_TEST_WITH_RETURN( dev, filp );

        if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
                return -EFAULT;

        switch ( init.func ) {
        case GAMMA_INIT_DMA:
                return gamma_do_init_dma( dev, &init );
        case GAMMA_CLEANUP_DMA:
                return gamma_do_cleanup_dma( dev );
        }

        return -EINVAL;
}

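/*
 * Screen-to-screen copy via DMA.  The command stream that would perform
 * the copy is still stubbed out (#if 0 below); note it writes through a
 * "buffer" pointer that is never declared or dispatched, so the block
 * would not compile if enabled as-is.
 */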
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
        drm_device_dma_t    *dma = dev->dma;
        unsigned int        *screenbuf;

        DRM_DEBUG( "%s\n", __FUNCTION__ );

        /* We've DRM_RESTRICTED this DMA buffer */

        screenbuf = dma->buflist[GLINT_DRI_BUF_COUNT + 1]->address;

#if 0
        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x200;      /* Allow FBColor through */
        *buffer++ = 0x53B;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x53A;      /* Tag */
        *buffer++ = copy->SrcAddress;
        *buffer++ = 0x539;      /* Tag */
        *buffer++ = copy->WidthHeight; /* Initiates transfer */
        *buffer++ = 0x53C;      /* Tag - DMAOutputAddress */
        *buffer++ = virt_to_phys((void*)screenbuf);
        *buffer++ = 0x53D;      /* Tag - DMAOutputCount */
        *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until .. */

        /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
        /* Now put it back to the screen */

        *buffer++ = 0x180;      /* Tag (FilterMode) */
        *buffer++ = 0x400;      /* Allow Sync through */
        *buffer++ = 0x538;      /* Tag - DMARectangleReadTarget */
        *buffer++ = 0x155;      /* FBSourceData | count */
        *buffer++ = 0x537;      /* Tag */
        *buffer++ = copy->Pitch;
        *buffer++ = 0x536;      /* Tag */
        *buffer++ = copy->DstAddress;
        *buffer++ = 0x535;      /* Tag */
        *buffer++ = copy->WidthHeight; /* Initiates transfer */
        *buffer++ = 0x530;      /* Tag - DMAAddr */
        *buffer++ = virt_to_phys((void*)screenbuf);
        *buffer++ = 0x531;
        *buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

        /* need to dispatch it now */

        return 0;
}

int gamma_dma_copy( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_gamma_copy_t copy;

        if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
                return -EFAULT;

        return gamma_do_copy_dma( dev, &copy );
}

/* =============================================================
 * Per Context SAREA Support
 */

int gamma_getsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_ctx_priv_map_t request;
        drm_map_t *map;

        if (copy_from_user(&request,
                           (drm_ctx_priv_map_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }

        map = dev->context_sareas[request.ctx_id];
        up(&dev->struct_sem);

        request.handle = map->handle;
        if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
                return -EFAULT;
        return 0;
}

int gamma_setsareactx(struct inode *inode, struct file *filp,
                      unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_ctx_priv_map_t request;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        if (copy_from_user(&request,
                           (drm_ctx_priv_map_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        down(&dev->struct_sem);
        r_list = NULL;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                if (r_list->map &&
                    r_list->map->handle == request.handle)
                        break;
        }
        if (list == &(dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        up(&dev->struct_sem);

        if (!map)
                return -EINVAL;

        down(&dev->struct_sem);
        if ((int)request.ctx_id >= dev->max_context) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        dev->context_sareas[request.ctx_id] = map;
        up(&dev->struct_sem);
        return 0;
}

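/*
 * IRQ install hooks: preinstall quiets the command engine before the
 * handler is hooked up, postinstall programs the delay timer and what
 * appear to be the two interrupt-enable registers, and uninstall writes
 * zeros back in reverse order.
 */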
void DRM(driver_irq_preinstall)( drm_device_t *dev )
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
                cpu_relax();

        GAMMA_WRITE( GAMMA_GCOMMANDMODE,        0x00000004 );
        GAMMA_WRITE( GAMMA_GDMACONTROL,         0x00000000 );
}

void DRM(driver_irq_postinstall)( drm_device_t *dev )
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE( GAMMA_GINTENABLE,          0x00002001 );
        GAMMA_WRITE( GAMMA_COMMANDINTENABLE,    0x00000008 );
        GAMMA_WRITE( GAMMA_GDELAYTIMER,         0x00039090 );
}

void DRM(driver_irq_uninstall)( drm_device_t *dev )
{
        drm_gamma_private_t *dev_priv =
                                (drm_gamma_private_t *)dev->dev_private;
        if (!dev_priv)
                return;

        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                cpu_relax();

        GAMMA_WRITE( GAMMA_GDELAYTIMER,         0x00000000 );
        GAMMA_WRITE( GAMMA_COMMANDINTENABLE,    0x00000000 );
        GAMMA_WRITE( GAMMA_GINTENABLE,          0x00000000 );
}