/* From linux-2.6.6: drivers/char/drm/gamma_old_dma.h */
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */


/* Gamma-specific code pulled from drm_dma.h:
 */

36 void DRM(clear_next_buffer)(drm_device_t *dev)
37 {
38         drm_device_dma_t *dma = dev->dma;
39
40         dma->next_buffer = NULL;
41         if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
42                 wake_up_interruptible(&dma->next_queue->flush_queue);
43         }
44         dma->next_queue  = NULL;
45 }
/* Choose which context's queue should be serviced by DMA next.
 *
 * Returns the selected context number, or -1 when no context switch
 * should happen right now: the device or its queues are not yet
 * initialized, nothing is waiting, or the switch has been deferred to
 * the one-shot timer armed with `wrapper`.
 *
 * Selection order: (1) the kernel context if it has "while locked"
 * buffers waiting; (2) the last-run context, if its time slice has not
 * expired and it still has waiters; (3) round-robin scan for any queue
 * with waiters, resuming after the last checked index.
 */
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
        int        i;
        int        candidate = -1;
        /* NOTE(review): jiffies is unsigned long; truncating to int here
           looks intentional-for-the-era but wraps early on 64-bit —
           confirm the <=/+DRM_TIME_SLICE comparisons below tolerate it. */
        int        j         = jiffies;

        if (!dev) {
                DRM_ERROR("No device\n");
                return -1;
        }
        if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
                                /* This only happens between the time the
                                   interrupt is initialized and the time
                                   the queues are initialized. */
                return -1;
        }

                                /* Doing "while locked" DMA? */
        if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
                return DRM_KERNEL_CONTEXT;
        }

                                /* If there are buffers on the last_context
                                   queue, and we have not been executing
                                   this context very long, continue to
                                   execute this context. */
        if (dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j
            && DRM_WAITCOUNT(dev, dev->last_context)) {
                return dev->last_context;
        }

                                /* Otherwise, find a candidate */
        for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
                if (DRM_WAITCOUNT(dev, i)) {
                        candidate = dev->last_checked = i;
                        break;
                }
        }

        /* Wrap around: scan the queues below last_checked too. */
        if (candidate < 0) {
                for (i = 0; i < dev->queue_count; i++) {
                        if (DRM_WAITCOUNT(dev, i)) {
                                candidate = dev->last_checked = i;
                                break;
                        }
                }
        }

        /* A different context was chosen but the current one still owns
           the remainder of its time slice: defer the switch by (re)arming
           the timer to fire at slice expiry, and report "no switch now". */
        if (wrapper
            && candidate >= 0
            && candidate != dev->last_context
            && dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j) {
                if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
                        del_timer(&dev->timer);
                        dev->timer.function = wrapper;
                        dev->timer.data     = (unsigned long)dev;
                        dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
                        add_timer(&dev->timer);
                }
                return -1;
        }

        return candidate;
}


115 int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
116 {
117         drm_file_t    *priv   = filp->private_data;
118         drm_device_t  *dev    = priv->dev;
119         int               i;
120         drm_queue_t       *q;
121         drm_buf_t         *buf;
122         int               idx;
123         int               while_locked = 0;
124         drm_device_dma_t  *dma = dev->dma;
125         DECLARE_WAITQUEUE(entry, current);
126
127         DRM_DEBUG("%d\n", d->send_count);
128
129         if (d->flags & _DRM_DMA_WHILE_LOCKED) {
130                 int context = dev->lock.hw_lock->lock;
131
132                 if (!_DRM_LOCK_IS_HELD(context)) {
133                         DRM_ERROR("No lock held during \"while locked\""
134                                   " request\n");
135                         return -EINVAL;
136                 }
137                 if (d->context != _DRM_LOCKING_CONTEXT(context)
138                     && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
139                         DRM_ERROR("Lock held by %d while %d makes"
140                                   " \"while locked\" request\n",
141                                   _DRM_LOCKING_CONTEXT(context),
142                                   d->context);
143                         return -EINVAL;
144                 }
145                 q = dev->queuelist[DRM_KERNEL_CONTEXT];
146                 while_locked = 1;
147         } else {
148                 q = dev->queuelist[d->context];
149         }
150
151
152         atomic_inc(&q->use_count);
153         if (atomic_read(&q->block_write)) {
154                 add_wait_queue(&q->write_queue, &entry);
155                 atomic_inc(&q->block_count);
156                 for (;;) {
157                         current->state = TASK_INTERRUPTIBLE;
158                         if (!atomic_read(&q->block_write)) break;
159                         schedule();
160                         if (signal_pending(current)) {
161                                 atomic_dec(&q->use_count);
162                                 remove_wait_queue(&q->write_queue, &entry);
163                                 return -EINTR;
164                         }
165                 }
166                 atomic_dec(&q->block_count);
167                 current->state = TASK_RUNNING;
168                 remove_wait_queue(&q->write_queue, &entry);
169         }
170
171         for (i = 0; i < d->send_count; i++) {
172                 idx = d->send_indices[i];
173                 if (idx < 0 || idx >= dma->buf_count) {
174                         atomic_dec(&q->use_count);
175                         DRM_ERROR("Index %d (of %d max)\n",
176                                   d->send_indices[i], dma->buf_count - 1);
177                         return -EINVAL;
178                 }
179                 buf = dma->buflist[ idx ];
180                 if (buf->filp != filp) {
181                         atomic_dec(&q->use_count);
182                         DRM_ERROR("Process %d using buffer not owned\n",
183                                   current->pid);
184                         return -EINVAL;
185                 }
186                 if (buf->list != DRM_LIST_NONE) {
187                         atomic_dec(&q->use_count);
188                         DRM_ERROR("Process %d using buffer %d on list %d\n",
189                                   current->pid, buf->idx, buf->list);
190                 }
191                 buf->used         = d->send_sizes[i];
192                 buf->while_locked = while_locked;
193                 buf->context      = d->context;
194                 if (!buf->used) {
195                         DRM_ERROR("Queueing 0 length buffer\n");
196                 }
197                 if (buf->pending) {
198                         atomic_dec(&q->use_count);
199                         DRM_ERROR("Queueing pending buffer:"
200                                   " buffer %d, offset %d\n",
201                                   d->send_indices[i], i);
202                         return -EINVAL;
203                 }
204                 if (buf->waiting) {
205                         atomic_dec(&q->use_count);
206                         DRM_ERROR("Queueing waiting buffer:"
207                                   " buffer %d, offset %d\n",
208                                   d->send_indices[i], i);
209                         return -EINVAL;
210                 }
211                 buf->waiting = 1;
212                 if (atomic_read(&q->use_count) == 1
213                     || atomic_read(&q->finalization)) {
214                         DRM(free_buffer)(dev, buf);
215                 } else {
216                         DRM(waitlist_put)(&q->waitlist, buf);
217                         atomic_inc(&q->total_queued);
218                 }
219         }
220         atomic_dec(&q->use_count);
221
222         return 0;
223 }
/* Grant up to (request_count - granted_count) free buffers of the given
 * size order to the caller, copying each granted buffer's index and
 * total size out to the user-space arrays in `d` and advancing
 * d->granted_count.
 *
 * Returns 0 on success — possibly having granted fewer buffers than
 * requested (the freelist ran dry); the caller checks granted_count —
 * or -EFAULT if the user arrays cannot be written.
 *
 * NOTE(review): on a copy_to_user failure the buffer just taken off the
 * freelist is neither reported to the user nor put back — presumably a
 * buffer leak until the file is closed; confirm against the freelist
 * helpers before changing.
 */
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
                                         int order)
{
        drm_file_t    *priv   = filp->private_data;
        drm_device_t  *dev    = priv->dev;
        int               i;
        drm_buf_t         *buf;
        drm_device_dma_t  *dma = dev->dma;

        for (i = d->granted_count; i < d->request_count; i++) {
                /* Second argument: block for a free buffer only when the
                   caller set _DRM_DMA_WAIT. */
                buf = DRM(freelist_get)(&dma->bufs[order].freelist,
                                        d->flags & _DRM_DMA_WAIT);
                if (!buf) break;
                if (buf->pending || buf->waiting) {
                        /* Should not happen: a buffer on the freelist is
                           still marked busy.  Log it and grant anyway. */
                        DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
                                  buf->idx,
                                  buf->filp,
                                  buf->waiting,
                                  buf->pending);
                }
                buf->filp     = filp;
                if (copy_to_user(&d->request_indices[i],
                                 &buf->idx,
                                 sizeof(buf->idx)))
                        return -EFAULT;

                if (copy_to_user(&d->request_sizes[i],
                                 &buf->total,
                                 sizeof(buf->total)))
                        return -EFAULT;

                ++d->granted_count;
        }
        return 0;
}


262 int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
263 {
264         int               order;
265         int               retcode = 0;
266         int               tmp_order;
267
268         order = DRM(order)(dma->request_size);
269
270         dma->granted_count = 0;
271         retcode            = DRM(dma_get_buffers_of_order)(filp, dma, order);
272
273         if (dma->granted_count < dma->request_count
274             && (dma->flags & _DRM_DMA_SMALLER_OK)) {
275                 for (tmp_order = order - 1;
276                      !retcode
277                              && dma->granted_count < dma->request_count
278                              && tmp_order >= DRM_MIN_ORDER;
279                      --tmp_order) {
280
281                         retcode = DRM(dma_get_buffers_of_order)(filp, dma,
282                                                                 tmp_order);
283                 }
284         }
285
286         if (dma->granted_count < dma->request_count
287             && (dma->flags & _DRM_DMA_LARGER_OK)) {
288                 for (tmp_order = order + 1;
289                      !retcode
290                              && dma->granted_count < dma->request_count
291                              && tmp_order <= DRM_MAX_ORDER;
292                      ++tmp_order) {
293
294                         retcode = DRM(dma_get_buffers_of_order)(filp, dma,
295                                                                 tmp_order);
296                 }
297         }
298         return 0;
299 }