/**
 * \file drm_vm.h
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

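/*
 * A note on naming: in this templated DRM build, every symbol below is
 * wrapped in the DRM() macro, which each driver defines to prefix symbols
 * with its own name before this file is included.  A minimal sketch of the
 * convention (assuming the radeon driver; the actual define lives in the
 * driver's private header):
 *
 *      #define DRM(x) radeon_##x
 *
 * so that DRM(vm_ops) below compiles to radeon_vm_ops.
 */
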
/** AGP virtual memory operations */
struct vm_operations_struct   DRM(vm_ops) = {
        .nopage = DRM(vm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Shared virtual memory operations */
struct vm_operations_struct   DRM(vm_shm_ops) = {
        .nopage = DRM(vm_shm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_shm_close),
};

/** DMA virtual memory operations */
struct vm_operations_struct   DRM(vm_dma_ops) = {
        .nopage = DRM(vm_dma_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Scatter-gather virtual memory operations */
struct vm_operations_struct   DRM(vm_sg_ops) = {
        .nopage = DRM(vm_sg_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \param type if not NULL, set to the fault type (VM_FAULT_MINOR).
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                            unsigned long address,
                            int *type)
{
#if __REALLY_HAVE_AGP
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t  *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */

        if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#if __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          atomic_read(&page->count));

                if (type)
                        *type = VM_FAULT_MINOR;
                return page;
        }
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */

        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
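
/*
 * A worked example of the translation above, with illustrative numbers: for
 * a vma mapped at vm_start = 0x40000000 onto VM_OFFSET(vma) = 0xe8000000, a
 * fault at address 0x40003000 yields baddr = 0xe8003000.  If an agpmem
 * entry is bound at 0xe8000000, the page index is
 * (0xe8003000 - 0xe8000000) >> PAGE_SHIFT = 3 (with 4 KB pages), and
 * agpmem->memory->memory[3] holds the physical address of the backing page.
 */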

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \param type if not NULL, set to the fault type (VM_FAULT_MINOR).
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                unsigned long address,
                                int *type)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        unsigned long    offset;
        unsigned long    i;
        struct page      *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */

        offset   = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}
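
/*
 * The _DRM_SHM handle is vmalloc()ed (note the matching vfree() in
 * DRM(vm_shm_close) below), which is why the lookup goes through
 * vmalloc_to_page(): for example, a fault three pages into the mapping
 * resolves to vmalloc_to_page(map->handle + 3 * PAGE_SIZE).
 */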

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only mapping that was found */
        if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                DRM(ioremapfree)(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}
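
/*
 * An example of the reference counting above: if two processes map the same
 * _DRM_SHM map, the first close sees found_maps == 2 (the closing vma is
 * still counted) and leaves the map alone; the final close sees
 * found_maps == 1 and, if the map has also been removed from dev->maplist,
 * frees the backing storage and the map itself.
 */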

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \param type if not NULL, set to the fault type (VM_FAULT_MINOR).
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                unsigned long address,
                                int *type)
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        unsigned long    offset;
        unsigned long    page_nr;
        struct page      *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM; /* Nothing allocated */

        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr  = offset >> PAGE_SHIFT;
        page = virt_to_page(dma->pagelist[page_nr] +
                            (offset & ~PAGE_MASK));

        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}
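
/*
 * An example of the arithmetic above: a fault at vma->vm_start + 0x5123
 * gives offset = 0x5123, hence page_nr = 5 with 4 KB pages; the returned
 * page backs dma->pagelist[5], and (offset & ~PAGE_MASK) = 0x123 is the
 * byte offset within that page.
 */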

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \param type if not NULL, set to the fault type (VM_FAULT_MINOR).
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;  /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;

        return page;
}
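
/*
 * Here map->offset - dev->sg->handle locates this map within the larger
 * scatter-gather area: for example, a map starting two pages into the SG
 * area, faulting one page into the vma, resolves to
 * entry->pagelist[1 + 2] = entry->pagelist[3].
 */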

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void DRM(vm_open)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void DRM(vm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops and the
 * file pointer, then calls vm_open().
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev      = priv->dev;
        dma      = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

                                /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops   = &DRM(vm_dma_ops);

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
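
/*
 * From user space the DMA buffer area is reached by mapping offset 0 with
 * the exact size of the buffer area; a minimal sketch (hypothetical fd and
 * size, error handling omitted):
 *
 *      void *buf = mmap(NULL, dma_area_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, 0);
 *
 * Any other length fails the page-count check above with -EINVAL.
 */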

#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()    (map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()    (dev->hose->dense_mem_base -    \
                                 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()    0
#endif
#endif
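
/*
 * Drivers may override either hook by defining it before this file is
 * included, as the alpha register-offset case above illustrates.  A minimal
 * sketch (hypothetical driver that masks a tag out of its map offsets):
 *
 *      #define DRIVER_GET_MAP_OFS()    (map->offset & ~0xfUL)
 *      #include "drm_vm.h"
 */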

/**
 * mmap support for DRM devices.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so mmap_dma() is called. Otherwise the map is searched for in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the map type, and the pages are remapped.
 * Finally the file pointer is set and vm_open() is called.
 */
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_map_t       *map    = NULL;
        drm_map_list_t  *r_list;
        unsigned long   offset  = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated) return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __REALLY_HAVE_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return DRM(mmap_dma)(filp, vma);

                                /* A sequential search of a linked list is
                                   fine here because: 1) there will only be
                                   about 5-10 entries in the list and, 2) a
                                   DRI client only has to do this mapping
                                   once, so it doesn't have to be optimized
                                   for performance, even if the list was a
                                   bit longer. */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                off = DRIVER_GET_MAP_OFS();
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

                                /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                                /* Ye gads this is ugly.  With more thought
                                   we could move this up higher and use
                                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
#if __REALLY_HAVE_AGP
                if (dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to the bus DMA
                         * address from the CPU, so for memory of type
                         * _DRM_AGP we'll deal with sorting out the real
                         * physical pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &DRM(vm_ops);
                        break;
                }
#endif
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (map->type != _DRM_AGP)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
                offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                        VM_OFFSET(vma) + offset,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot, 0))
#else
                if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                     VM_OFFSET(vma) + offset,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                                return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &DRM(vm_ops);
                break;
        case _DRM_SHM:
                vma->vm_ops = &DRM(vm_shm_ops);
                vma->vm_private_data = (void *)map;
                                /* Don't let this area swap.  Change when
                                   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &DRM(vm_sg_ops);
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
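
/*
 * From user space a non-DMA map is reached by handing the map's offset
 * token to mmap(); a minimal sketch (hypothetical values, error handling
 * omitted; libdrm's drmMap() is a wrapper around this call):
 *
 *      void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, drm_fd, map_offset);
 *
 * The length must equal map->size and the client must be authenticated, or
 * the checks above fail with -EINVAL and -EACCES respectively.
 */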