/*
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
static __inline__ struct page *DRM(do_vm_nopage)(struct vm_area_struct *vma,
                                                 unsigned long address)
{
#if __REALLY_HAVE_AGP
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        /* Find the right map */
        if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /* Adjust to a bus-relative address */
                baddr -= dev->hose->mem_space->start;
#endif

                /* It's AGP memory - find the real physical page to map */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /* Get the page, inc the use count, and return it */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
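/*
 * Worked example of the translation above (illustrative numbers only):
 * with PAGE_SIZE = 4096, a fault at vma->vm_start + 0x3008 yields
 * offset = 0x3008 and baddr = VM_OFFSET(vma) + 0x3008.  If the agpmem
 * block containing baddr is bound at baddr - 0x3008 (the map start),
 * then (baddr - agpmem->bound) >> PAGE_SHIFT = 3, so
 * agpmem->memory->memory[3] holds the physical address of the real
 * backing page, which virt_to_page(__va(...)) converts to its
 * struct page.
 */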
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *DRM(do_vm_shm_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;    /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page) return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}
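/*
 * Note: _DRM_SHM maps are backed by vmalloc() (map->handle is a kernel
 * virtual address; see the vfree() in vm_shm_close() below), so a fault
 * can be resolved purely by walking the kernel page tables with
 * vmalloc_to_page().  A minimal sketch of the same lookup, assuming a
 * vmalloc'ed buffer buf and a faulting address addr inside vma:
 *
 *      struct page *p = vmalloc_to_page(buf + (addr - vma->vm_start));
 */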
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 &&
            map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                DRM(ioremapfree)(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}
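/*
 * Design note: rather than keeping a per-map reference count, the close
 * handler above recounts users by scanning dev->vmalist for other VMAs
 * sharing the same private data, and only tears the map down when it was
 * the sole user, the map is _DRM_REMOVABLE, and it is no longer on
 * dev->maplist.  This trades a short O(n) scan under struct_sem for not
 * having to keep a counter coherent across VMA duplication.
 */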
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *DRM(do_vm_dma_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM;    /* Nothing allocated */

        offset  = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));
        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}
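/*
 * Illustration (made-up numbers): with PAGE_SIZE = 4096, a fault at
 * vma->vm_start + 0x5123 gives offset = 0x5123 and page_nr = 5.  The
 * sub-page remainder 0x123 added above is harmless, since
 * virt_to_page() resolves any address within a page to the same
 * struct page as the page's base address.
 */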
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *DRM(do_vm_sg_nopage)(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;    /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}
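/*
 * Illustration (made-up numbers): if the scatter-gather region's kernel
 * handle is 0xd0000000 and this map's offset is 0xd0004000, then
 * map_offset = 0x4000, i.e. the map starts 4 pages into the region; a
 * fault 2 pages into the VMA therefore returns entry->pagelist[2 + 4].
 */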
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_sg_nopage)(vma, address);
}
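/*
 * Note: 2.6 kernels added an int *type out-parameter to the nopage
 * method so handlers can report VM_FAULT_MINOR vs. VM_FAULT_MAJOR for
 * fault accounting; the pages here are always resident, hence
 * VM_FAULT_MINOR.  These thin wrappers keep the version-specific
 * signature out of the do_vm_*_nopage() workers above.
 */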
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int unused) {
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused) {
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused) {
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int unused) {
        return DRM(do_vm_sg_nopage)(vma, address);
}

#endif
/** AGP virtual memory operations */
static struct vm_operations_struct DRM(vm_ops) = {
        .nopage = DRM(vm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Shared virtual memory operations */
static struct vm_operations_struct DRM(vm_shm_ops) = {
        .nopage = DRM(vm_shm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_shm_close),
};

/** DMA virtual memory operations */
static struct vm_operations_struct DRM(vm_dma_ops) = {
        .nopage = DRM(vm_dma_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct DRM(vm_sg_ops) = {
        .nopage = DRM(vm_sg_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};
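/*
 * These tables are the dispatch points for everything above: DRM(mmap)
 * installs one per map type, and the core VM then calls back through it
 * on faults (nopage), VMA duplication (open) and teardown (close).
 * Only the shared-memory table uses a dedicated close method, since
 * _DRM_SHM maps may need their backing store freed with the last user.
 */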
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void DRM(vm_open)(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}
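/*
 * Note: if DRM(alloc) fails above, vm_open() silently skips the
 * bookkeeping; the mapping itself still works, the VMA is simply
 * missing from dev->vmalist (and from the per-device VMA accounting)
 * until it is closed.
 */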
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void DRM(vm_close)(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}
/**
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &DRM(vm_dma_ops);
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
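/*
 * Userspace reaches this path by mmap()ing the DRM device file with a
 * zero offset.  A hedged sketch of the call (error handling omitted;
 * fd and dma_size are the caller's own):
 *
 *      void *buf = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *
 * dma_size must be exactly dma->page_count pages, or the check above
 * returns -EINVAL.
 */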
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()    (map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()    (dev->hose->dense_mem_base - \
                                 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()    0
#endif
#endif
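/*
 * Drivers may define these hooks before this file is included to
 * override how map and register offsets are derived; the defaults above
 * cover the common case.  On Alpha, DRIVER_GET_REG_OFS() rebases a bus
 * address into the hose's CPU-visible dense memory space.  A
 * hypothetical override, for illustration only:
 *
 *      #define DRIVER_GET_MAP_OFS()    (map->offset & 0x0fffffffUL)
 */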
/**
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        unsigned long offset = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated) return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0.
         */
        if (!VM_OFFSET(vma)
#if __REALLY_HAVE_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return DRM(mmap_dma)(filp, vma);

        /* A sequential search of a linked list is
           fine here because: 1) there will only be
           about 5-10 entries in the list and, 2) a
           DRI client only has to do this mapping
           once, so it doesn't have to be optimized
           for performance, even if the list was a
           bit longer. */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                off = DRIVER_GET_MAP_OFS();
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead. */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }
        switch (map->type) {
        case _DRM_AGP:
#if __REALLY_HAVE_AGP
                if (dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't talk to the bus DMA
                         * address directly, so for memory of type _DRM_AGP we
                         * sort out the real physical pages and mappings in
                         * nopage().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &DRM(vm_ops);
                        break;
                }
#endif
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (map->type != _DRM_AGP)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
                offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                        VM_OFFSET(vma) + offset,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot, 0))
#else
                if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                     VM_OFFSET(vma) + offset,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &DRM(vm_ops);
                break;
        case _DRM_SHM:
                vma->vm_ops = &DRM(vm_shm_ops);
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &DRM(vm_sg_ops);
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
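/*
 * Summary of the dispatch above: a zero offset selects the DMA buffer
 * path; any other offset names an entry in dev->maplist, and the map
 * type picks the strategy.  Register and frame-buffer apertures are
 * remapped up front with (io_)remap_page_range(), while AGP behind an
 * unusable aperture, _DRM_SHM and scatter-gather maps install a nopage
 * handler and fault their pages in lazily.  Every path ends in
 * vm_open(), so the VMA is tracked in dev->vmalist.
 */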