/**
 * \file drm_vm.h
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page to
 * map; get the page, increment its use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *DRM(do_vm_nopage)(struct vm_area_struct *vma,
                                                 unsigned long address)
{
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t  *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
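                /*
                 * 'offset' is now a page index into this AGP memory block;
                 * as used here, memory[offset] holds the physical address
                 * of the page backing that slot of the aperture.
                 */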
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static __inline__ struct page *DRM(do_vm_nopage)(struct vm_area_struct *vma,
                                                 unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *DRM(do_vm_shm_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        unsigned long    offset;
        unsigned long    i;
        struct page      *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */

        offset   = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
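        /*
         * _DRM_SHM maps are backed by vmalloc()ed kernel memory (see the
         * vfree() in vm_shm_close below), so vmalloc_to_page() can resolve
         * the backing page.
         */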
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
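        /*
         * Walk the device's vma list: count every mapping of this map
         * (including this one) while unlinking and freeing the entry for
         * the vma being closed.
         */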
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* This vma was the only mapping of the map that was found */
        if (found_maps == 1 &&
            map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                DRM(ioremapfree)(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static __inline__ struct page *DRM(do_vm_dma_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        unsigned long    offset;
        unsigned long    page_nr;
        struct page      *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM; /* Nothing allocated */

        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr  = offset >> PAGE_SHIFT;
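        /*
         * pagelist[] holds the kernel virtual address of each DMA page, so
         * virt_to_page() can translate the entry directly.
         */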
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static __inline__ struct page *DRM(do_vm_sg_nopage)(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM; /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
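        /*
         * map->offset is expressed relative to the scatter-gather handle;
         * combine the fault offset within the vma with the map's offset
         * into the SG area to index the pre-built pagelist.
         */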
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

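/*
 * Later 2.6 kernels added an "int *type" out-parameter to the nopage
 * method so that handlers can report VM_FAULT_MINOR vs. VM_FAULT_MAJOR;
 * the wrappers below adapt the shared helpers above to whichever
 * signature the running kernel expects.
 */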
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int *type)
{
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type)
{
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type)
{
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type)
{
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_sg_nopage)(vma, address);
}

#else   /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int unused)
{
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused)
{
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused)
{
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int unused)
{
        return DRM(do_vm_sg_nopage)(vma, address);
}

#endif

/** AGP virtual memory operations */
static struct vm_operations_struct   DRM(vm_ops) = {
        .nopage = DRM(vm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Shared virtual memory operations */
static struct vm_operations_struct   DRM(vm_shm_ops) = {
        .nopage = DRM(vm_shm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_shm_close),
};

/** DMA virtual memory operations */
static struct vm_operations_struct   DRM(vm_dma_ops) = {
        .nopage = DRM(vm_dma_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct   DRM(vm_sg_ops) = {
        .nopage = DRM(vm_sg_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void DRM(vm_open)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void DRM(vm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area's operations to vm_dma_ops, sets its file
 * pointer, and calls vm_open().
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev      = priv->dev;
        dma      = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &DRM(vm_dma_ops);

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
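
/*
 * Userspace sketch (an assumption for illustration, not part of this file):
 * DMA buffer maps are requested by mmap()ing the DRM device file at offset
 * 0 for the full size of the DMA area; "fd" and "dma_area_size" below are
 * hypothetical.
 *
 *     void *bufs = mmap(NULL, dma_area_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 */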
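/*
 * Default offset helpers for dev->fn_tbl: map offsets are used verbatim,
 * and on Alpha the register offset is rebased from the hose's dense memory
 * base to a bus-relative address.
 */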
unsigned long DRM(core_get_map_ofs)(drm_map_t *map)
{
        return map->offset;
}

unsigned long DRM(core_get_reg_ofs)(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so it calls mmap_dma(). Otherwise it searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally it sets the file pointer and calls vm_open().
 */
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_map_t       *map    = NULL;
        drm_map_list_t  *r_list;
        unsigned long   offset  = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated) return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return DRM(mmap_dma)(filp, vma);

        /* A sequential search of a linked list is fine here because: 1)
         * there will only be about 5-10 entries in the list and, 2) a DRI
         * client only has to do this mapping once, so it doesn't have to be
         * optimized for performance, even if the list was a bit longer.
         */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                off = dev->fn_tbl.get_map_ofs(map);
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought we could move
                 * this up higher and use `protection_map' instead.
                 */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access the AGP
                         * aperture's bus addresses directly, so for memory
                         * of type _DRM_AGP we sort out the real physical
                         * pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &DRM(vm_ops);
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (map->type != _DRM_AGP)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
                offset = dev->fn_tbl.get_reg_ofs(dev);
#ifdef __sparc__
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                        VM_OFFSET(vma) + offset,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot, 0))
#else
                if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                     VM_OFFSET(vma) + offset,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &DRM(vm_ops);
                break;
        case _DRM_SHM:
                vma->vm_ops = &DRM(vm_shm_ops);
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when DRM_KERNEL
                 * advisory is supported.
                 */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &DRM(vm_sg_ops);
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
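
/*
 * Userspace sketch (an assumption for illustration): non-DMA maps are
 * selected by passing the map's offset, as reported by the map-management
 * ioctls (e.g. DRM_IOCTL_GET_MAP), as the mmap() offset; "fd", "map_size"
 * and "map_offset" below are hypothetical.
 *
 *     void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map_offset);
 */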