/**
 * \file drm_vm.h
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"


/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
static __inline__ struct page *DRM(do_vm_nopage)(struct vm_area_struct *vma,
                                                 unsigned long address)
{
#if __REALLY_HAVE_AGP
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t  *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */

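        /*
         * Faults land here only on platforms where the CPU cannot use the
         * AGP aperture directly (cant_use_aperture); otherwise DRM(mmap)
         * remaps the aperture up front and this handler just returns SIGBUS.
         */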
        if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#if __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for(agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
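                /* memory[] holds the physical address of each bound AGP page */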
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */

        return NOPAGE_SIGBUS;           /* Disallow mremap */
}

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *DRM(do_vm_shm_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        unsigned long    offset;
        unsigned long    i;
        struct page      *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */

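        /*
         * map->handle points into a vmalloc()ed area, so vmalloc_to_page()
         * gives the struct page backing the faulting address.
         */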
        offset   = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}


/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if(found_maps == 1 &&
           map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if(!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                DRM(ioremapfree)(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *DRM(do_vm_dma_nopage)(struct vm_area_struct *vma,
                                                     unsigned long address)
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        unsigned long    offset;
        unsigned long    page_nr;
        struct page      *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM; /* Nothing allocated */

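        /*
         * dma->pagelist[] holds the kernel virtual addresses of the DMA
         * buffer pages, so virt_to_page() gives the backing struct page.
         */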
        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr  = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *DRM(do_vm_sg_nopage)(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;  /* Nothing allocated */

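        /*
         * map->offset lies inside the scatter/gather handle range, so
         * (map->offset - dev->sg->handle) is this map's byte offset into the
         * SG area; adding the fault offset gives the index into
         * entry->pagelist.
         */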
        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}


#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

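/*
 * On 2.6 kernels the nopage handler takes an int *type out-parameter so it
 * can report VM_FAULT_MINOR/VM_FAULT_MAJOR; older kernels pass an unused
 * int instead.  These wrappers adapt the common handlers above to both
 * signatures.
 */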
static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return DRM(do_vm_sg_nopage)(vma, address);
}

#else   /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int unused) {
        return DRM(do_vm_nopage)(vma, address);
}

static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused) {
        return DRM(do_vm_shm_nopage)(vma, address);
}

static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                       unsigned long address,
                                       int unused) {
        return DRM(do_vm_dma_nopage)(vma, address);
}

static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int unused) {
        return DRM(do_vm_sg_nopage)(vma, address);
}

#endif


/** AGP virtual memory operations */
static struct vm_operations_struct   DRM(vm_ops) = {
        .nopage = DRM(vm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Shared virtual memory operations */
static struct vm_operations_struct   DRM(vm_shm_ops) = {
        .nopage = DRM(vm_shm_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_shm_close),
};

/** DMA virtual memory operations */
static struct vm_operations_struct   DRM(vm_dma_ops) = {
        .nopage = DRM(vm_dma_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct   DRM(vm_sg_ops) = {
        .nopage = DRM(vm_sg_nopage),
        .open   = DRM(vm_open),
        .close  = DRM(vm_close),
};


/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void DRM(vm_open)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void DRM(vm_close)(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev      = priv->dev;
        dma      = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

                                /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops   = &DRM(vm_dma_ops);

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}

#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()    (map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()    (dev->hose->dense_mem_base -    \
                                 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()    0
#endif
#endif
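
/*
 * Drivers built from this template may define DRIVER_GET_MAP_OFS() and
 * DRIVER_GET_REG_OFS() before including drm_vm.h to apply a bus- or
 * chip-specific offset.  A purely hypothetical sketch (not taken from any
 * in-tree driver):
 *
 *      #define DRIVER_GET_MAP_OFS()    (map->offset & 0xffffffffUL)
 */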

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_map_t       *map    = NULL;
        drm_map_list_t  *r_list;
        unsigned long   offset  = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if ( !priv->authenticated ) return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __REALLY_HAVE_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return DRM(mmap_dma)(filp, vma);

                                /* A sequential search of a linked list is
                                   fine here because: 1) there will only be
                                   about 5-10 entries in the list and, 2) a
                                   DRI client only has to do this mapping
                                   once, so it doesn't have to be optimized
                                   for performance, even if the list was a
                                   bit longer. */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                off = DRIVER_GET_MAP_OFS();
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

                                /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                                /* Ye gads this is ugly.  With more thought
                                   we could move this up higher and use
                                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
#if __REALLY_HAVE_AGP
          if (dev->agp->cant_use_aperture) {
                /*
                 * On some platforms the CPU can't access AGP memory through
                 * the aperture directly, so for memory of type _DRM_AGP we
                 * sort out the real physical pages and mappings in nopage().
                 */
#if defined(__powerpc__)
                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                vma->vm_ops = &DRM(vm_ops);
                break;
          }
#endif
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (map->type != _DRM_AGP)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
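                /*
                 * The entire range is remapped here, so faults on register
                 * and frame-buffer maps should never reach the nopage
                 * handler.
                 */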
                offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                        VM_OFFSET(vma) + offset,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot, 0))
#else
                if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
                                     VM_OFFSET(vma) + offset,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                                return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &DRM(vm_ops);
                break;
        case _DRM_SHM:
                vma->vm_ops = &DRM(vm_shm_ops);
                vma->vm_private_data = (void *)map;
                                /* Don't let this area swap.  Change when
                                   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &DRM(vm_sg_ops);
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}