1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support. 
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10
11 #include <linux/config.h>
12 #include <linux/mm.h>
13 #include <linux/miscdevice.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/mman.h>
17 #include <linux/random.h>
18 #include <linux/init.h>
19 #include <linux/raw.h>
20 #include <linux/tty.h>
21 #include <linux/capability.h>
22 #include <linux/smp_lock.h>
23 #include <linux/devfs_fs_kernel.h>
24 #include <linux/ptrace.h>
25 #include <linux/device.h>
26 #include <linux/highmem.h>
27 #include <linux/crash_dump.h>
28 #include <linux/backing-dev.h>
29 #include <linux/bootmem.h>
30 #include <linux/pipe_fs_i.h>
31
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34
35 #ifdef CONFIG_IA64
36 # include <linux/efi.h>
37 #endif
38
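/*
 * Check a physical address range, one page at a time, against the
 * architecture's devmem_is_allowed() policy; log and refuse the access
 * if any page in the range is off limits.
 */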
39 static inline int range_is_allowed(unsigned long from, unsigned long to)
40 {
41         unsigned long cursor;
42
43         cursor = from >> PAGE_SHIFT;
44         while ((cursor << PAGE_SHIFT) < to) {
45                 if (!devmem_is_allowed(cursor)) {
46                         printk(KERN_INFO "Program %s tried to access /dev/mem between %lx->%lx.\n",
47                                         current->comm, from, to);
48                         return 0;
49                 }
50                 cursor++;
51         }
52         return 1;
53 }
54
55 /*
56  * Architectures vary in how they handle caching for addresses
57  * outside of main memory.
58  *
59  */
60 static inline int uncached_access(struct file *file, unsigned long addr)
61 {
62 #if defined(__i386__)
63         /*
64          * On the PPro and successors, the MTRRs are used to set
65          * memory types for physical addresses outside main memory,
66          * so blindly setting PCD or PWT on those pages is wrong.
67          * For Pentiums and earlier, the surround logic should disable
68          * caching for the high addresses through the KEN pin, but
69          * we maintain the tradition of paranoia in this code.
70          */
71         if (file->f_flags & O_SYNC)
72                 return 1;
73         return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
74                   test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
75                   test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
76                   test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
77           && addr >= __pa(high_memory);
78 #elif defined(__x86_64__)
79         /* 
80          * This is broken because it can generate memory type aliases,
81          * which can cause cache corruptions
82          * But it is only available for root and we have to be bug-to-bug
83          * compatible with i386.
84          */
85         if (file->f_flags & O_SYNC)
86                 return 1;
87         /* same behaviour as i386. PAT always set to cached and MTRRs control the
88            caching behaviour. 
89            Hopefully a full PAT implementation will fix that soon. */      
90         return 0;
91 #elif defined(CONFIG_IA64)
92         /*
93          * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
94          */
95         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
96 #else
97         /*
98          * Accessing memory above the top of memory the kernel knows about or through a file pointer
99          * that was marked O_SYNC will be done non-cached.
100          */
101         if (file->f_flags & O_SYNC)
102                 return 1;
103         return addr >= __pa(high_memory);
104 #endif
105 }
106
107 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
108 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
109 {
110         if (addr + count > __pa(high_memory))
111                 return 0;
112
113         return 1;
114 }
115
116 static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size)
117 {
118         return 1;
119 }
120 #endif
121
122 #ifndef ARCH_HAS_DEV_MEM
123 /*
124  * This function reads the *physical* memory. The f_pos points directly to the
125  * memory location. 
126  */
127 static ssize_t read_mem(struct file * file, char __user * buf,
128                         size_t count, loff_t *ppos)
129 {
130         unsigned long p = *ppos;
131         ssize_t read, sz;
132         char *ptr;
133
134         if (!valid_phys_addr_range(p, count))
135                 return -EFAULT;
136         read = 0;
137 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
138         /* we don't have page 0 mapped on sparc and m68k.. */
139         if (p < PAGE_SIZE) {
140                 sz = PAGE_SIZE - p;
141                 if (sz > count) 
142                         sz = count; 
143                 if (sz > 0) {
144                         if (clear_user(buf, sz))
145                                 return -EFAULT;
146                         buf += sz; 
147                         p += sz; 
148                         count -= sz; 
149                         read += sz; 
150                 }
151         }
152 #endif
153
154         while (count > 0) {
155                 /*
156                  * Handle first page in case it's not aligned
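                 * (-p & (PAGE_SIZE - 1)) is the number of bytes from p up to
                 * the next page boundary.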
157                  */
158                 if (-p & (PAGE_SIZE - 1))
159                         sz = -p & (PAGE_SIZE - 1);
160                 else
161                         sz = PAGE_SIZE;
162
163                 sz = min_t(unsigned long, sz, count);
164
165                 /*
166                  * On ia64 if a page has been mapped somewhere as
167                  * uncached, then it must also be accessed uncached
168                  * by the kernel or data corruption may occur
169                  */
170                 ptr = xlate_dev_mem_ptr(p);
171
172                 if (!range_is_allowed(p, p+count))
173                         return -EPERM;
174                 if (copy_to_user(buf, ptr, sz))
175                         return -EFAULT;
176                 buf += sz;
177                 p += sz;
178                 count -= sz;
179                 read += sz;
180         }
181
182         *ppos += read;
183         return read;
184 }
185
186 static ssize_t write_mem(struct file * file, const char __user * buf, 
187                          size_t count, loff_t *ppos)
188 {
189         unsigned long p = *ppos;
190         ssize_t written, sz;
191         unsigned long copied;
192         void *ptr;
193
194         if (!valid_phys_addr_range(p, count))
195                 return -EFAULT;
196
197         written = 0;
198
199 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
200         /* we don't have page 0 mapped on sparc and m68k.. */
201         if (p < PAGE_SIZE) {
202                 unsigned long sz = PAGE_SIZE - p;
203                 if (sz > count)
204                         sz = count;
205                 /* Hmm. Do something? */
206                 buf += sz;
207                 p += sz;
208                 count -= sz;
209                 written += sz;
210         }
211 #endif
212
213         while (count > 0) {
214                 /*
215                  * Handle first page in case it's not aligned
216                  */
217                 if (-p & (PAGE_SIZE - 1))
218                         sz = -p & (PAGE_SIZE - 1);
219                 else
220                         sz = PAGE_SIZE;
221
222                 sz = min_t(unsigned long, sz, count);
223
224                 /*
225                  * On ia64 if a page has been mapped somewhere as
226                  * uncached, then it must also be accessed uncached
227                  * by the kernel or data corruption may occur
228                  */
229                 ptr = xlate_dev_mem_ptr(p);
230
231                 if (!range_is_allowed(p, p+sz))
232                         return -EPERM;
233                 copied = copy_from_user(ptr, buf, sz);
234                 if (copied) {
235                         written += sz - copied;
236                         if (written)
237                                 break;
238                         return -EFAULT;
239                 }
240                 buf += sz;
241                 p += sz;
242                 count -= sz;
243                 written += sz;
244         }
245
246         *ppos += written;
247         return written;
248 }
249 #endif
250
251 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
252 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
253                                      unsigned long size, pgprot_t vma_prot)
254 {
255 #ifdef pgprot_noncached
256         unsigned long offset = pfn << PAGE_SHIFT;
257
258         if (uncached_access(file, offset))
259                 return pgprot_noncached(vma_prot);
260 #endif
261         return vma_prot;
262 }
263 #endif
264
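/*
 * mmap() on /dev/mem: map the requested physical range straight into the
 * caller's address space with remap_pfn_range(), using an uncached
 * protection wherever phys_mem_access_prot() asks for one.
 */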
265 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
266 {
267         size_t size = vma->vm_end - vma->vm_start;
268
269         if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
270                 return -EINVAL;
271
272         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
273                                                  size,
274                                                  vma->vm_page_prot);
275
276         /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
277         if (remap_pfn_range(vma,
278                             vma->vm_start,
279                             vma->vm_pgoff,
280                             size,
281                             vma->vm_page_prot))
282                 return -EAGAIN;
283         return 0;
284 }
285
286 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
287 {
288         unsigned long pfn;
289
290         /* Turn a kernel-virtual address into a physical page frame */
291         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
292
293         /*
294          * RED-PEN: on some architectures there is more mapped memory
295          * than available in mem_map which pfn_valid checks
296          * for. Perhaps should add a new macro here.
297          *
298          * RED-PEN: vmalloc is not supported right now.
299          */
300         if (!pfn_valid(pfn))
301                 return -EIO;
302
303         vma->vm_pgoff = pfn;
304         return mmap_mem(file, vma);
305 }
306
307 #ifdef CONFIG_CRASH_DUMP
308 /*
309  * Read memory corresponding to the old kernel.
310  */
311 static ssize_t read_oldmem(struct file *file, char __user *buf,
312                                 size_t count, loff_t *ppos)
313 {
314         unsigned long pfn, offset;
315         size_t read = 0, csize;
316         int rc = 0;
317
318         while (count) {
319                 pfn = *ppos / PAGE_SIZE;
320                 if (pfn > saved_max_pfn)
321                         return read;
322
323                 offset = (unsigned long)(*ppos % PAGE_SIZE);
324                 if (count > PAGE_SIZE - offset)
325                         csize = PAGE_SIZE - offset;
326                 else
327                         csize = count;
328
329                 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
330                 if (rc < 0)
331                         return rc;
332                 buf += csize;
333                 *ppos += csize;
334                 read += csize;
335                 count -= csize;
336         }
337         return read;
338 }
339 #endif
340
341 extern long vread(char *buf, char *addr, unsigned long count);
342 extern long vwrite(char *buf, char *addr, unsigned long count);
343
344 /*
345  * This function reads the *virtual* memory as seen by the kernel.
346  */
347 static ssize_t read_kmem(struct file *file, char __user *buf, 
348                          size_t count, loff_t *ppos)
349 {
350         unsigned long p = *ppos;
351         ssize_t low_count, read, sz;
352         char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
353
354         return -EPERM;
355
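        /*
         * Reading /dev/kmem is disabled in this kernel: the unconditional
         * -EPERM below means the legacy code that follows is never reached.
         */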
356         read = 0;
357         if (p < (unsigned long) high_memory) {
358                 low_count = count;
359                 if (count > (unsigned long) high_memory - p)
360                         low_count = (unsigned long) high_memory - p;
361
362 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
363                 /* we don't have page 0 mapped on sparc and m68k.. */
364                 if (p < PAGE_SIZE && low_count > 0) {
365                         size_t tmp = PAGE_SIZE - p;
366                         if (tmp > low_count) tmp = low_count;
367                         if (clear_user(buf, tmp))
368                                 return -EFAULT;
369                         buf += tmp;
370                         p += tmp;
371                         read += tmp;
372                         low_count -= tmp;
373                         count -= tmp;
374                 }
375 #endif
376                 while (low_count > 0) {
377                         /*
378                          * Handle first page in case it's not aligned
379                          */
380                         if (-p & (PAGE_SIZE - 1))
381                                 sz = -p & (PAGE_SIZE - 1);
382                         else
383                                 sz = PAGE_SIZE;
384
385                         sz = min_t(unsigned long, sz, low_count);
386
387                         /*
388                          * On ia64 if a page has been mapped somewhere as
389                          * uncached, then it must also be accessed uncached
390                          * by the kernel or data corruption may occur
391                          */
392                         kbuf = xlate_dev_kmem_ptr((char *)p);
393
394                         if (copy_to_user(buf, kbuf, sz))
395                                 return -EFAULT;
396                         buf += sz;
397                         p += sz;
398                         read += sz;
399                         low_count -= sz;
400                         count -= sz;
401                 }
402         }
403
404         if (count > 0) {
405                 kbuf = (char *)__get_free_page(GFP_KERNEL);
406                 if (!kbuf)
407                         return -ENOMEM;
408                 while (count > 0) {
409                         int len = count;
410
411                         if (len > PAGE_SIZE)
412                                 len = PAGE_SIZE;
413                         len = vread(kbuf, (char *)p, len);
414                         if (!len)
415                                 break;
416                         if (copy_to_user(buf, kbuf, len)) {
417                                 free_page((unsigned long)kbuf);
418                                 return -EFAULT;
419                         }
420                         count -= len;
421                         buf += len;
422                         read += len;
423                         p += len;
424                 }
425                 free_page((unsigned long)kbuf);
426         }
427         *ppos = p;
428         return read;
429 }
430
431
432 #if defined(CONFIG_ISA) || !defined(__mc68000__)
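/*
 * /dev/port: byte-at-a-time access to the legacy I/O port space
 * (ports 0x0000-0xffff) using inb()/outb().
 */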
433 static ssize_t read_port(struct file * file, char __user * buf,
434                          size_t count, loff_t *ppos)
435 {
436         unsigned long i = *ppos;
437         char __user *tmp = buf;
438
439         if (!access_ok(VERIFY_WRITE, buf, count))
440                 return -EFAULT; 
441         while (count-- > 0 && i < 65536) {
442                 if (__put_user(inb(i),tmp) < 0) 
443                         return -EFAULT;  
444                 i++;
445                 tmp++;
446         }
447         *ppos = i;
448         return tmp-buf;
449 }
450
451 static ssize_t write_port(struct file * file, const char __user * buf,
452                           size_t count, loff_t *ppos)
453 {
454         unsigned long i = *ppos;
455         const char __user * tmp = buf;
456
457         if (!access_ok(VERIFY_READ,buf,count))
458                 return -EFAULT;
459         while (count-- > 0 && i < 65536) {
460                 char c;
461                 if (__get_user(c, tmp)) {
462                         if (tmp > buf)
463                                 break;
464                         return -EFAULT; 
465                 }
466                 outb(c,i);
467                 i++;
468                 tmp++;
469         }
470         *ppos = i;
471         return tmp-buf;
472 }
473 #endif
474
475 static ssize_t read_null(struct file * file, char __user * buf,
476                          size_t count, loff_t *ppos)
477 {
478         return 0;
479 }
480
481 static ssize_t write_null(struct file * file, const char __user * buf,
482                           size_t count, loff_t *ppos)
483 {
484         return count;
485 }
486
487 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
488                         struct splice_desc *sd)
489 {
490         return sd->len;
491 }
492
493 static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
494                                  loff_t *ppos, size_t len, unsigned int flags)
495 {
496         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
497 }
498
499 #ifdef CONFIG_MMU
500 /*
501  * For fun, we are using the MMU for this.
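 * For a private, writable mapping we zap the existing pages and map the
 * zero page in their place rather than clearing the user buffer by hand;
 * shared and hugetlb mappings fall back to the clear_user() loop below.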
502  */
503 static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
504 {
505         struct mm_struct *mm;
506         struct vm_area_struct * vma;
507         unsigned long addr=(unsigned long)buf;
508
509         mm = current->mm;
510         /* Oops, this was forgotten before. -ben */
511         down_read(&mm->mmap_sem);
512
513         /* For private mappings, just map in zero pages. */
514         for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
515                 unsigned long count;
516
517                 if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
518                         goto out_up;
519                 if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
520                         break;
521                 count = vma->vm_end - addr;
522                 if (count > size)
523                         count = size;
524
525                 zap_page_range(vma, addr, count, NULL);
526                 zeromap_page_range(vma, addr, count, PAGE_COPY);
527
528                 size -= count;
529                 buf += count;
530                 addr += count;
531                 if (size == 0)
532                         goto out_up;
533         }
534
535         up_read(&mm->mmap_sem);
536         
537         /* The shared case is hard. Let's do the conventional zeroing. */ 
538         do {
539                 unsigned long unwritten = clear_user(buf, PAGE_SIZE);
540                 if (unwritten)
541                         return size + unwritten - PAGE_SIZE;
542                 cond_resched();
543                 buf += PAGE_SIZE;
544                 size -= PAGE_SIZE;
545         } while (size);
546
547         return size;
548 out_up:
549         up_read(&mm->mmap_sem);
550         return size;
551 }
552
553 static ssize_t read_zero(struct file * file, char __user * buf, 
554                          size_t count, loff_t *ppos)
555 {
556         unsigned long left, unwritten, written = 0;
557
558         if (!count)
559                 return 0;
560
561         if (!access_ok(VERIFY_WRITE, buf, count))
562                 return -EFAULT;
563
564         left = count;
565
566         /* do we want to be clever? Arbitrary cut-off */
567         if (count >= PAGE_SIZE*4) {
568                 unsigned long partial;
569
570                 /* How much left of the page? */
571                 partial = (PAGE_SIZE-1) & -(unsigned long) buf;
572                 unwritten = clear_user(buf, partial);
573                 written = partial - unwritten;
574                 if (unwritten)
575                         goto out;
576                 left -= partial;
577                 buf += partial;
578                 unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
579                 written += (left & PAGE_MASK) - unwritten;
580                 if (unwritten)
581                         goto out;
582                 buf += left & PAGE_MASK;
583                 left &= ~PAGE_MASK;
584         }
585         unwritten = clear_user(buf, left);
586         written += left - unwritten;
587 out:
588         return written ? written : -EFAULT;
589 }
590
591 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
592 {
593         if (vma->vm_flags & VM_SHARED)
594                 return shmem_zero_setup(vma);
595         if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
596                 return -EAGAIN;
597         return 0;
598 }
599 #else /* CONFIG_MMU */
600 static ssize_t read_zero(struct file * file, char * buf, 
601                          size_t count, loff_t *ppos)
602 {
603         size_t todo = count;
604
605         while (todo) {
606                 size_t chunk = todo;
607
608                 if (chunk > 4096)
609                         chunk = 4096;   /* Just for latency reasons */
610                 if (clear_user(buf, chunk))
611                         return -EFAULT;
612                 buf += chunk;
613                 todo -= chunk;
614                 cond_resched();
615         }
616         return count;
617 }
618
619 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
620 {
621         return -ENOSYS;
622 }
623 #endif /* CONFIG_MMU */
624
625 static ssize_t write_full(struct file * file, const char __user * buf,
626                           size_t count, loff_t *ppos)
627 {
628         return -ENOSPC;
629 }
630
631 /*
632  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
633  * can fopen() both devices with "a" now.  This was previously impossible.
634  * -- SRB.
635  */
636
637 static loff_t null_lseek(struct file * file, loff_t offset, int orig)
638 {
639         return file->f_pos = 0;
640 }
641
642 /*
643  * The memory devices use the full 32/64 bits of the offset, and so we cannot
644  * check against negative addresses: they are ok. The return value is weird,
645  * though, in that case (0).
646  *
647  * also note that seeking relative to the "end of file" isn't supported:
648  * it has no meaning, so it returns -EINVAL.
649  */
650 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
651 {
652         loff_t ret;
653
654         mutex_lock(&file->f_dentry->d_inode->i_mutex);
655         switch (orig) {
656                 case 0:
657                         file->f_pos = offset;
658                         ret = file->f_pos;
659                         force_successful_syscall_return();
660                         break;
661                 case 1:
662                         file->f_pos += offset;
663                         ret = file->f_pos;
664                         force_successful_syscall_return();
665                         break;
666                 default:
667                         ret = -EINVAL;
668         }
669         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
670         return ret;
671 }
672
673 static int open_port(struct inode * inode, struct file * filp)
674 {
675         return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
676 }
677
678 #define zero_lseek      null_lseek
679 #define full_lseek      null_lseek
680 #define write_zero      write_null
681 #define read_full       read_zero
682 #define open_mem        open_port
683 #define open_kmem       open_mem
684 #define open_oldmem     open_mem
685
686 #ifndef ARCH_HAS_DEV_MEM
687 static struct file_operations mem_fops = {
688         .llseek         = memory_lseek,
689         .read           = read_mem,
690         .write          = write_mem,
691         .mmap           = mmap_mem,
692         .open           = open_mem,
693 };
694 #else
695 extern struct file_operations mem_fops;
696 #endif
697
698 static struct file_operations kmem_fops = {
699         .llseek         = memory_lseek,
700         .read           = read_kmem,
701         .mmap           = mmap_kmem,
702         .open           = open_kmem,
703 };
704
705 static struct file_operations null_fops = {
706         .llseek         = null_lseek,
707         .read           = read_null,
708         .write          = write_null,
709         .splice_write   = splice_write_null,
710 };
711
712 #if defined(CONFIG_ISA) || !defined(__mc68000__)
713 static struct file_operations port_fops = {
714         .llseek         = memory_lseek,
715         .read           = read_port,
716         .write          = write_port,
717         .open           = open_port,
718 };
719 #endif
720
721 static struct file_operations zero_fops = {
722         .llseek         = zero_lseek,
723         .read           = read_zero,
724         .write          = write_zero,
725         .mmap           = mmap_zero,
726 };
727
728 static struct backing_dev_info zero_bdi = {
729         .capabilities   = BDI_CAP_MAP_COPY,
730 };
731
732 static struct file_operations full_fops = {
733         .llseek         = full_lseek,
734         .read           = read_full,
735         .write          = write_full,
736 };
737
738 #ifdef CONFIG_CRASH_DUMP
739 static struct file_operations oldmem_fops = {
740         .read   = read_oldmem,
741         .open   = open_oldmem,
742 };
743 #endif
744
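/*
 * Writes to /dev/kmsg are copied into a temporary kernel buffer and fed
 * to the kernel log via printk().
 */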
745 static ssize_t kmsg_write(struct file * file, const char __user * buf,
746                           size_t count, loff_t *ppos)
747 {
748         char *tmp;
749         ssize_t ret;
750
751         tmp = kmalloc(count + 1, GFP_KERNEL);
752         if (tmp == NULL)
753                 return -ENOMEM;
754         ret = -EFAULT;
755         if (!copy_from_user(tmp, buf, count)) {
756                 tmp[count] = 0;
757                 ret = printk("%s", tmp);
758                 if (ret > count)
759                         /* printk can add a prefix */
760                         ret = count;
761         }
762         kfree(tmp);
763         return ret;
764 }
765
766 static struct file_operations kmsg_fops = {
767         .write =        kmsg_write,
768 };
769
770 static int memory_open(struct inode * inode, struct file * filp)
771 {
772         switch (iminor(inode)) {
773                 case 1:
774                         filp->f_op = &mem_fops;
775                         break;
776                 case 2:
777                         filp->f_op = &kmem_fops;
778                         break;
779                 case 3:
780                         filp->f_op = &null_fops;
781                         break;
782 #if defined(CONFIG_ISA) || !defined(__mc68000__)
783                 case 4:
784                         filp->f_op = &port_fops;
785                         break;
786 #endif
787                 case 5:
788                         filp->f_mapping->backing_dev_info = &zero_bdi;
789                         filp->f_op = &zero_fops;
790                         break;
791                 case 7:
792                         filp->f_op = &full_fops;
793                         break;
794                 case 8:
795                         filp->f_op = &random_fops;
796                         break;
797                 case 9:
798                         filp->f_op = &urandom_fops;
799                         break;
800                 case 11:
801                         filp->f_op = &kmsg_fops;
802                         break;
803 #ifdef CONFIG_CRASH_DUMP
804                 case 12:
805                         filp->f_op = &oldmem_fops;
806                         break;
807 #endif
808                 default:
809                         return -ENXIO;
810         }
811         if (filp->f_op && filp->f_op->open)
812                 return filp->f_op->open(inode,filp);
813         return 0;
814 }
815
816 static struct file_operations memory_fops = {
817         .open           = memory_open,  /* just a selector for the real open */
818 };
819
820 static const struct {
821         unsigned int            minor;
822         char                    *name;
823         umode_t                 mode;
824         const struct file_operations    *fops;
825 } devlist[] = { /* list of minor devices */
826         {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
827         {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
828 #if defined(CONFIG_ISA) || !defined(__mc68000__)
829         {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
830 #endif
831         {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
832         {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
833         {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
834         {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
835         {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
836 #ifdef CONFIG_CRASH_DUMP
837         {12,"oldmem",    S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
838 #endif
839 };
840
841 static struct class *mem_class;
842
843 static int __init chr_dev_init(void)
844 {
845         int i;
846
847         if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
848                 printk(KERN_ERR "unable to get major %d for memory devs\n", MEM_MAJOR);
849
850         mem_class = class_create(THIS_MODULE, "mem");
851         for (i = 0; i < ARRAY_SIZE(devlist); i++) {
852                 class_device_create(mem_class, NULL,
853                                         MKDEV(MEM_MAJOR, devlist[i].minor),
854                                         NULL, devlist[i].name);
855                 devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
856                                 S_IFCHR | devlist[i].mode, devlist[i].name);
857         }
858         
859         return 0;
860 }
861
862 fs_initcall(chr_dev_init);