/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions.
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* same behaviour as i386. PAT always set to cached and MTRRs control the
           caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_PPC64)
        /* On PPC64, we always do non-cacheable access to the IO hole and
         * cacheable elsewhere. Cache paradox can checkstop the CPU and
         * the high_memory heuristic below is wrong on machines with memory
         * above the IO hole... Ah, and of course, XFree86 doesn't pass
         * O_SYNC when mapping us to tap IO space. Surprised ?
         */
        return !page_is_ram(addr >> PAGE_SHIFT);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
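
/*
 * For illustration only (not part of the driver): the O_SYNC checks above
 * are what let a userspace program such as an X server request an uncached
 * mapping of framebuffer or MMIO space through /dev/mem.  A typical sketch:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, phys_addr);
 *
 * With O_SYNC set, mmap_mem() below applies pgprot_noncached() to the
 * mapping on architectures that honour it.
 */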
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (addr >= end_mem)
                return 0;

        if (*count > end_mem - addr)
                *count = end_mem - addr;

        return 1;
}
#endif
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
        unsigned long cursor;

        cursor = from >> PAGE_SHIFT;
        while ((cursor << PAGE_SHIFT) < to) {
                if (!devmem_is_allowed(cursor))
                        return 0;
                cursor++;
        }
        return 1;
}
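
/*
 * Worked example (illustrative only, assuming 4 KiB pages): for
 * range_is_allowed(0x1800, 0x3200), cursor starts at pfn 1 (0x1800 >> 12)
 * and the loop asks devmem_is_allowed() about pfns 1, 2 and 3 (0x3000 is
 * still below 0x3200); pfn 4 (0x4000) is not checked.  A single refused
 * pfn vetoes the whole range.
 */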
static ssize_t do_write_mem(void *p, unsigned long realp,
                            const char __user * buf, size_t count, loff_t *ppos)
{
        ssize_t written;
        unsigned long copied;

        written = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-realp;
                if (sz > count) sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                realp += sz;
                written += sz;
        }
#endif
        if (!range_is_allowed(realp, realp+count))
                return -EPERM;
        copied = copy_from_user(p, buf, count);
        if (copied) {
                ssize_t ret = written + (count - copied);

                if (ret == 0)
                        return -EFAULT;
                return ret;
        }
        written += count;
        *ppos += written;
        return written;
}
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        read = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (!range_is_allowed(p, p+count))
                return -EPERM;
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        return do_write_mem(__va(p), p, buf, count, ppos);
}
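
/*
 * Illustrative userspace view (not part of the driver): with the standard
 * /dev/mem node (char major 1, minor 1), f_pos is the physical address, so
 * dumping the 64 KiB legacy BIOS area at 0xF0000 could be attempted with
 *
 *	dd if=/dev/mem bs=64k skip=15 count=1 of=bios.bin
 *
 * subject to the valid_phys_addr_range()/range_is_allowed() checks above.
 */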
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#ifdef pgprot_noncached
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached;

        uncached = uncached_access(file, offset);
        if (uncached)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            vma->vm_end-vma->vm_start,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 * If we are reading from the reserved section, which is
 * actually used by the current kernel, we just return zeroes.
 * Or if we are reading from the first 640k, we return from the
 * backed up area.
 */
static ssize_t read_oldmem(struct file * file, char * buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn;
        unsigned backup_start, backup_end, relocate_start;
        size_t read=0, csize;

        backup_start = CRASH_BACKUP_BASE / PAGE_SIZE;
        backup_end = backup_start + (CRASH_BACKUP_SIZE / PAGE_SIZE);
        relocate_start = (CRASH_BACKUP_BASE + CRASH_BACKUP_SIZE) / PAGE_SIZE;

        while (count) {
                pfn = *ppos / PAGE_SIZE;

                csize = (count > PAGE_SIZE) ? PAGE_SIZE : count;

                /* Perform translation (see comment above) */
                if ((pfn >= backup_start) && (pfn < backup_end)) {
                        if (clear_user(buf, csize)) {
                                read = -EFAULT;
                                goto done;
                        }
                        goto copy_done;
                } else if (pfn < (CRASH_RELOCATE_SIZE / PAGE_SIZE))
                        pfn += relocate_start;

                if (pfn > saved_max_pfn) {
                        read = 0;
                        goto done;
                }

                if (copy_oldmem_page(pfn, buf, csize, 1)) {
                        read = -EFAULT;
                        goto done;
                }

copy_done:
                *ppos += csize;
                buf += csize;
                read += csize;
                count -= csize;
        }
done:
        return read;
}
#endif
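
/*
 * Hypothetical worked example of the translation above (the real values
 * come from the arch's CRASH_* configuration): if CRASH_BACKUP_BASE were
 * 16 MiB and CRASH_BACKUP_SIZE 640 KiB, reads landing inside that backup
 * window would return zeroes (it now belongs to the current kernel),
 * while reads of the old kernel's first 640 KiB would be redirected to
 * the copy preserved just above the backup window at crash time.
 */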
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read) tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (verify_area(VERIFY_WRITE,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (verify_area(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
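
/*
 * Illustrative only: through /dev/port (char major 1, minor 4) the file
 * offset selects the I/O port, one byte per port.  A sketch of reading
 * the keyboard controller status port 0x64:
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char status;
 *	lseek(fd, 0x64, SEEK_SET);
 *	read(fd, &status, 1);
 *
 * open_port() below additionally requires CAP_SYS_RAWIO.
 */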
static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}
#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr=(unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;

out_up:
        up_read(&mm->mmap_sem);
        return size;
}
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (count == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
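                /*
                 * Worked example (illustrative): -(unsigned long)buf is
                 * 2^BITS_PER_LONG - buf, so ANDing it with PAGE_SIZE-1
                 * yields the distance from buf up to the next page
                 * boundary.  E.g. with 4 KiB pages and buf ending in
                 * ...f400, -buf ends in ...0c00 and partial == 0xc00
                 * (3 KiB); if buf is already page aligned, partial == 0.
                 */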
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out_partial;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out_partial;
                /* Write the rest */
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out_partial:
        return written ? written : -EFAULT;
}
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
        }
        return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */
static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}
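
/*
 * Illustrative only: the rules above mean that on /dev/mem
 *
 *	lseek(fd, 0xA0000, SEEK_SET);	// seek to a physical address: OK
 *	lseek(fd, 0, SEEK_END);		// no meaningful "end": -EINVAL
 *
 * and a negative-looking offset is accepted as a large unsigned address.
 */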
static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define mmap_kmem       mmap_mem
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem
static struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};
static struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};
static struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
};
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif
static struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};
static struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};
#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
        .read           = read_oldmem,
        .open           = open_oldmem,
};
#endif
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
        }
        kfree(tmp);
        return ret;
}

static struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};
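
/*
 * Illustrative only: writes to /dev/kmsg (char major 1, minor 11) are
 * handed to printk(), so a message can be injected into the kernel log
 * from userspace with e.g.
 *
 *	echo "hello from userspace" > /dev/kmsg
 */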
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}
static struct file_operations memory_fops = {
        .open           = memory_open, /* just a selector for the real open */
};
static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        struct file_operations  *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
static struct class_simple *mem_class;

static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_simple_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
                class_simple_device_add(mem_class,
                                        MKDEV(MEM_MAJOR, devlist[i].minor),
                                        NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                              S_IFCHR | devlist[i].mode, devlist[i].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);
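
/*
 * Illustrative only: the loop above registers every entry in devlist[]
 * under major MEM_MAJOR (1), so the same nodes could also be created by
 * hand, e.g.
 *
 *	mknod -m 666 /dev/null c 1 3
 *	mknod -m 666 /dev/zero c 1 5
 *	mknod -m 644 /dev/random c 1 8
 *
 * matching the minor numbers and modes in the table.
 */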