/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2004 Hugh Dickins.
 * Copyright (C) 2002-2004 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_SUPER_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
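
/*
 * Worked example (illustrative only, assuming 4K PAGE_CACHE_SIZE, a
 * 4-byte unsigned long, and SHMEM_NR_DIRECT == 16, i.e. a typical
 * 32-bit build):
 *	ENTRIES_PER_PAGE     = 4096/4 = 1024 swap entries per index page
 *	ENTRIES_PER_PAGEPAGE = 1024*1024
 *	SHMEM_MAX_INDEX      = 16 + (1024*1024/2) * 1025 = 537,395,216 pages
 *	SHMEM_MAX_BYTES      = 537,395,216 << 12, roughly 2TB per file
 * On 64-bit, sizeof(unsigned long) == 8 halves ENTRIES_PER_PAGE.
 */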
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
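
/*
 * E.g. a freshly created directory reports i_size 40 (two bogo entries,
 * for "." and ".."), and each added name grows it by another 20 bytes;
 * see shmem_get_inode, shmem_mknod and shmem_link below.
 */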
/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped		private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
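
/*
 * For orientation, summarizing the callers below: shmem_nopage faults
 * with SGP_CACHE; shmem_file_read uses SGP_READ so holes read as zeros
 * without allocating; shmem_file_write and shmem_symlink use SGP_WRITE
 * to extend i_size; nonblocking shmem_populate probes with SGP_QUICK.
 */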
static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
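
/*
 * To make the split concrete: a 1GB SysV shm segment is charged against
 * overcommit in full by shmem_file_setup at creation time, whereas a
 * sparse 1GB tmpfs file is charged one page at a time, only as pages
 * are actually instantiated by shmem_getpage.
 */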
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info = {
	.ra_pages	= 0,	/* No readahead */
	.memory_backed	= 1,	/* Does not contribute to dirty memory */
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED;
static void shmem_free_block(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	spin_lock(&sbinfo->stat_lock);
	sbinfo->free_blocks++;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&sbinfo->stat_lock);
}
/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);

		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += freed;
		inode->i_blocks -= freed*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}
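
/*
 * Example: if info->alloced is 8, but the mm has meanwhile reclaimed two
 * clean hole pages so that nrpages is 5 with info->swapped at 1, then
 * freed is 8 - 1 - 5 = 2 pages to return to the free_blocks pool.
 */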
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 */
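
/*
 * Worked example for the artificial layout above (ENTRIES_PER_PAGE = 4,
 * SHMEM_NR_DIRECT = 16), following the arithmetic in shmem_swp_entry
 * below:
 *	index 18: 18-16 = 2, offset 2, slot 2/4 = 0 in the doubly
 *		indirect half, i.e. first leaf page (covering 16-19),
 *		entry 2;
 *	index 25: 25-16 = 9, offset 1, slot 9/4 = 2 >= 2, so the triply
 *		indirect half: middle page *(dir+2), leaf pointer 0
 *		(covering 24-27), entry 1.
 */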
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
		kmap_atomic_to_page(entry)->nr_swapped += incdec;
}
/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->free_blocks <= 1) {
			spin_unlock(&sbinfo->stat_lock);
			return ERR_PTR(-ENOSPC);
		}
		sbinfo->free_blocks--;
		inode->i_blocks += BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page) {
			clear_highpage(page);
			page->nr_swapped = 0;
		}
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_block(inode);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_block(inode);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}
static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	struct page *empty;
	swp_entry_t *ptr;
	int offset;
	int freed;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	limit = info->next_index;
	info->next_index = idx;
	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		info->swapped -= shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!info->i_indirect)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	empty = NULL;
	dir = shmem_dir_map(info->i_indirect);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2)
		dir += idx/ENTRIES_PER_PAGE;
	else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		subdir = *dir;
		if (subdir) {
			size = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!size && !offset) {
				empty = subdir;
				*dir = NULL;
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir) + size;
		} else {
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			if (empty) {
				shmem_dir_free(empty);
				shmem_free_block(inode);
				empty = NULL;
			}
			cond_resched_lock(&info->lock);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_free_swp(ptr+offset, ptr+size);
			shmem_swp_unmap(ptr);
			info->swapped -= freed;
			subdir->nr_swapped -= freed;
			BUG_ON(subdir->nr_swapped > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir) {
			*dir = NULL;
			shmem_dir_free(subdir);
			shmem_free_block(inode);
		}
	}
done1:
	shmem_dir_unmap(dir-1);
	if (empty) {
		shmem_dir_free(empty);
		shmem_free_block(inode);
	}
	if (info->next_index <= SHMEM_NR_DIRECT) {
		shmem_dir_free(info->i_indirect);
		info->i_indirect = NULL;
		shmem_free_block(inode);
	}
done2:
	BUG_ON(info->swapped > info->next_index);
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		spin_unlock(&info->lock);
		truncate_inode_pages(inode->i_mapping, inode->i_size);
		spin_lock(&info->lock);
	}
	info->flags &= ~SHMEM_TRUNCATE;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	if (sbinfo) {
		BUG_ON(inode->i_blocks);
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;
	/* we might be racing with shmem_truncate */
	if (limit <= SHMEM_NR_DIRECT)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}
#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
		 unsigned long idx)
{
	return alloc_page(gfp);
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_block(inode);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_block(inode);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}
static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (nonblock) {
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}
#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		mpol_shared_policy_init(&info->policy);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			break;
		}
	} else if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}
static int shmem_set_size(struct shmem_sb_info *sbinfo,
			  unsigned long max_blocks, unsigned long max_inodes)
{
	int error;
	unsigned long blocks, inodes;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	error = -EINVAL;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
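
/*
 * For instance (an illustrative sketch of typical usage, not anything
 * this file mandates), a tmpfs file can back a loop device, which is
 * exactly what exercises prepare_write/commit_write here:
 *
 *	losetup /dev/loop0 /dev/shm/disk.img
 *	mkfs.ext2 /dev/loop0
 *	mount /dev/loop0 /mnt
 */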
static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode	*inode = file->f_dentry->d_inode;
	loff_t		pos;
	unsigned long	written;
	ssize_t		err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	down(&inode->i_sem);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);
		}
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_from_user_inatomic(kaddr + offset,
						buf, bytes);
		kunmap_atomic(kaddr, KM_USER0);

		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}
	} while (count);

	/*
	 * Our dirty pages are not counted in nr_dirty,
	 * and we do not attempt to balance dirty pages.
	 */

	*ppos = pos;
	if (written)
		err = written;
out:
	up(&inode->i_sem);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_sem protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else
			page = ZERO_PAGE(0);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_SUPER_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
		spin_unlock(&sbinfo->stat_lock);
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return 0;
}

static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return 0;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page;

		page = find_get_page(dentry->d_inode->i_mapping, 0);
		if (!page)
			BUG();
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
		page_cache_release(page);
	}
}
static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
{
	char *this_char, *value, *rest;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
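
/*
 * Illustrative option strings accepted by the parser above:
 *
 *	mount -t tmpfs -o size=50%,mode=1777 tmpfs /dev/shm
 *	mount -t tmpfs -o nr_blocks=4096,nr_inodes=1024,uid=0,gid=0 tmpfs /mnt
 *
 * "size" takes a byte count (with memparse suffixes such as k/m/g) or a
 * percentage of physical RAM, and is converted to a block limit.
 */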
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = 0;
	unsigned long max_inodes = 0;

	if (sbinfo) {
		max_blocks = sbinfo->max_blocks;
		max_inodes = sbinfo->max_inodes;
	}
	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes))
		return -EINVAL;
	/* Keep it simple: disallow limited <-> unlimited remount */
	if ((max_blocks || max_inodes) == !sbinfo)
		return -EINVAL;
	/* But allow the pointless unlimited -> unlimited remount */
	if (!sbinfo)
		return 0;
	return shmem_set_size(sbinfo, max_blocks, max_inodes);
}

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
#ifdef CONFIG_TMPFS_XATTR
static struct xattr_handler *shmem_xattr_handlers[];
#else
#define shmem_xattr_handlers NULL
#endif
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;

#ifdef CONFIG_TMPFS
	unsigned long blocks = 0;
	unsigned long inodes = 0;

	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
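	/*
	 * E.g. on a 512MB machine with no highmem (4K pages, 131072
	 * pages of RAM): blocks defaults to 65536 (256MB), and inodes
	 * to min(131072, 65536) = 65536, since lowmem equals total RAM
	 * there.
	 */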
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;

		if (shmem_parse_options(data, &mode,
					&uid, &gid, &blocks, &inodes))
			return -EINVAL;
	}

	if (blocks || inodes) {
		struct shmem_sb_info *sbinfo;
		sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
		if (!sbinfo)
			return -ENOMEM;
		sb->s_fs_info = sbinfo;
		spin_lock_init(&sbinfo->stat_lock);
		sbinfo->max_blocks = blocks;
		sbinfo->free_blocks = blocks;
		sbinfo->max_inodes = inodes;
		sbinfo->free_inodes = inodes;
	}
	sb->s_xattr = shmem_xattr_handlers;
#endif

	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_SUPER_MAGIC;
	sb->s_op = &shmem_ops;
	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static kmem_cache_t *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}
static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
#endif
};

static struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
#ifdef CONFIG_TMPFS_SECURITY

static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
					const char *name, size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

#endif	/* CONFIG_TMPFS_SECURITY */

#ifdef CONFIG_TMPFS_XATTR

static struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_SECURITY
	&shmem_xattr_security_handler,
#endif
	NULL
};

#endif	/* CONFIG_TMPFS_XATTR */
static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
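
/*
 * Illustrative userspace view of the path above (a sketch assuming a
 * standard glibc environment; not part of this file): a MAP_SHARED |
 * MAP_ANONYMOUS mapping reaches shmem_zero_setup via do_mmap_pgoff, so
 * the region below ends up backed by an unlinked tmpfs file named
 * "dev/zero":
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char *p = mmap(0, 1 << 20, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		memset(p, 0xff, 1 << 20);  -- tmpfs pages allocated here
 *		return 0;
 *	}
 */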