X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fshmem.c;h=8951a2a5023f3b7b7b141ab35ff98699c8eb7281;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=f1438c77e87a81d18c55079b12aff46ae4c0ea10;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/mm/shmem.c b/mm/shmem.c index f1438c77e..8951a2a50 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -6,8 +6,9 @@ * 2000-2001 Christoph Rohland * 2000-2001 SAP AG * 2002 Red Hat Inc. - * Copyright (C) 2002-2003 Hugh Dickins. - * Copyright (C) 2002-2003 VERITAS Software Corporation. + * Copyright (C) 2002-2004 Hugh Dickins. + * Copyright (C) 2002-2004 VERITAS Software Corporation. + * Copyright (C) 2004 Andi Kleen, SuSE Labs * * This file is released under the GPL. */ @@ -37,11 +38,14 @@ #include #include #include +#include +#include +#include #include #include +#include /* This magic number is used in glibc for posix shared memory */ -#define TMPFS_MAGIC 0x01021994 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) @@ -99,23 +103,25 @@ static inline void shmem_dir_unmap(struct page **dir) } static swp_entry_t *shmem_swp_map(struct page *page) +{ + return (swp_entry_t *)kmap_atomic(page, KM_USER1); +} + +static inline void shmem_swp_balance_unmap(void) { /* - * We have to avoid the unconditional inc_preempt_count() - * in kmap_atomic(), since shmem_swp_unmap() will also be - * applied to the low memory addresses within i_direct[]. - * PageHighMem and high_memory tests are good for all arches - * and configs: highmem_start_page and FIXADDR_START are not. + * When passing a pointer to an i_direct entry, to code which + * also handles indirect entries and so will shmem_swp_unmap, + * we must arrange for the preempt count to remain in balance. + * What kmap_atomic of a lowmem page does depends on config + * and architecture, so pretend to kmap_atomic some lowmem page. */ - return PageHighMem(page)? 
- (swp_entry_t *)kmap_atomic(page, KM_USER1): - (swp_entry_t *)page_address(page); + (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); } static inline void shmem_swp_unmap(swp_entry_t *entry) { - if (entry >= (swp_entry_t *)high_memory) - kunmap_atomic(entry, KM_USER1); + kunmap_atomic(entry, KM_USER1); } static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) @@ -172,16 +178,18 @@ static struct backing_dev_info shmem_backing_dev_info = { .unplug_io_fn = default_unplug_io_fn, }; -LIST_HEAD(shmem_inodes); -static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED; +static LIST_HEAD(shmem_swaplist); +static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED; static void shmem_free_block(struct inode *inode) { struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); - spin_lock(&sbinfo->stat_lock); - sbinfo->free_blocks++; - inode->i_blocks -= BLOCKS_PER_PAGE; - spin_unlock(&sbinfo->stat_lock); + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + sbinfo->free_blocks++; + inode->i_blocks -= BLOCKS_PER_PAGE; + spin_unlock(&sbinfo->stat_lock); + } } /* @@ -206,11 +214,13 @@ static void shmem_recalc_inode(struct inode *inode) if (freed > 0) { struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); info->alloced -= freed; - spin_lock(&sbinfo->stat_lock); - sbinfo->free_blocks += freed; - inode->i_blocks -= freed*BLOCKS_PER_PAGE; - spin_unlock(&sbinfo->stat_lock); shmem_unacct_blocks(info->flags, freed); + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + sbinfo->free_blocks += freed; + inode->i_blocks -= freed*BLOCKS_PER_PAGE; + spin_unlock(&sbinfo->stat_lock); + } } } @@ -258,8 +268,10 @@ static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long struct page **dir; struct page *subdir; - if (index < SHMEM_NR_DIRECT) + if (index < SHMEM_NR_DIRECT) { + shmem_swp_balance_unmap(); return info->i_direct+index; + } if (!info->i_indirect) { if (page) { info->i_indirect = *page; @@ -301,17 +313,7 @@ static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long *page = NULL; } shmem_dir_unmap(dir); - - /* - * With apologies... caller shmem_swp_alloc passes non-NULL - * page (though perhaps NULL *page); and now we know that this - * indirect page has been allocated, we can shortcut the final - * kmap if we know it contains no swap entries, as is commonly - * the case: return pointer to a 0 which doesn't need kmapping. - */ - return (page && !subdir->nr_swapped)? - (swp_entry_t *)&subdir->nr_swapped: - shmem_swp_map(subdir) + offset; + return shmem_swp_map(subdir) + offset; } static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) @@ -338,7 +340,6 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct page *page = NULL; swp_entry_t *entry; - static const swp_entry_t unswapped = { 0 }; if (sgp != SGP_WRITE && ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) @@ -346,20 +347,22 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long while (!(entry = shmem_swp_entry(info, index, &page))) { if (sgp == SGP_READ) - return (swp_entry_t *) &unswapped; + return shmem_swp_map(ZERO_PAGE(0)); /* * Test free_blocks against 1 not 0, since we have 1 data * page (and perhaps indirect index pages) yet to allocate: * a waste to allocate index if we cannot allocate data. 
*/ - spin_lock(&sbinfo->stat_lock); - if (sbinfo->free_blocks <= 1) { + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + if (sbinfo->free_blocks <= 1) { + spin_unlock(&sbinfo->stat_lock); + return ERR_PTR(-ENOSPC); + } + sbinfo->free_blocks--; + inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&sbinfo->stat_lock); - return ERR_PTR(-ENOSPC); } - sbinfo->free_blocks--; - inode->i_blocks += BLOCKS_PER_PAGE; - spin_unlock(&sbinfo->stat_lock); spin_unlock(&info->lock); page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping)); @@ -600,17 +603,21 @@ static void shmem_delete_inode(struct inode *inode) struct shmem_inode_info *info = SHMEM_I(inode); if (inode->i_op->truncate == shmem_truncate) { - spin_lock(&shmem_ilock); - list_del(&info->list); - spin_unlock(&shmem_ilock); shmem_unacct_size(info->flags, inode->i_size); inode->i_size = 0; shmem_truncate(inode); + if (!list_empty(&info->swaplist)) { + spin_lock(&shmem_swaplist_lock); + list_del_init(&info->swaplist); + spin_unlock(&shmem_swaplist_lock); + } + } + if (sbinfo) { + BUG_ON(inode->i_blocks); + spin_lock(&sbinfo->stat_lock); + sbinfo->free_inodes++; + spin_unlock(&sbinfo->stat_lock); } - BUG_ON(inode->i_blocks); - spin_lock(&sbinfo->stat_lock); - sbinfo->free_inodes++; - spin_unlock(&sbinfo->stat_lock); clear_inode(inode); } @@ -645,8 +652,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > SHMEM_NR_DIRECT) size = SHMEM_NR_DIRECT; offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) + if (offset >= 0) { + shmem_swp_balance_unmap(); goto found; + } if (!info->i_indirect) goto lost2; /* we might be racing with shmem_truncate */ @@ -713,22 +722,23 @@ found: */ int shmem_unuse(swp_entry_t entry, struct page *page) { - struct list_head *p; + struct list_head *p, *next; struct shmem_inode_info *info; int found = 0; - spin_lock(&shmem_ilock); - list_for_each(p, &shmem_inodes) { - info = list_entry(p, struct shmem_inode_info, list); - - if (info->swapped && shmem_unuse_inode(info, entry, page)) { + spin_lock(&shmem_swaplist_lock); + list_for_each_safe(p, next, &shmem_swaplist) { + info = list_entry(p, struct shmem_inode_info, swaplist); + if (!info->swapped) + list_del_init(&info->swaplist); + else if (shmem_unuse_inode(info, entry, page)) { /* move head to start search for next from here */ - list_move_tail(&shmem_inodes, &info->list); + list_move_tail(&shmem_swaplist, &info->swaplist); found = 1; break; } } - spin_unlock(&shmem_ilock); + spin_unlock(&shmem_swaplist_lock); return found; } @@ -770,6 +780,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) shmem_swp_set(info, entry, swap.val); shmem_swp_unmap(entry); spin_unlock(&info->lock); + if (list_empty(&info->swaplist)) { + spin_lock(&shmem_swaplist_lock); + /* move instead of add in case we're racing */ + list_move_tail(&info->swaplist, &shmem_swaplist); + spin_unlock(&shmem_swaplist_lock); + } unlock_page(page); return 0; } @@ -783,6 +799,74 @@ redirty: return WRITEPAGE_ACTIVATE; /* Return with the page locked */ } +#ifdef CONFIG_NUMA +static struct page *shmem_swapin_async(struct shared_policy *p, + swp_entry_t entry, unsigned long idx) +{ + struct page *page; + struct vm_area_struct pvma; + + /* Create a pseudo vma that just contains the policy */ + memset(&pvma, 0, sizeof(struct vm_area_struct)); + pvma.vm_end = PAGE_SIZE; + pvma.vm_pgoff = idx; + pvma.vm_policy = mpol_shared_policy_lookup(p, idx); + page = read_swap_cache_async(entry, &pvma, 0); + mpol_free(pvma.vm_policy); + return page; +} + 
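/*
 * Aside: shmem_swapin_async() above and shmem_alloc_page() below both
 * lean on the same pseudo-vma pattern: build a throwaway
 * vm_area_struct on the stack that carries only a shared mempolicy
 * and a pgoff, so the policy-aware allocators can pick a NUMA node
 * without any real mapping existing.  A minimal sketch of that
 * pattern, assuming the mpol_shared_policy_lookup()/alloc_page_vma()
 * interfaces used in this tree; the helper name is illustrative, not
 * part of the patch:
 */
static struct page *alloc_policy_page(struct shared_policy *sp,
				      unsigned long gfp, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(pvma));	/* no mm, no vm_ops: never faulted */
	pvma.vm_end = PAGE_SIZE;	/* one page long, so offset 0 is valid */
	pvma.vm_pgoff = idx;		/* MPOL_INTERLEAVE picks nodes by pgoff */
	pvma.vm_policy = mpol_shared_policy_lookup(sp, idx); /* takes a ref */
	page = alloc_page_vma(gfp, &pvma, 0);	/* node chosen from the policy */
	mpol_free(pvma.vm_policy);	/* drop the reference from the lookup */
	return page;
}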
+struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, + unsigned long idx) +{ + struct shared_policy *p = &info->policy; + int i, num; + struct page *page; + unsigned long offset; + + num = valid_swaphandles(entry, &offset); + for (i = 0; i < num; offset++, i++) { + page = shmem_swapin_async(p, + swp_entry(swp_type(entry), offset), idx); + if (!page) + break; + page_cache_release(page); + } + lru_add_drain(); /* Push any new pages onto the LRU now */ + return shmem_swapin_async(p, entry, idx); +} + +static struct page * +shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info, + unsigned long idx) +{ + struct vm_area_struct pvma; + struct page *page; + + memset(&pvma, 0, sizeof(struct vm_area_struct)); + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + pvma.vm_pgoff = idx; + pvma.vm_end = PAGE_SIZE; + page = alloc_page_vma(gfp, &pvma, 0); + mpol_free(pvma.vm_policy); + return page; +} +#else +static inline struct page * +shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) +{ + swapin_readahead(entry, 0, NULL); + return read_swap_cache_async(entry, NULL, 0); +} + +static inline struct page * +shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info, + unsigned long idx) +{ + return alloc_page(gfp); +} +#endif + /* * shmem_getpage - either get the page from swap or allocate a new one * @@ -790,7 +874,8 @@ redirty: * vm. If we swap it in we mark it dirty since we also free the swap * entry since a page cannot live in both the swap and page cache */ -static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type) +static int shmem_getpage(struct inode *inode, unsigned long idx, + struct page **pagep, enum sgp_type sgp, int *type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); @@ -799,7 +884,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p struct page *swappage; swp_entry_t *entry; swp_entry_t swap; - int error, majmin = VM_FAULT_MINOR; + int error; if (idx >= SHMEM_MAX_INDEX) return -EFBIG; @@ -837,11 +922,11 @@ repeat: shmem_swp_unmap(entry); spin_unlock(&info->lock); /* here we actually do the io */ - if (majmin == VM_FAULT_MINOR && type) + if (type && *type == VM_FAULT_MINOR) { inc_page_state(pgmajfault); - majmin = VM_FAULT_MAJOR; - swapin_readahead(swap); - swappage = read_swap_cache_async(swap); + *type = VM_FAULT_MAJOR; + } + swappage = shmem_swapin(info, swap, idx); if (!swappage) { spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); @@ -933,20 +1018,29 @@ repeat: } else { shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); - spin_lock(&sbinfo->stat_lock); - if (sbinfo->free_blocks == 0 || shmem_acct_block(info->flags)) { + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + if (sbinfo->free_blocks == 0 || + shmem_acct_block(info->flags)) { + spin_unlock(&sbinfo->stat_lock); + spin_unlock(&info->lock); + error = -ENOSPC; + goto failed; + } + sbinfo->free_blocks--; + inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&sbinfo->stat_lock); + } else if (shmem_acct_block(info->flags)) { spin_unlock(&info->lock); error = -ENOSPC; goto failed; } - sbinfo->free_blocks--; - inode->i_blocks += BLOCKS_PER_PAGE; - spin_unlock(&sbinfo->stat_lock); if (!filepage) { spin_unlock(&info->lock); - filepage = page_cache_alloc(mapping); + filepage = shmem_alloc_page(mapping_gfp_mask(mapping), + info, + idx); if (!filepage) { shmem_unacct_blocks(info->flags, 1); 
shmem_free_block(inode); @@ -983,15 +1077,10 @@ repeat: SetPageUptodate(filepage); } done: - if (!*pagep) { - if (filepage) { - unlock_page(filepage); - *pagep = filepage; - } else - *pagep = ZERO_PAGE(0); + if (*pagep != filepage) { + unlock_page(filepage); + *pagep = filepage; } - if (type) - *type = majmin; return 0; failed: @@ -1051,15 +1140,9 @@ static int shmem_populate(struct vm_area_struct *vma, return err; } } else if (nonblock) { - /* - * If a nonlinear mapping then store the file page - * offset in the pte. - */ - if (pgoff != linear_page_index(vma, addr)) { - err = install_file_pte(mm, vma, addr, pgoff, prot); - if (err) - return err; - } + err = install_file_pte(mm, vma, addr, pgoff, prot); + if (err) + return err; } len -= PAGE_SIZE; @@ -1069,17 +1152,44 @@ static int shmem_populate(struct vm_area_struct *vma, return 0; } -void shmem_lock(struct file *file, int lock) +#ifdef CONFIG_NUMA +int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) +{ + struct inode *i = vma->vm_file->f_dentry->d_inode; + return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); +} + +struct mempolicy * +shmem_get_policy(struct vm_area_struct *vma, unsigned long addr) +{ + struct inode *i = vma->vm_file->f_dentry->d_inode; + unsigned long idx; + + idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); +} +#endif + +int shmem_lock(struct file *file, int lock, struct user_struct *user) { struct inode *inode = file->f_dentry->d_inode; struct shmem_inode_info *info = SHMEM_I(inode); + int retval = -ENOMEM; spin_lock(&info->lock); - if (lock) + if (lock && !(info->flags & VM_LOCKED)) { + if (!user_shm_lock(inode->i_size, user)) + goto out_nomem; info->flags |= VM_LOCKED; - else + } + if (!lock && (info->flags & VM_LOCKED) && user) { + user_shm_unlock(inode->i_size, user); info->flags &= ~VM_LOCKED; + } + retval = 0; +out_nomem: spin_unlock(&info->lock); + return retval; } static int shmem_mmap(struct file *file, struct vm_area_struct *vma) @@ -1096,13 +1206,15 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) struct shmem_inode_info *info; struct shmem_sb_info *sbinfo = SHMEM_SB(sb); - spin_lock(&sbinfo->stat_lock); - if (!sbinfo->free_inodes) { + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + if (!sbinfo->free_inodes) { + spin_unlock(&sbinfo->stat_lock); + return NULL; + } + sbinfo->free_inodes--; spin_unlock(&sbinfo->stat_lock); - return NULL; } - sbinfo->free_inodes--; - spin_unlock(&sbinfo->stat_lock); inode = new_inode(sb); if (inode) { @@ -1117,6 +1229,9 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) info = SHMEM_I(inode); memset(info, 0, (char *)inode - (char *)info); spin_lock_init(&info->lock); + mpol_shared_policy_init(&info->policy); + INIT_LIST_HEAD(&info->swaplist); + switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); @@ -1124,9 +1239,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) case S_IFREG: inode->i_op = &shmem_inode_operations; inode->i_fop = &shmem_file_operations; - spin_lock(&shmem_ilock); - list_add_tail(&info->list, &shmem_inodes); - spin_unlock(&shmem_ilock); break; case S_IFDIR: inode->i_nlink++; @@ -1142,32 +1254,32 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) return inode; } -static int shmem_set_size(struct shmem_sb_info *info, +#ifdef CONFIG_TMPFS + +static int shmem_set_size(struct shmem_sb_info *sbinfo, unsigned long max_blocks, unsigned long max_inodes) { int error; unsigned long blocks, 
inodes; - spin_lock(&info->stat_lock); - blocks = info->max_blocks - info->free_blocks; - inodes = info->max_inodes - info->free_inodes; + spin_lock(&sbinfo->stat_lock); + blocks = sbinfo->max_blocks - sbinfo->free_blocks; + inodes = sbinfo->max_inodes - sbinfo->free_inodes; error = -EINVAL; if (max_blocks < blocks) goto out; if (max_inodes < inodes) goto out; error = 0; - info->max_blocks = max_blocks; - info->free_blocks = max_blocks - blocks; - info->max_inodes = max_inodes; - info->free_inodes = max_inodes - inodes; + sbinfo->max_blocks = max_blocks; + sbinfo->free_blocks = max_blocks - blocks; + sbinfo->max_inodes = max_inodes; + sbinfo->free_inodes = max_inodes - inodes; out: - spin_unlock(&info->stat_lock); + spin_unlock(&sbinfo->stat_lock); return error; } -#ifdef CONFIG_TMPFS - static struct inode_operations shmem_symlink_inode_operations; static struct inode_operations shmem_symlink_inline_operations; @@ -1188,7 +1300,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t struct inode *inode = file->f_dentry->d_inode; loff_t pos; unsigned long written; - int err; + ssize_t err; if ((ssize_t) count < 0) return -EINVAL; @@ -1240,7 +1352,8 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t __get_user(dummy, buf + bytes - 1); kaddr = kmap_atomic(page, KM_USER0); - left = __copy_from_user(kaddr + offset, buf, bytes); + left = __copy_from_user_inatomic(kaddr + offset, + buf, bytes); kunmap_atomic(kaddr, KM_USER0); } if (left) { @@ -1324,13 +1437,14 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ if (index == end_index) { nr = i_size & ~PAGE_CACHE_MASK; if (nr <= offset) { - page_cache_release(page); + if (page) + page_cache_release(page); break; } } nr -= offset; - if (page != ZERO_PAGE(0)) { + if (page) { /* * If users can be writing to this page using arbitrary * virtual addresses, take care about potential aliasing @@ -1343,7 +1457,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ */ if (!offset) mark_page_accessed(page); - } + } else + page = ZERO_PAGE(0); /* * Ok, we have the page, and it's up-to-date, so @@ -1384,7 +1499,7 @@ static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count desc.written = 0; desc.count = count; - desc.buf = buf; + desc.arg.buf = buf; desc.error = 0; do_shmem_file_read(filp, ppos, &desc, file_read_actor); @@ -1394,7 +1509,7 @@ static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count } static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos, - size_t count, read_actor_t actor, void __user *target) + size_t count, read_actor_t actor, void *target) { read_descriptor_t desc; @@ -1403,7 +1518,7 @@ static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos, desc.written = 0; desc.count = count; - desc.buf = target; + desc.arg.data = target; desc.error = 0; do_shmem_file_read(in_file, ppos, &desc, actor); @@ -1416,15 +1531,18 @@ static int shmem_statfs(struct super_block *sb, struct kstatfs *buf) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); - buf->f_type = TMPFS_MAGIC; + buf->f_type = TMPFS_SUPER_MAGIC; buf->f_bsize = PAGE_CACHE_SIZE; - spin_lock(&sbinfo->stat_lock); - buf->f_blocks = sbinfo->max_blocks; - buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; - buf->f_files = sbinfo->max_inodes; - buf->f_ffree = sbinfo->free_inodes; - spin_unlock(&sbinfo->stat_lock); buf->f_namelen = NAME_MAX; + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + buf->f_blocks 
= sbinfo->max_blocks; + buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; + buf->f_files = sbinfo->max_inodes; + buf->f_ffree = sbinfo->free_inodes; + spin_unlock(&sbinfo->stat_lock); + } + /* else leave those fields 0 like simple_statfs */ return 0; } @@ -1474,6 +1592,22 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + + /* + * No ordinary (disk based) filesystem counts links as inodes; + * but each new link needs a new dentry, pinning lowmem, and + * tmpfs dentries cannot be pruned until they are unlinked. + */ + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + if (!sbinfo->free_inodes) { + spin_unlock(&sbinfo->stat_lock); + return -ENOSPC; + } + sbinfo->free_inodes--; + spin_unlock(&sbinfo->stat_lock); + } dir->i_size += BOGO_DIRENT_SIZE; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; @@ -1488,6 +1622,15 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; + if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + if (sbinfo) { + spin_lock(&sbinfo->stat_lock); + sbinfo->free_inodes++; + spin_unlock(&sbinfo->stat_lock); + } + } + dir->i_size -= BOGO_DIRENT_SIZE; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; inode->i_nlink--; @@ -1565,9 +1708,6 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s return error; } inode->i_op = &shmem_symlink_inode_operations; - spin_lock(&shmem_ilock); - list_add_tail(&info->list, &shmem_inodes); - spin_unlock(&shmem_ilock); kaddr = kmap_atomic(page, KM_USER0); memcpy(kaddr, symname, len); kunmap_atomic(kaddr, KM_USER0); @@ -1583,51 +1723,45 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s return 0; } -static int shmem_readlink_inline(struct dentry *dentry, char __user *buffer, int buflen) -{ - return vfs_readlink(dentry, buffer, buflen, (const char *)SHMEM_I(dentry->d_inode)); -} - static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) { - return vfs_follow_link(nd, (const char *)SHMEM_I(dentry->d_inode)); + nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); + return 0; } -static int shmem_readlink(struct dentry *dentry, char __user *buffer, int buflen) +static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd) { struct page *page = NULL; int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); - if (res) - return res; - res = vfs_readlink(dentry, buffer, buflen, kmap(page)); - kunmap(page); - mark_page_accessed(page); - page_cache_release(page); - return res; + nd_set_link(nd, res ? 
ERR_PTR(res) : kmap(page)); + return 0; } -static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd) +static void shmem_put_link(struct dentry *dentry, struct nameidata *nd) { - struct page *page = NULL; - int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); - if (res) - return res; - res = vfs_follow_link(nd, kmap(page)); - kunmap(page); - mark_page_accessed(page); - page_cache_release(page); - return res; + if (!IS_ERR(nd_get_link(nd))) { + struct page *page; + + page = find_get_page(dentry->d_inode->i_mapping, 0); + if (!page) + BUG(); + kunmap(page); + mark_page_accessed(page); + page_cache_release(page); + page_cache_release(page); + } } static struct inode_operations shmem_symlink_inline_operations = { - .readlink = shmem_readlink_inline, + .readlink = generic_readlink, .follow_link = shmem_follow_link_inline, }; static struct inode_operations shmem_symlink_inode_operations = { .truncate = shmem_truncate, - .readlink = shmem_readlink, + .readlink = generic_readlink, .follow_link = shmem_follow_link, + .put_link = shmem_put_link, }; static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes) @@ -1702,57 +1836,79 @@ bad_val: static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); - unsigned long max_blocks = sbinfo->max_blocks; - unsigned long max_inodes = sbinfo->max_inodes; + unsigned long max_blocks = 0; + unsigned long max_inodes = 0; + if (sbinfo) { + max_blocks = sbinfo->max_blocks; + max_inodes = sbinfo->max_inodes; + } if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes)) return -EINVAL; + /* Keep it simple: disallow limited <-> unlimited remount */ + if ((max_blocks || max_inodes) == !sbinfo) + return -EINVAL; + /* But allow the pointless unlimited -> unlimited remount */ + if (!sbinfo) + return 0; return shmem_set_size(sbinfo, max_blocks, max_inodes); } #endif +static void shmem_put_super(struct super_block *sb) +{ + kfree(sb->s_fs_info); + sb->s_fs_info = NULL; +} + static int shmem_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; - unsigned long blocks, inodes; int mode = S_IRWXUGO | S_ISVTX; uid_t uid = current->fsuid; gid_t gid = current->fsgid; - struct shmem_sb_info *sbinfo; int err = -ENOMEM; - sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL); - if (!sbinfo) - return -ENOMEM; - sb->s_fs_info = sbinfo; - memset(sbinfo, 0, sizeof(struct shmem_sb_info)); +#ifdef CONFIG_TMPFS + unsigned long blocks = 0; + unsigned long inodes = 0; /* * Per default we only allow half of the physical ram per - * tmpfs instance + * tmpfs instance, limiting inodes to one per page of lowmem; + * but the internal instance is left unlimited. 
*/ - blocks = inodes = totalram_pages / 2; + if (!(sb->s_flags & MS_NOUSER)) { + blocks = totalram_pages / 2; + inodes = totalram_pages - totalhigh_pages; + if (inodes > blocks) + inodes = blocks; + + if (shmem_parse_options(data, &mode, + &uid, &gid, &blocks, &inodes)) + return -EINVAL; + } -#ifdef CONFIG_TMPFS - if (shmem_parse_options(data, &mode, &uid, &gid, &blocks, &inodes)) { - err = -EINVAL; - goto failed; + if (blocks || inodes) { + struct shmem_sb_info *sbinfo; + sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + sb->s_fs_info = sbinfo; + spin_lock_init(&sbinfo->stat_lock); + sbinfo->max_blocks = blocks; + sbinfo->free_blocks = blocks; + sbinfo->max_inodes = inodes; + sbinfo->free_inodes = inodes; } -#else - sb->s_flags |= MS_NOUSER; #endif - spin_lock_init(&sbinfo->stat_lock); - sbinfo->max_blocks = blocks; - sbinfo->free_blocks = blocks; - sbinfo->max_inodes = inodes; - sbinfo->free_inodes = inodes; sb->s_maxbytes = SHMEM_MAX_BYTES; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = TMPFS_MAGIC; + sb->s_magic = TMPFS_SUPER_MAGIC; sb->s_op = &shmem_ops; inode = shmem_get_inode(sb, S_IFDIR | mode, 0); if (!inode) @@ -1768,17 +1924,10 @@ static int shmem_fill_super(struct super_block *sb, failed_iput: iput(inode); failed: - kfree(sbinfo); - sb->s_fs_info = NULL; + shmem_put_super(sb); return err; } -static void shmem_put_super(struct super_block *sb) -{ - kfree(sb->s_fs_info); - sb->s_fs_info = NULL; -} - static kmem_cache_t *shmem_inode_cachep; static struct inode *shmem_alloc_inode(struct super_block *sb) @@ -1792,6 +1941,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb) static void shmem_destroy_inode(struct inode *inode) { + mpol_free_shared_policy(&SHMEM_I(inode)->policy); kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); } @@ -1808,9 +1958,8 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) static int init_inodecache(void) { shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", - sizeof(struct shmem_inode_info), - 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, - init_once, NULL); + sizeof(struct shmem_inode_info), + 0, 0, init_once, NULL); if (shmem_inode_cachep == NULL) return -ENOMEM; return 0; @@ -1876,6 +2025,10 @@ static struct super_operations shmem_ops = { static struct vm_operations_struct shmem_vm_ops = { .nopage = shmem_nopage, .populate = shmem_populate, +#ifdef CONFIG_NUMA + .set_policy = shmem_set_policy, + .get_policy = shmem_get_policy, +#endif }; static struct super_block *shmem_get_sb(struct file_system_type *fs_type, @@ -1908,15 +2061,13 @@ static int __init init_tmpfs(void) #ifdef CONFIG_TMPFS devfs_mk_dir("shm"); #endif - shm_mnt = kern_mount(&tmpfs_fs_type); + shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER, + tmpfs_fs_type.name, NULL); if (IS_ERR(shm_mnt)) { error = PTR_ERR(shm_mnt); printk(KERN_ERR "Could not kern_mount tmpfs\n"); goto out1; } - - /* The internal instance should not do size checking */ - shmem_set_size(SHMEM_SB(shm_mnt->mnt_sb), ULONG_MAX, ULONG_MAX); return 0; out1: @@ -1947,7 +2098,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) if (IS_ERR(shm_mnt)) return (void *)shm_mnt; - if (size > SHMEM_MAX_BYTES) + if (size < 0 || size > SHMEM_MAX_BYTES) return ERR_PTR(-EINVAL); if (shmem_acct_size(flags, size)) @@ -1981,7 +2132,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) file->f_mapping = inode->i_mapping; file->f_op = 
&shmem_file_operations; file->f_mode = FMODE_WRITE | FMODE_READ; - return(file); + return file; close_file: put_filp(file);
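The diff ends inside shmem_file_setup()'s error path. For orientation, here is a sketch of how kernel code consumes that interface, modelled on shmem_zero_setup() later in this same file; the function name map_anon_shared() and its surrounding error handling are illustrative, not part of the patch:

static int map_anon_shared(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/* Unlinked tmpfs file; size checks and space accounting are
	 * handled inside shmem_file_setup() as shown above. */
	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);	/* release any old backing file */
	vma->vm_file = file;		/* the vma now owns the reference */
	vma->vm_ops = &shmem_vm_ops;	/* nopage/populate (+ NUMA policy ops) */
	return 0;
}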