/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2004 Hugh Dickins.
 * Copyright (C) 2002-2004 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs with the ability to use swap and to honor resource
 * limits, which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped              private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};
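
/*
 * For orientation: in this file shmem_nopage and shmem_populate fault
 * pages in with SGP_CACHE (SGP_QUICK for the nonblocking populate
 * case), shmem_file_write and shmem_prepare_write use SGP_WRITE, and
 * do_shmem_file_read and shmem_notify_change use SGP_READ.
 */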

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         */
        return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}
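
/*
 * Pairing note: shmem_swp_entry below calls shmem_swp_balance_unmap
 * just before returning an i_direct pointer, so the caller's eventual
 * shmem_swp_unmap (a kunmap_atomic) stays balanced whether the entry
 * came from i_direct or from a kmapped index page.
 */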

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
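
/*
 * A minimal usage sketch (hypothetical caller, not a function in this
 * file): an object set up with VM_ACCOUNT pre-accounts its whole size,
 *
 *        if (shmem_acct_size(flags, size))
 *                return -ENOMEM;
 *        ...
 *        shmem_unacct_size(flags, size);
 *
 * whereas a plain tmpfs file is charged one block at a time,
 *
 *        if (shmem_acct_block(flags))
 *                return -ENOSPC;
 *        ...
 *        shmem_unacct_blocks(flags, freed);
 */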

static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}
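
/*
 * Example: if info->alloced is 100 but nrpages + swapped is only 97
 * (the mm reclaimed three clean hole pages behind our back), freed is
 * 3, and those three blocks are unaccounted and returned to the sb.
 */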

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page whose first half holds doubly
 * indirect blocks and whose second half holds triply indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}
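
/*
 * Worked example, with the artificial ENTRIES_PER_PAGE = 4 and
 * SHMEM_NR_DIRECT = 16 of the layout above: for index 29,
 * index = 29 - 16 = 13, offset = 13 % 4 = 1, index = 13 / 4 = 3.
 * Since 3 >= ENTRIES_PER_PAGE/2, it is triply indirect:
 * index = 3 - 2 = 1, dir += 2 + 1/4 selects dir2, index = 1 % 4 = 1
 * selects dir2's second subdir (the 28-31 block), where offset 1
 * is page 29.
 */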

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
                kmap_atomic_to_page(entry)->nr_swapped += incdec;
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
                if (page) {
                        page->nr_swapped = 0;
                }
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
                int offset, int limit, struct page ***dir)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}
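
/*
 * Note on the chunking above: shmem_free_swp is applied to at most
 * LATENCY_LIMIT entries at a time, and on need_resched both atomic
 * kmaps are dropped around the cond_resched and the swp map retaken,
 * bounding how long preemption stays disabled.
 */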

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

static void shmem_truncate(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        limit = info->next_index;
        info->next_index = idx;
        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
        }
        if (!topdir)
                goto done2;

        BUG_ON(limit <= SHMEM_NR_DIRECT);
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset) {
                                *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        *dir = NULL;
                        nr_pages_to_free++;
                        list_add(&middir->lru, &pages_to_free);
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                subdir = dir[diroff];
                if (subdir && subdir->nr_swapped) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                                offset, size, &dir);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset)
                                spin_lock(&info->lock);
                        subdir->nr_swapped -= freed;
                        if (offset)
                                spin_unlock(&info->lock);
                        BUG_ON(subdir->nr_swapped > offset);
                }
                if (offset)
                        offset = 0;
                else if (subdir) {
                        dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 */
                truncate_inode_pages(inode->i_mapping, inode->i_size);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (attr->ia_valid & ATTR_SIZE) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, and
                         * that page is already allocated, hold it in
                         * memory until the truncation is over, so that
                         * truncate_partial_page cannot miss it if it
                         * gets reassigned to swap meanwhile.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        if (sbinfo) {
                BUG_ON(inode->i_blocks);
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && subdir->nr_swapped) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return WRITEPAGE_ACTIVATE;      /* Return with the page locked */
}

#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(unsigned int __nocast gfp, struct shmem_inode_info *info,
                                 unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif
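
/*
 * The pseudo vmas in the NUMA paths above are never linked into any
 * mm: they exist only to carry the shared policy (and the pgoff it
 * was looked up with) into alloc_page_vma and read_swap_cache_async.
 */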

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and also free the swap entry,
 * since a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_prepare_write passes in a locked filepage,
         * which may be found not uptodate by other callers too,
         * and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        if (sgp == SGP_QUICK)
                goto failed;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        /* here we actually do the io */
                        if (type && *type == VM_FAULT_MINOR) {
                                inc_page_state(pgmajfault);
                                *type = VM_FAULT_MAJOR;
                        }
                        swappage = shmem_swapin(info, swap, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = move_from_swap_cache(
                                swappage, idx, mapping))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                blk_congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
                                                    info,
                                                    idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (*pagep != filepage) {
                unlock_page(filepage);
                *pagep = filepage;
        }
        return 0;

failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
        return error;
}
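
/*
 * Note on the contract above: on success *pagep holds the uptodate
 * page, unlocked if it was found or allocated here, but still locked
 * if the caller passed it in (as shmem_prepare_write does).  On
 * failure, a page brought in here is unlocked and released, while a
 * caller-supplied page is left untouched.
 */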
1153
1154 struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
1155 {
1156         struct inode *inode = vma->vm_file->f_dentry->d_inode;
1157         struct page *page = NULL;
1158         unsigned long idx;
1159         int error;
1160
1161         idx = (address - vma->vm_start) >> PAGE_SHIFT;
1162         idx += vma->vm_pgoff;
1163         idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1164         if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1165                 return NOPAGE_SIGBUS;
1166
1167         error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1168         if (error)
1169                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1170
1171         mark_page_accessed(page);
1172         return page;
1173 }
1174
1175 static int shmem_populate(struct vm_area_struct *vma,
1176         unsigned long addr, unsigned long len,
1177         pgprot_t prot, unsigned long pgoff, int nonblock)
1178 {
1179         struct inode *inode = vma->vm_file->f_dentry->d_inode;
1180         struct mm_struct *mm = vma->vm_mm;
1181         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1182         unsigned long size;
1183
1184         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1185         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1186                 return -EINVAL;
1187
1188         while ((long) len > 0) {
1189                 struct page *page = NULL;
1190                 int err;
1191                 /*
1192                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1193                  */
1194                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1195                 if (err)
1196                         return err;
1197                 if (page) {
1198                         mark_page_accessed(page);
1199                         err = install_page(mm, vma, addr, page, prot);
1200                         if (err) {
1201                                 page_cache_release(page);
1202                                 return err;
1203                         }
1204                 } else if (nonblock) {
1205                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1206                         if (err)
1207                                 return err;
1208                 }
1209
1210                 len -= PAGE_SIZE;
1211                 addr += PAGE_SIZE;
1212                 pgoff++;
1213         }
1214         return 0;
1215 }
1216
1217 #ifdef CONFIG_NUMA
1218 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1219 {
1220         struct inode *i = vma->vm_file->f_dentry->d_inode;
1221         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1222 }
1223
1224 struct mempolicy *
1225 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1226 {
1227         struct inode *i = vma->vm_file->f_dentry->d_inode;
1228         unsigned long idx;
1229
1230         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1231         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1232 }
1233 #endif
1234
1235 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1236 {
1237         struct inode *inode = file->f_dentry->d_inode;
1238         struct shmem_inode_info *info = SHMEM_I(inode);
1239         int retval = -ENOMEM;
1240
1241         spin_lock(&info->lock);
1242         if (lock && !(info->flags & VM_LOCKED)) {
1243                 if (!user_shm_lock(inode->i_size, user))
1244                         goto out_nomem;
1245                 info->flags |= VM_LOCKED;
1246         }
1247         if (!lock && (info->flags & VM_LOCKED) && user) {
1248                 user_shm_unlock(inode->i_size, user);
1249                 info->flags &= ~VM_LOCKED;
1250         }
1251         retval = 0;
1252 out_nomem:
1253         spin_unlock(&info->lock);
1254         return retval;
1255 }
1256
1257 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1258 {
1259         file_accessed(file);
1260         vma->vm_ops = &shmem_vm_ops;
1261         return 0;
1262 }
1263
1264 static struct inode *
1265 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1266 {
1267         struct inode *inode;
1268         struct shmem_inode_info *info;
1269         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1270
1271         if (sbinfo) {
1272                 spin_lock(&sbinfo->stat_lock);
1273                 if (!sbinfo->free_inodes) {
1274                         spin_unlock(&sbinfo->stat_lock);
1275                         return NULL;
1276                 }
1277                 sbinfo->free_inodes--;
1278                 spin_unlock(&sbinfo->stat_lock);
1279         }
1280
1281         inode = new_inode(sb);
1282         if (inode) {
1283                 inode->i_mode = mode;
1284                 inode->i_uid = current->fsuid;
1285                 inode->i_gid = current->fsgid;
1286                 inode->i_blksize = PAGE_CACHE_SIZE;
1287                 inode->i_blocks = 0;
1288                 inode->i_mapping->a_ops = &shmem_aops;
1289                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1290                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1291                 info = SHMEM_I(inode);
1292                 memset(info, 0, (char *)inode - (char *)info);
1293                 spin_lock_init(&info->lock);
1294                 INIT_LIST_HEAD(&info->swaplist);
1295
1296                 switch (mode & S_IFMT) {
1297                 default:
1298                         inode->i_op = &shmem_special_inode_operations;
1299                         init_special_inode(inode, mode, dev);
1300                         break;
1301                 case S_IFREG:
1302                         inode->i_op = &shmem_inode_operations;
1303                         inode->i_fop = &shmem_file_operations;
1304                         mpol_shared_policy_init(&info->policy);
1305                         break;
1306                 case S_IFDIR:
1307                         inode->i_nlink++;
1308                         /* Some things misbehave if size == 0 on a directory */
1309                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1310                         inode->i_op = &shmem_dir_inode_operations;
1311                         inode->i_fop = &simple_dir_operations;
1312                         break;
1313                 case S_IFLNK:
1314                         /*
1315                          * Must not load anything in the rbtree,
1316                          * mpol_free_shared_policy will not be called.
1317                          */
1318                         mpol_shared_policy_init(&info->policy);
1319                         break;
1320                 }
1321         } else if (sbinfo) {
1322                 spin_lock(&sbinfo->stat_lock);
1323                 sbinfo->free_inodes++;
1324                 spin_unlock(&sbinfo->stat_lock);
1325         }
1326         return inode;
1327 }
1328
1329 #ifdef CONFIG_TMPFS
1330
1331 static int shmem_set_size(struct shmem_sb_info *sbinfo,
1332                           unsigned long max_blocks, unsigned long max_inodes)
1333 {
1334         int error;
1335         unsigned long blocks, inodes;
1336
1337         spin_lock(&sbinfo->stat_lock);
1338         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
1339         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
1340         error = -EINVAL;
1341         if (max_blocks < blocks)
1342                 goto out;
1343         if (max_inodes < inodes)
1344                 goto out;
1345         error = 0;
1346         sbinfo->max_blocks  = max_blocks;
1347         sbinfo->free_blocks = max_blocks - blocks;
1348         sbinfo->max_inodes  = max_inodes;
1349         sbinfo->free_inodes = max_inodes - inodes;
1350 out:
1351         spin_unlock(&sbinfo->stat_lock);
1352         return error;
1353 }
1354
1355 static struct inode_operations shmem_symlink_inode_operations;
1356 static struct inode_operations shmem_symlink_inline_operations;
1357
1358 /*
1359  * Normally tmpfs makes no use of shmem_prepare_write, but it
1360  * lets a tmpfs file be used read-write below the loop driver.
1361  */
1362 static int
1363 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1364 {
1365         struct inode *inode = page->mapping->host;
1366         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1367 }
1368
1369 static ssize_t
1370 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1371 {
1372         struct inode    *inode = file->f_dentry->d_inode;
1373         loff_t          pos;
1374         unsigned long   written;
1375         ssize_t         err;
1376
1377         if ((ssize_t) count < 0)
1378                 return -EINVAL;
1379
1380         if (!access_ok(VERIFY_READ, buf, count))
1381                 return -EFAULT;
1382
1383         down(&inode->i_sem);
1384
1385         pos = *ppos;
1386         written = 0;
1387
1388         err = generic_write_checks(file, &pos, &count, 0);
1389         if (err || !count)
1390                 goto out;
1391
1392         err = remove_suid(file->f_dentry);
1393         if (err)
1394                 goto out;
1395
1396         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1397
1398         do {
1399                 struct page *page = NULL;
1400                 unsigned long bytes, index, offset;
1401                 char *kaddr;
1402                 int left;
1403
1404                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1405                 index = pos >> PAGE_CACHE_SHIFT;
1406                 bytes = PAGE_CACHE_SIZE - offset;
1407                 if (bytes > count)
1408                         bytes = count;
1409
1410                 /*
1411                  * We don't hold page lock across copy from user -
1412                  * what would it guard against? - so no deadlock here.
1413                  * But it still may be a good idea to prefault below.
1414                  */
1415
1416                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1417                 if (err)
1418                         break;
1419
1420                 left = bytes;
1421                 if (PageHighMem(page)) {
1422                         volatile unsigned char dummy;
1423                         __get_user(dummy, buf);
1424                         __get_user(dummy, buf + bytes - 1);
1425
1426                         kaddr = kmap_atomic(page, KM_USER0);
1427                         left = __copy_from_user_inatomic(kaddr + offset,
1428                                                         buf, bytes);
1429                         kunmap_atomic(kaddr, KM_USER0);
1430                 }
1431                 if (left) {
1432                         kaddr = kmap(page);
1433                         left = __copy_from_user(kaddr + offset, buf, bytes);
1434                         kunmap(page);
1435                 }
1436
1437                 written += bytes;
1438                 count -= bytes;
1439                 pos += bytes;
1440                 buf += bytes;
1441                 if (pos > inode->i_size)
1442                         i_size_write(inode, pos);
1443
1444                 flush_dcache_page(page);
1445                 set_page_dirty(page);
1446                 mark_page_accessed(page);
1447                 page_cache_release(page);
1448
1449                 if (left) {
1450                         pos -= left;
1451                         written -= left;
1452                         err = -EFAULT;
1453                         break;
1454                 }
1455
1456                 /*
1457                  * Our dirty pages are not counted in nr_dirty,
1458                  * and we do not attempt to balance dirty pages.
1459                  */
1460
1461                 cond_resched();
1462         } while (count);
1463
1464         *ppos = pos;
1465         if (written)
1466                 err = written;
1467 out:
1468         up(&inode->i_sem);
1469         return err;
1470 }
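/*
 * Worked example of the write loop's page arithmetic above, assuming
 * a 4K PAGE_CACHE_SIZE (illustrative values only): a write of
 * count=6000 at pos=1000 splits as
 *
 *      pass 1: index=0, offset=1000, bytes=3096  (fills page 0)
 *      pass 2: index=1, offset=0,    bytes=2904  (partial page 1)
 *
 * so each pass copies at most one page, and a fault partway through
 * rolls pos/written back by the uncopied remainder ("left").
 */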
1471
1472 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1473 {
1474         struct inode *inode = filp->f_dentry->d_inode;
1475         struct address_space *mapping = inode->i_mapping;
1476         unsigned long index, offset;
1477
1478         index = *ppos >> PAGE_CACHE_SHIFT;
1479         offset = *ppos & ~PAGE_CACHE_MASK;
1480
1481         for (;;) {
1482                 struct page *page = NULL;
1483                 unsigned long end_index, nr, ret;
1484                 loff_t i_size = i_size_read(inode);
1485
1486                 end_index = i_size >> PAGE_CACHE_SHIFT;
1487                 if (index > end_index)
1488                         break;
1489                 if (index == end_index) {
1490                         nr = i_size & ~PAGE_CACHE_MASK;
1491                         if (nr <= offset)
1492                                 break;
1493                 }
1494
1495                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1496                 if (desc->error) {
1497                         if (desc->error == -EINVAL)
1498                                 desc->error = 0;
1499                         break;
1500                 }
1501
1502                 /*
1503                  * We must re-evaluate i_size after the getpage, since reads
1504                  * (unlike writes) are called without i_sem protection against truncate
1505                  */
1506                 nr = PAGE_CACHE_SIZE;
1507                 i_size = i_size_read(inode);
1508                 end_index = i_size >> PAGE_CACHE_SHIFT;
1509                 if (index == end_index) {
1510                         nr = i_size & ~PAGE_CACHE_MASK;
1511                         if (nr <= offset) {
1512                                 if (page)
1513                                         page_cache_release(page);
1514                                 break;
1515                         }
1516                 }
1517                 nr -= offset;
1518
1519                 if (page) {
1520                         /*
1521                          * If users can be writing to this page using arbitrary
1522                          * virtual addresses, take care about potential aliasing
1523                          * before reading the page on the kernel side.
1524                          */
1525                         if (mapping_writably_mapped(mapping))
1526                                 flush_dcache_page(page);
1527                         /*
1528                          * Mark the page accessed if we read the beginning.
1529                          */
1530                         if (!offset)
1531                                 mark_page_accessed(page);
1532                 } else
1533                         page = ZERO_PAGE(0);
1534
1535                 /*
1536                  * Ok, we have the page, and it's up-to-date, so
1537                  * now we can copy it to user space...
1538                  *
1539                  * The actor routine returns how many bytes were actually used.
1540                  * NOTE! This may not be the same as how much of a user buffer
1541                  * we filled up (we may be padding, etc.), so we can only update
1542                  * "pos" here (the actor routine has to update the user buffer
1543                  * pointers and the remaining count).
1544                  */
1545                 ret = actor(desc, page, offset, nr);
1546                 offset += ret;
1547                 index += offset >> PAGE_CACHE_SHIFT;
1548                 offset &= ~PAGE_CACHE_MASK;
1549
1550                 page_cache_release(page);
1551                 if (ret != nr || !desc->count)
1552                         break;
1553
1554                 cond_resched();
1555         }
1556
1557         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1558         file_accessed(filp);
1559 }
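/*
 * A worked instance of the index/offset update above, with 4K pages
 * (illustrative numbers): at offset=3000 the page has nr=1096 bytes
 * left; if the actor consumes all 1096, offset becomes 4096, so index
 * advances by 1 and offset wraps to 0.  If it consumes fewer (a short
 * copy), ret != nr and the loop terminates with *ppos mid-page.
 */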
1560
1561 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1562 {
1563         read_descriptor_t desc;
1564
1565         if ((ssize_t) count < 0)
1566                 return -EINVAL;
1567         if (!access_ok(VERIFY_WRITE, buf, count))
1568                 return -EFAULT;
1569         if (!count)
1570                 return 0;
1571
1572         desc.written = 0;
1573         desc.count = count;
1574         desc.arg.buf = buf;
1575         desc.error = 0;
1576
1577         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1578         if (desc.written)
1579                 return desc.written;
1580         return desc.error;
1581 }
1582
1583 static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1584                          size_t count, read_actor_t actor, void *target)
1585 {
1586         read_descriptor_t desc;
1587
1588         if (!count)
1589                 return 0;
1590
1591         desc.written = 0;
1592         desc.count = count;
1593         desc.arg.data = target;
1594         desc.error = 0;
1595
1596         do_shmem_file_read(in_file, ppos, &desc, actor);
1597         if (desc.written)
1598                 return desc.written;
1599         return desc.error;
1600 }
1601
1602 static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
1603 {
1604         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1605
1606         buf->f_type = TMPFS_SUPER_MAGIC;
1607         buf->f_bsize = PAGE_CACHE_SIZE;
1608         buf->f_namelen = NAME_MAX;
1609         if (sbinfo) {
1610                 spin_lock(&sbinfo->stat_lock);
1611                 buf->f_blocks = sbinfo->max_blocks;
1612                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1613                 buf->f_files = sbinfo->max_inodes;
1614                 buf->f_ffree = sbinfo->free_inodes;
1615                 spin_unlock(&sbinfo->stat_lock);
1616         }
1617         /* else leave those fields 0 like simple_statfs */
1618         return 0;
1619 }
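/*
 * The statfs fields map straight onto df(1) output; for a limited
 * mount the counts are in PAGE_CACHE_SIZE blocks.  A hypothetical
 * "mount -t tmpfs -o size=16m,nr_inodes=1024 tmpfs /mnt" with 4K
 * pages would report f_blocks=4096 and f_files=1024; an unlimited
 * (internal) mount leaves the block and inode counts zero.
 */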
1620
1621 /*
1622  * File creation. Allocate an inode, and we're done.
1623  */
1624 static int
1625 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1626 {
1627         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1628         int error = -ENOSPC;
1629
1630         if (inode) {
1631                 if (dir->i_mode & S_ISGID) {
1632                         inode->i_gid = dir->i_gid;
1633                         if (S_ISDIR(mode))
1634                                 inode->i_mode |= S_ISGID;
1635                 }
1636                 dir->i_size += BOGO_DIRENT_SIZE;
1637                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1638                 d_instantiate(dentry, inode);
1639                 dget(dentry); /* Extra count - pin the dentry in core */
1640                 error = 0;
1641         }
1642         return error;
1643 }
1644
1645 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1646 {
1647         int error;
1648
1649         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1650                 return error;
1651         dir->i_nlink++;
1652         return 0;
1653 }
1654
1655 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1656                 struct nameidata *nd)
1657 {
1658         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1659 }
1660
1661 /*
1662  * Link a file.
1663  */
1664 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1665 {
1666         struct inode *inode = old_dentry->d_inode;
1667         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1668
1669         /*
1670          * No ordinary (disk-based) filesystem counts links as inodes;
1671          * but each new link needs a new dentry, pinning lowmem, and
1672          * tmpfs dentries cannot be pruned until they are unlinked.
1673          */
1674         if (sbinfo) {
1675                 spin_lock(&sbinfo->stat_lock);
1676                 if (!sbinfo->free_inodes) {
1677                         spin_unlock(&sbinfo->stat_lock);
1678                         return -ENOSPC;
1679                 }
1680                 sbinfo->free_inodes--;
1681                 spin_unlock(&sbinfo->stat_lock);
1682         }
1683
1684         dir->i_size += BOGO_DIRENT_SIZE;
1685         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1686         inode->i_nlink++;
1687         atomic_inc(&inode->i_count);    /* New dentry reference */
1688         dget(dentry);           /* Extra pinning count for the created dentry */
1689         d_instantiate(dentry, inode);
1690         return 0;
1691 }
1692
1693 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1694 {
1695         struct inode *inode = dentry->d_inode;
1696
1697         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1698                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1699                 if (sbinfo) {
1700                         spin_lock(&sbinfo->stat_lock);
1701                         sbinfo->free_inodes++;
1702                         spin_unlock(&sbinfo->stat_lock);
1703                 }
1704         }
1705
1706         dir->i_size -= BOGO_DIRENT_SIZE;
1707         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1708         inode->i_nlink--;
1709         dput(dentry);   /* Undo the count from "create" - this does all the work */
1710         return 0;
1711 }
1712
1713 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1714 {
1715         if (!simple_empty(dentry))
1716                 return -ENOTEMPTY;
1717
1718         dir->i_nlink--;
1719         return shmem_unlink(dir, dentry);
1720 }
1721
1722 /*
1723  * The VFS layer already does all the dentry stuff for rename;
1724  * we just have to decrement the usage count for the target if
1725  * it exists, so that the VFS layer correctly frees it when it
1726  * gets overwritten.
1727  */
1728 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1729 {
1730         struct inode *inode = old_dentry->d_inode;
1731         int they_are_dirs = S_ISDIR(inode->i_mode);
1732
1733         if (!simple_empty(new_dentry))
1734                 return -ENOTEMPTY;
1735
1736         if (new_dentry->d_inode) {
1737                 (void) shmem_unlink(new_dir, new_dentry);
1738                 if (they_are_dirs)
1739                         old_dir->i_nlink--;
1740         } else if (they_are_dirs) {
1741                 old_dir->i_nlink--;
1742                 new_dir->i_nlink++;
1743         }
1744
1745         old_dir->i_size -= BOGO_DIRENT_SIZE;
1746         new_dir->i_size += BOGO_DIRENT_SIZE;
1747         old_dir->i_ctime = old_dir->i_mtime =
1748         new_dir->i_ctime = new_dir->i_mtime =
1749         inode->i_ctime = CURRENT_TIME;
1750         return 0;
1751 }
1752
1753 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1754 {
1755         int error;
1756         int len;
1757         struct inode *inode;
1758         struct page *page = NULL;
1759         char *kaddr;
1760         struct shmem_inode_info *info;
1761
1762         len = strlen(symname) + 1;
1763         if (len > PAGE_CACHE_SIZE)
1764                 return -ENAMETOOLONG;
1765
1766         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1767         if (!inode)
1768                 return -ENOSPC;
1769
1770         info = SHMEM_I(inode);
1771         inode->i_size = len-1;
1772         if (len <= (char *)inode - (char *)info) {
1773                 /* do it inline */
1774                 memcpy(info, symname, len);
1775                 inode->i_op = &shmem_symlink_inline_operations;
1776         } else {
1777                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1778                 if (error) {
1779                         iput(inode);
1780                         return error;
1781                 }
1782                 inode->i_op = &shmem_symlink_inode_operations;
1783                 kaddr = kmap_atomic(page, KM_USER0);
1784                 memcpy(kaddr, symname, len);
1785                 kunmap_atomic(kaddr, KM_USER0);
1786                 set_page_dirty(page);
1787                 page_cache_release(page);
1788         }
1789         if (dir->i_mode & S_ISGID)
1790                 inode->i_gid = dir->i_gid;
1791         dir->i_size += BOGO_DIRENT_SIZE;
1792         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1793         d_instantiate(dentry, inode);
1794         dget(dentry);
1795         return 0;
1796 }
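/*
 * The inline test above works because shmem's vfs_inode is embedded
 * at the tail of struct shmem_inode_info, so (char *)inode -
 * (char *)info is the size of the info fields preceding it; a short
 * target simply overwrites those fields (hence the S_ISREG check in
 * shmem_destroy_inode).  Rough sketch of the layout:
 *
 *      struct shmem_inode_info {
 *              ...info fields / inline symlink body...
 *              struct inode vfs_inode;   (SHMEM_I() inverts this)
 *      };
 *
 * Longer targets go through shmem_getpage and live in page 0.
 */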
1797
1798 static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1799 {
1800         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1801         return 0;
1802 }
1803
1804 static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1805 {
1806         struct page *page = NULL;
1807         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1808         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1809         return 0;
1810 }
1811
1812 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
1813 {
1814         if (!IS_ERR(nd_get_link(nd))) {
1815                 struct page *page;
1816
1817                 page = find_get_page(dentry->d_inode->i_mapping, 0);
1818                 /* shmem_follow_link still holds a reference to this page */
1819                 BUG_ON(!page);
1820                 kunmap(page);
1821                 mark_page_accessed(page);
1822                 page_cache_release(page);       /* ref from find_get_page above */
1823                 page_cache_release(page);       /* ref held since follow_link */
1824         }
1825 }
1826
1827 static struct inode_operations shmem_symlink_inline_operations = {
1828         .readlink       = generic_readlink,
1829         .follow_link    = shmem_follow_link_inline,
1830 #ifdef CONFIG_TMPFS_XATTR
1831         .setxattr       = generic_setxattr,
1832         .getxattr       = generic_getxattr,
1833         .listxattr      = generic_listxattr,
1834         .removexattr    = generic_removexattr,
1835 #endif
1836 };
1837
1838 static struct inode_operations shmem_symlink_inode_operations = {
1839         .truncate       = shmem_truncate,
1840         .readlink       = generic_readlink,
1841         .follow_link    = shmem_follow_link,
1842         .put_link       = shmem_put_link,
1843 #ifdef CONFIG_TMPFS_XATTR
1844         .setxattr       = generic_setxattr,
1845         .getxattr       = generic_getxattr,
1846         .listxattr      = generic_listxattr,
1847         .removexattr    = generic_removexattr,
1848 #endif
1849 };
1850
1851 static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
1852 {
1853         char *this_char, *value, *rest;
1854
1855         while ((this_char = strsep(&options, ",")) != NULL) {
1856                 if (!*this_char)
1857                         continue;
1858                 if ((value = strchr(this_char,'=')) != NULL) {
1859                         *value++ = 0;
1860                 } else {
1861                         printk(KERN_ERR
1862                             "tmpfs: No value for mount option '%s'\n",
1863                             this_char);
1864                         return 1;
1865                 }
1866
1867                 if (!strcmp(this_char,"size")) {
1868                         unsigned long long size;
1869                         size = memparse(value,&rest);
1870                         if (*rest == '%') {
1871                                 size <<= PAGE_SHIFT;
1872                                 size *= totalram_pages;
1873                                 do_div(size, 100);
1874                                 rest++;
1875                         }
1876                         if (*rest)
1877                                 goto bad_val;
1878                         *blocks = size >> PAGE_CACHE_SHIFT;
1879                 } else if (!strcmp(this_char,"nr_blocks")) {
1880                         *blocks = memparse(value,&rest);
1881                         if (*rest)
1882                                 goto bad_val;
1883                 } else if (!strcmp(this_char,"nr_inodes")) {
1884                         *inodes = memparse(value,&rest);
1885                         if (*rest)
1886                                 goto bad_val;
1887                 } else if (!strcmp(this_char,"mode")) {
1888                         if (!mode)
1889                                 continue;
1890                         *mode = simple_strtoul(value,&rest,8);
1891                         if (*rest)
1892                                 goto bad_val;
1893                 } else if (!strcmp(this_char,"uid")) {
1894                         if (!uid)
1895                                 continue;
1896                         *uid = simple_strtoul(value,&rest,0);
1897                         if (*rest)
1898                                 goto bad_val;
1899                 } else if (!strcmp(this_char,"gid")) {
1900                         if (!gid)
1901                                 continue;
1902                         *gid = simple_strtoul(value,&rest,0);
1903                         if (*rest)
1904                                 goto bad_val;
1905                 } else {
1906                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1907                                this_char);
1908                         return 1;
1909                 }
1910         }
1911         return 0;
1912
1913 bad_val:
1914         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
1915                value, this_char);
1916         return 1;
1917
1918 }
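/*
 * Example option strings accepted by the parser above (illustrative):
 *
 *      size=512m                absolute size, suffixes via memparse()
 *      size=50%                 percentage of totalram_pages
 *      nr_blocks=1024,nr_inodes=1024
 *      mode=1777,uid=0,gid=0    mode is octal, uid/gid numeric
 *
 * Any trailing junk after a value ("size=10q") lands at bad_val.
 */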
1919
1920 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
1921 {
1922         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1923         unsigned long max_blocks = 0;
1924         unsigned long max_inodes = 0;
1925
1926         if (sbinfo) {
1927                 max_blocks = sbinfo->max_blocks;
1928                 max_inodes = sbinfo->max_inodes;
1929         }
1930         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes))
1931                 return -EINVAL;
1932         /* Keep it simple: disallow limited <-> unlimited remount */
1933         if ((max_blocks || max_inodes) == !sbinfo)
1934                 return -EINVAL;
1935         /* But allow the pointless unlimited -> unlimited remount */
1936         if (!sbinfo)
1937                 return 0;
1938         return shmem_set_size(sbinfo, max_blocks, max_inodes);
1939 }
1940 #endif
1941
1942 static void shmem_put_super(struct super_block *sb)
1943 {
1944         kfree(sb->s_fs_info);
1945         sb->s_fs_info = NULL;
1946 }
1947
1948 #ifdef CONFIG_TMPFS_XATTR
1949 static struct xattr_handler *shmem_xattr_handlers[];
1950 #else
1951 #define shmem_xattr_handlers NULL
1952 #endif
1953
1954 static int shmem_fill_super(struct super_block *sb,
1955                             void *data, int silent)
1956 {
1957         struct inode *inode;
1958         struct dentry *root;
1959         int mode   = S_IRWXUGO | S_ISVTX;
1960         uid_t uid = current->fsuid;
1961         gid_t gid = current->fsgid;
1962         int err = -ENOMEM;
1963
1964 #ifdef CONFIG_TMPFS
1965         unsigned long blocks = 0;
1966         unsigned long inodes = 0;
1967
1968         /*
1969          * By default we only allow half of the physical RAM per
1970          * tmpfs instance, limiting inodes to one per page of lowmem;
1971          * but the internal instance is left unlimited.
1972          */
1973         if (!(sb->s_flags & MS_NOUSER)) {
1974                 blocks = totalram_pages / 2;
1975                 inodes = totalram_pages - totalhigh_pages;
1976                 if (inodes > blocks)
1977                         inodes = blocks;
1978
1979                 if (shmem_parse_options(data, &mode,
1980                                         &uid, &gid, &blocks, &inodes))
1981                         return -EINVAL;
1982         }
1983
1984         if (blocks || inodes) {
1985                 struct shmem_sb_info *sbinfo;
1986                 sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
1987                 if (!sbinfo)
1988                         return -ENOMEM;
1989                 sb->s_fs_info = sbinfo;
1990                 spin_lock_init(&sbinfo->stat_lock);
1991                 sbinfo->max_blocks = blocks;
1992                 sbinfo->free_blocks = blocks;
1993                 sbinfo->max_inodes = inodes;
1994                 sbinfo->free_inodes = inodes;
1995         }
1996         sb->s_xattr = shmem_xattr_handlers;
1997 #else
1998         sb->s_flags |= MS_NOUSER;
1999 #endif
2000
2001         sb->s_maxbytes = SHMEM_MAX_BYTES;
2002         sb->s_blocksize = PAGE_CACHE_SIZE;
2003         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2004         sb->s_magic = TMPFS_SUPER_MAGIC;
2005         sb->s_op = &shmem_ops;
2006         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2007         if (!inode)
2008                 goto failed;
2009         inode->i_uid = uid;
2010         inode->i_gid = gid;
2011         root = d_alloc_root(inode);
2012         if (!root)
2013                 goto failed_iput;
2014         sb->s_root = root;
2015         return 0;
2016
2017 failed_iput:
2018         iput(inode);
2019 failed:
2020         shmem_put_super(sb);
2021         return err;
2022 }
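/*
 * Default limit sketch: on a machine with 256MB of RAM and 4K pages
 * (totalram_pages = 65536, all lowmem), an option-less user mount
 * gets blocks = 32768 (half of RAM) and inodes = 32768 (clamped to
 * blocks).  Illustrative numbers only; the internal MS_NOUSER mount
 * skips this and stays unlimited (sbinfo == NULL).
 */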
2023
2024 static kmem_cache_t *shmem_inode_cachep;
2025
2026 static struct inode *shmem_alloc_inode(struct super_block *sb)
2027 {
2028         struct shmem_inode_info *p;
2029         p = kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
2030         if (!p)
2031                 return NULL;
2032         return &p->vfs_inode;
2033 }
2034
2035 static void shmem_destroy_inode(struct inode *inode)
2036 {
2037         if (S_ISREG(inode->i_mode)) {
2038                 /* for an inline symlink, only the struct inode part is valid */
2039                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2040         }
2041         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2042 }
2043
2044 static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
2045 {
2046         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2047
2048         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2049             SLAB_CTOR_CONSTRUCTOR) {
2050                 inode_init_once(&p->vfs_inode);
2051         }
2052 }
2053
2054 static int init_inodecache(void)
2055 {
2056         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2057                                 sizeof(struct shmem_inode_info),
2058                                 0, 0, init_once, NULL);
2059         if (shmem_inode_cachep == NULL)
2060                 return -ENOMEM;
2061         return 0;
2062 }
2063
2064 static void destroy_inodecache(void)
2065 {
2066         if (kmem_cache_destroy(shmem_inode_cachep))
2067                 printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
2068 }
2069
2070 static struct address_space_operations shmem_aops = {
2071         .writepage      = shmem_writepage,
2072         .set_page_dirty = __set_page_dirty_nobuffers,
2073 #ifdef CONFIG_TMPFS
2074         .prepare_write  = shmem_prepare_write,
2075         .commit_write   = simple_commit_write,
2076 #endif
2077 };
2078
2079 static struct file_operations shmem_file_operations = {
2080         .mmap           = shmem_mmap,
2081 #ifdef CONFIG_TMPFS
2082         .llseek         = generic_file_llseek,
2083         .read           = shmem_file_read,
2084         .write          = shmem_file_write,
2085         .fsync          = simple_sync_file,
2086         .sendfile       = shmem_file_sendfile,
2087 #endif
2088 };
2089
2090 static struct inode_operations shmem_inode_operations = {
2091         .truncate       = shmem_truncate,
2092         .setattr        = shmem_notify_change,
2093 #ifdef CONFIG_TMPFS_XATTR
2094         .setxattr       = generic_setxattr,
2095         .getxattr       = generic_getxattr,
2096         .listxattr      = generic_listxattr,
2097         .removexattr    = generic_removexattr,
2098 #endif
2099 };
2100
2101 static struct inode_operations shmem_dir_inode_operations = {
2102 #ifdef CONFIG_TMPFS
2103         .create         = shmem_create,
2104         .lookup         = simple_lookup,
2105         .link           = shmem_link,
2106         .unlink         = shmem_unlink,
2107         .symlink        = shmem_symlink,
2108         .mkdir          = shmem_mkdir,
2109         .rmdir          = shmem_rmdir,
2110         .mknod          = shmem_mknod,
2111         .rename         = shmem_rename,
2112 #ifdef CONFIG_TMPFS_XATTR
2113         .setxattr       = generic_setxattr,
2114         .getxattr       = generic_getxattr,
2115         .listxattr      = generic_listxattr,
2116         .removexattr    = generic_removexattr,
2117 #endif
2118 #endif
2119 };
2120
2121 static struct inode_operations shmem_special_inode_operations = {
2122 #ifdef CONFIG_TMPFS_XATTR
2123         .setxattr       = generic_setxattr,
2124         .getxattr       = generic_getxattr,
2125         .listxattr      = generic_listxattr,
2126         .removexattr    = generic_removexattr,
2127 #endif
2128 };
2129
2130 static struct super_operations shmem_ops = {
2131         .alloc_inode    = shmem_alloc_inode,
2132         .destroy_inode  = shmem_destroy_inode,
2133 #ifdef CONFIG_TMPFS
2134         .statfs         = shmem_statfs,
2135         .remount_fs     = shmem_remount_fs,
2136 #endif
2137         .delete_inode   = shmem_delete_inode,
2138         .drop_inode     = generic_delete_inode,
2139         .put_super      = shmem_put_super,
2140 };
2141
2142 static struct vm_operations_struct shmem_vm_ops = {
2143         .nopage         = shmem_nopage,
2144         .populate       = shmem_populate,
2145 #ifdef CONFIG_NUMA
2146         .set_policy     = shmem_set_policy,
2147         .get_policy     = shmem_get_policy,
2148 #endif
2149 };
2150
2151
2152 #ifdef CONFIG_TMPFS_SECURITY
2153
2154 static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
2155                                         const char *name, size_t name_len)
2156 {
2157         return security_inode_listsecurity(inode, list, list_len);
2158 }
2159
2160 static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
2161 {
2162         if (strcmp(name, "") == 0)
2163                 return -EINVAL;
2164         return security_inode_getsecurity(inode, name, buffer, size);
2165 }
2166
2167 static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
2168 {
2169         if (strcmp(name, "") == 0)
2170                 return -EINVAL;
2171         return security_inode_setsecurity(inode, name, value, size, flags);
2172 }
2173
2174 static struct xattr_handler shmem_xattr_security_handler = {
2175         .prefix = XATTR_SECURITY_PREFIX,
2176         .list   = shmem_xattr_security_list,
2177         .get    = shmem_xattr_security_get,
2178         .set    = shmem_xattr_security_set,
2179 };
2180
2181 #endif  /* CONFIG_TMPFS_SECURITY */
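/*
 * With CONFIG_TMPFS_SECURITY these handlers back the "security."
 * xattr namespace, e.g. (hypothetical, from userspace):
 *
 *      setfattr -n security.selinux -v "<context>" /dev/shm/foo
 *      getfattr -n security.selinux /dev/shm/foo
 *
 * A bare "security." name with no suffix is rejected with -EINVAL
 * by the get/set routines above.
 */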
2182
2183 #ifdef CONFIG_TMPFS_XATTR
2184
2185 static struct xattr_handler *shmem_xattr_handlers[] = {
2186 #ifdef CONFIG_TMPFS_SECURITY
2187         &shmem_xattr_security_handler,
2188 #endif
2189         NULL
2190 };
2191
2192 #endif  /* CONFIG_TMPFS_XATTR */
2193
2194 static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
2195         int flags, const char *dev_name, void *data)
2196 {
2197         return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
2198 }
2199
2200 static struct file_system_type tmpfs_fs_type = {
2201         .owner          = THIS_MODULE,
2202         .name           = "tmpfs",
2203         .get_sb         = shmem_get_sb,
2204         .kill_sb        = kill_litter_super,
2205 };
2206 static struct vfsmount *shm_mnt;
2207
2208 static int __init init_tmpfs(void)
2209 {
2210         int error;
2211
2212         error = init_inodecache();
2213         if (error)
2214                 goto out3;
2215
2216         error = register_filesystem(&tmpfs_fs_type);
2217         if (error) {
2218                 printk(KERN_ERR "Could not register tmpfs\n");
2219                 goto out2;
2220         }
2221 #ifdef CONFIG_TMPFS
2222         devfs_mk_dir("shm");
2223 #endif
2224         shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
2225                                 tmpfs_fs_type.name, NULL);
2226         if (IS_ERR(shm_mnt)) {
2227                 error = PTR_ERR(shm_mnt);
2228                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2229                 goto out1;
2230         }
2231         return 0;
2232
2233 out1:
2234         unregister_filesystem(&tmpfs_fs_type);
2235 out2:
2236         destroy_inodecache();
2237 out3:
2238         shm_mnt = ERR_PTR(error);
2239         return error;
2240 }
2241 module_init(init_tmpfs)
2242
2243 /*
2244  * shmem_file_setup - get an unlinked file living in tmpfs
2245  *
2246  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2247  * @size: size to be set for the file
2248  * @flags: vm_flags; only VM_ACCOUNT is significant here
2249  */
2250 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2251 {
2252         int error;
2253         struct file *file;
2254         struct inode *inode;
2255         struct dentry *dentry, *root;
2256         struct qstr this;
2257
2258         if (IS_ERR(shm_mnt))
2259                 return (void *)shm_mnt;
2260
2261         if (size < 0 || size > SHMEM_MAX_BYTES)
2262                 return ERR_PTR(-EINVAL);
2263
2264         if (shmem_acct_size(flags, size))
2265                 return ERR_PTR(-ENOMEM);
2266
2267         error = -ENOMEM;
2268         this.name = name;
2269         this.len = strlen(name);
2270         this.hash = 0; /* will go */
2271         root = shm_mnt->mnt_root;
2272         dentry = d_alloc(root, &this);
2273         if (!dentry)
2274                 goto put_memory;
2275
2276         error = -ENFILE;
2277         file = get_empty_filp();
2278         if (!file)
2279                 goto put_dentry;
2280
2281         error = -ENOSPC;
2282         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2283         if (!inode)
2284                 goto close_file;
2285
2286         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2287         d_instantiate(dentry, inode);
2288         inode->i_size = size;
2289         inode->i_nlink = 0;     /* It is unlinked */
2290         file->f_vfsmnt = mntget(shm_mnt);
2291         file->f_dentry = dentry;
2292         file->f_mapping = inode->i_mapping;
2293         file->f_op = &shmem_file_operations;
2294         file->f_mode = FMODE_WRITE | FMODE_READ;
2295         return file;
2296
2297 close_file:
2298         put_filp(file);
2299 put_dentry:
2300         dput(dentry);
2301 put_memory:
2302         shmem_unacct_size(flags, size);
2303         return ERR_PTR(error);
2304 }
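/*
 * Minimal usage sketch for shmem_file_setup() (hypothetical caller;
 * errors follow the ERR_PTR convention above):
 *
 *      struct file *file = shmem_file_setup("example", PAGE_SIZE, 0);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 *      ...use file, then fput(file) when done...
 *
 * shmem_zero_setup() below is the in-tree example: it backs a shared
 * anonymous mapping with one of these unlinked files.
 */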
2305
2306 /*
2307  * shmem_zero_setup - setup a shared anonymous mapping
2308  *
2309  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2310  */
2311 int shmem_zero_setup(struct vm_area_struct *vma)
2312 {
2313         struct file *file;
2314         loff_t size = vma->vm_end - vma->vm_start;
2315
2316         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2317         if (IS_ERR(file))
2318                 return PTR_ERR(file);
2319
2320         if (vma->vm_file)
2321                 fput(vma->vm_file);
2322         vma->vm_file = file;
2323         vma->vm_ops = &shmem_vm_ops;
2324         return 0;
2325 }
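/*
 * In other words, a userspace mapping such as (illustrative)
 *
 *      mmap(NULL, len, PROT_READ | PROT_WRITE,
 *           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * ends up here via do_mmap_pgoff, backed by an unlinked tmpfs file
 * that appears as "dev/zero" in /proc/<pid>/maps.
 */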