linux-2.6.git (vserver 1.9.3): mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2004 Hugh Dickins.
10  * Copyright (C) 2002-2004 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * This file is released under the GPL.
14  */
15
16 /*
17  * This virtual memory filesystem is heavily based on the ramfs. It
18  * extends ramfs by the ability to use swap and honor resource limits
19  * which makes it a completely usable filesystem.
20  */
21
22 #include <linux/config.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/devfs_fs_kernel.h>
26 #include <linux/fs.h>
27 #include <linux/mm.h>
28 #include <linux/mman.h>
29 #include <linux/file.h>
30 #include <linux/swap.h>
31 #include <linux/pagemap.h>
32 #include <linux/string.h>
33 #include <linux/slab.h>
34 #include <linux/backing-dev.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/mount.h>
37 #include <linux/writeback.h>
38 #include <linux/vfs.h>
39 #include <linux/blkdev.h>
40 #include <linux/security.h>
41 #include <linux/swapops.h>
42 #include <linux/mempolicy.h>
43 #include <linux/namei.h>
44 #include <asm/uaccess.h>
45 #include <asm/div64.h>
46 #include <asm/pgtable.h>
47
48 /* This magic number is used in glibc for posix shared memory */
49 #define TMPFS_MAGIC	0x01021994
50 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
51 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
52 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
53
54 #define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
55 #define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
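
/*
 * Worked example of the limits above (illustrative, assuming 4K pages,
 * 32-bit unsigned long, and SHMEM_NR_DIRECT == 16): ENTRIES_PER_PAGE =
 * 4096/4 = 1024, ENTRIES_PER_PAGEPAGE = 1024*1024, so SHMEM_MAX_INDEX =
 * 16 + (1024*1024/2)*1025 ~= 537 million pages, i.e. SHMEM_MAX_BYTES
 * ~= 2TB for a single file.
 */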
56
57 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
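
/* e.g. with 4K pages, VM_ACCT(5000) == PAGE_CACHE_ALIGN(5000) >> 12 == 2 */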
58
59 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
60 #define SHMEM_PAGEIN     VM_READ
61 #define SHMEM_TRUNCATE   VM_WRITE
62
63 /* Pretend that each entry is of this size in directory's i_size */
64 #define BOGO_DIRENT_SIZE 20
65
66 /* Keep swapped page count in private field of indirect struct page */
67 #define nr_swapped              private
68
69 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
70 enum sgp_type {
71         SGP_QUICK,      /* don't try more than file page cache lookup */
72         SGP_READ,       /* don't exceed i_size, don't allocate page */
73         SGP_CACHE,      /* don't exceed i_size, may allocate page */
74         SGP_WRITE,      /* may exceed i_size, may allocate page */
75 };
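
/*
 * Callers in this file: shmem_populate passes SGP_QUICK for nonblocking
 * faults and SGP_CACHE otherwise, shmem_nopage uses SGP_CACHE,
 * do_shmem_file_read and the partial-page hold in shmem_notify_change
 * use SGP_READ, and shmem_prepare_write/shmem_file_write use SGP_WRITE.
 */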
76
77 static int shmem_getpage(struct inode *inode, unsigned long idx,
78                          struct page **pagep, enum sgp_type sgp, int *type);
79
80 static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
81 {
82         /*
83          * The above definition of ENTRIES_PER_PAGE, and the use of
84          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
85          * might be reconsidered if it ever diverges from PAGE_SIZE.
86          */
87         return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
88 }
89
90 static inline void shmem_dir_free(struct page *page)
91 {
92         __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
93 }
94
95 static struct page **shmem_dir_map(struct page *page)
96 {
97         return (struct page **)kmap_atomic(page, KM_USER0);
98 }
99
100 static inline void shmem_dir_unmap(struct page **dir)
101 {
102         kunmap_atomic(dir, KM_USER0);
103 }
104
105 static swp_entry_t *shmem_swp_map(struct page *page)
106 {
107         return (swp_entry_t *)kmap_atomic(page, KM_USER1);
108 }
109
110 static inline void shmem_swp_balance_unmap(void)
111 {
112         /*
113          * When passing a pointer to an i_direct entry, to code which
114          * also handles indirect entries and so will shmem_swp_unmap,
115          * we must arrange for the preempt count to remain in balance.
116          * What kmap_atomic of a lowmem page does depends on config
117          * and architecture, so pretend to kmap_atomic some lowmem page.
118          */
119         (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
120 }
121
122 static inline void shmem_swp_unmap(swp_entry_t *entry)
123 {
124         kunmap_atomic(entry, KM_USER1);
125 }
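
/*
 * Pairing sketch (illustrative only): however shmem_swp_entry finds the
 * entry, a caller that gets a non-NULL result always finishes with
 * shmem_swp_unmap, so the balancing dummy kmap above keeps the
 * kmap_atomic/kunmap_atomic counts matched even for i_direct entries:
 *
 *	entry = shmem_swp_entry(info, idx, NULL);
 *	...use *entry...
 *	shmem_swp_unmap(entry);
 */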
126
127 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
128 {
129         return sb->s_fs_info;
130 }
131
132 /*
133  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
134  * for shared memory and for shared anonymous (/dev/zero) mappings
135  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
136  * consistent with the pre-accounting of private mappings ...
137  */
138 static inline int shmem_acct_size(unsigned long flags, loff_t size)
139 {
140         return (flags & VM_ACCOUNT)?
141                 security_vm_enough_memory(VM_ACCT(size)): 0;
142 }
143
144 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
145 {
146         if (flags & VM_ACCOUNT)
147                 vm_unacct_memory(VM_ACCT(size));
148 }
149
150 /*
151  * ... whereas tmpfs objects are accounted incrementally as
152  * pages are allocated, in order to allow huge sparse files.
153  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
154  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
155  */
156 static inline int shmem_acct_block(unsigned long flags)
157 {
158         return (flags & VM_ACCOUNT)?
159                 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
160 }
161
162 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
163 {
164         if (!(flags & VM_ACCOUNT))
165                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
166 }
167
168 static struct super_operations shmem_ops;
169 static struct address_space_operations shmem_aops;
170 static struct file_operations shmem_file_operations;
171 static struct inode_operations shmem_inode_operations;
172 static struct inode_operations shmem_dir_inode_operations;
173 static struct vm_operations_struct shmem_vm_ops;
174
175 static struct backing_dev_info shmem_backing_dev_info = {
176         .ra_pages       = 0,    /* No readahead */
177         .memory_backed  = 1,    /* Does not contribute to dirty memory */
178         .unplug_io_fn = default_unplug_io_fn,
179 };
180
181 static LIST_HEAD(shmem_swaplist);
182 static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED;
183
184 static void shmem_free_block(struct inode *inode)
185 {
186         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
187         if (sbinfo) {
188                 spin_lock(&sbinfo->stat_lock);
189                 sbinfo->free_blocks++;
190                 inode->i_blocks -= BLOCKS_PER_PAGE;
191                 spin_unlock(&sbinfo->stat_lock);
192         }
193 }
194
195 /*
196  * shmem_recalc_inode - recalculate the size of an inode
197  *
198  * @inode: inode to recalc
199  *
200  * We have to calculate the free blocks since the mm can drop
201  * undirtied hole pages behind our back.
202  *
203  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
204  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
205  *
206  * It has to be called with the spinlock held.
207  */
208 static void shmem_recalc_inode(struct inode *inode)
209 {
210         struct shmem_inode_info *info = SHMEM_I(inode);
211         long freed;
212
213         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
214         if (freed > 0) {
215                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
216                 info->alloced -= freed;
217                 shmem_unacct_blocks(info->flags, freed);
218                 if (sbinfo) {
219                         spin_lock(&sbinfo->stat_lock);
220                         sbinfo->free_blocks += freed;
221                         inode->i_blocks -= freed*BLOCKS_PER_PAGE;
222                         spin_unlock(&sbinfo->stat_lock);
223                 }
224         }
225 }
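
/*
 * Example of the recalculation above: with info->alloced == 10,
 * info->swapped == 3 and nrpages == 5, the mm has dropped 2 clean hole
 * pages behind our back; those 2 blocks go back to sbinfo->free_blocks
 * and to the overcommit accounting.
 */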
226
227 /*
228  * shmem_swp_entry - find the swap vector position in the info structure
229  *
230  * @info:  info structure for the inode
231  * @index: index of the page to find
232  * @page:  optional page to add to the structure. Has to be preset to
233  *         all zeros
234  *
235  * If there is no space allocated yet it will return NULL when
236  * page is NULL, else it will use the page for the needed block,
237  * setting it to NULL on return to indicate that it has been used.
238  *
239  * The swap vector is organized the following way:
240  *
241  * There are SHMEM_NR_DIRECT entries directly stored in the
242  * shmem_inode_info structure. So small files do not need an additional
243  * allocation.
244  *
245  * For pages with index > SHMEM_NR_DIRECT there is the pointer
246  * i_indirect which points to a page which holds in the first half
247  * doubly indirect blocks, in the second half triple indirect blocks:
248  *
249  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
250  * following layout (for SHMEM_NR_DIRECT == 16):
251  *
252  * i_indirect -> dir --> 16-19
253  *            |      +-> 20-23
254  *            |
255  *            +-->dir2 --> 24-27
256  *            |        +-> 28-31
257  *            |        +-> 32-35
258  *            |        +-> 36-39
259  *            |
260  *            +-->dir3 --> 40-43
261  *                     +-> 44-47
262  *                     +-> 48-51
263  *                     +-> 52-55
264  */
265 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
266 {
267         unsigned long offset;
268         struct page **dir;
269         struct page *subdir;
270
271         if (index < SHMEM_NR_DIRECT) {
272                 shmem_swp_balance_unmap();
273                 return info->i_direct+index;
274         }
275         if (!info->i_indirect) {
276                 if (page) {
277                         info->i_indirect = *page;
278                         *page = NULL;
279                 }
280                 return NULL;                    /* need another page */
281         }
282
283         index -= SHMEM_NR_DIRECT;
284         offset = index % ENTRIES_PER_PAGE;
285         index /= ENTRIES_PER_PAGE;
286         dir = shmem_dir_map(info->i_indirect);
287
288         if (index >= ENTRIES_PER_PAGE/2) {
289                 index -= ENTRIES_PER_PAGE/2;
290                 dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
291                 index %= ENTRIES_PER_PAGE;
292                 subdir = *dir;
293                 if (!subdir) {
294                         if (page) {
295                                 *dir = *page;
296                                 *page = NULL;
297                         }
298                         shmem_dir_unmap(dir);
299                         return NULL;            /* need another page */
300                 }
301                 shmem_dir_unmap(dir);
302                 dir = shmem_dir_map(subdir);
303         }
304
305         dir += index;
306         subdir = *dir;
307         if (!subdir) {
308                 if (!page || !(subdir = *page)) {
309                         shmem_dir_unmap(dir);
310                         return NULL;            /* need a page */
311                 }
312                 *dir = subdir;
313                 *page = NULL;
314         }
315         shmem_dir_unmap(dir);
316         return shmem_swp_map(subdir) + offset;
317 }
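
/*
 * Worked example for the artificial layout in the comment above
 * (ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16), looking up index 33:
 *	index -= 16		-> 17
 *	offset = 17 % 4		-> 1
 *	index  = 17 / 4		-> 4	(>= ENTRIES_PER_PAGE/2: triple indirect)
 *	index -= 2		-> 2
 *	dir   += 2 + 2/4	-> i_indirect slot 2, i.e. dir2
 *	index %= 4		-> 2	-> dir2 slot 2, the "32-35" page
 * so the swap entry for page 33 is at offset 1 of that page.
 */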
318
319 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
320 {
321         long incdec = value? 1: -1;
322
323         entry->val = value;
324         info->swapped += incdec;
325         if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
326                 kmap_atomic_to_page(entry)->nr_swapped += incdec;
327 }
328
329 /*
330  * shmem_swp_alloc - get the position of the swap entry for the page.
331  *                   If it does not exist allocate the entry.
332  *
333  * @info:       info structure for the inode
334  * @index:      index of the page to find
335  * @sgp:        check and recheck i_size? skip allocation?
336  */
337 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
338 {
339         struct inode *inode = &info->vfs_inode;
340         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
341         struct page *page = NULL;
342         swp_entry_t *entry;
343
344         if (sgp != SGP_WRITE &&
345             ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
346                 return ERR_PTR(-EINVAL);
347
348         while (!(entry = shmem_swp_entry(info, index, &page))) {
349                 if (sgp == SGP_READ)
350                         return shmem_swp_map(ZERO_PAGE(0));
351                 /*
352                  * Test free_blocks against 1 not 0, since we have 1 data
353                  * page (and perhaps indirect index pages) yet to allocate:
354                  * a waste to allocate index if we cannot allocate data.
355                  */
356                 if (sbinfo) {
357                         spin_lock(&sbinfo->stat_lock);
358                         if (sbinfo->free_blocks <= 1) {
359                                 spin_unlock(&sbinfo->stat_lock);
360                                 return ERR_PTR(-ENOSPC);
361                         }
362                         sbinfo->free_blocks--;
363                         inode->i_blocks += BLOCKS_PER_PAGE;
364                         spin_unlock(&sbinfo->stat_lock);
365                 }
366
367                 spin_unlock(&info->lock);
368                 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
369                 if (page) {
370                         clear_highpage(page);
371                         page->nr_swapped = 0;
372                 }
373                 spin_lock(&info->lock);
374
375                 if (!page) {
376                         shmem_free_block(inode);
377                         return ERR_PTR(-ENOMEM);
378                 }
379                 if (sgp != SGP_WRITE &&
380                     ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
381                         entry = ERR_PTR(-EINVAL);
382                         break;
383                 }
384                 if (info->next_index <= index)
385                         info->next_index = index + 1;
386         }
387         if (page) {
388                 /* another task gave its page, or truncated the file */
389                 shmem_free_block(inode);
390                 shmem_dir_free(page);
391         }
392         if (info->next_index <= index && !IS_ERR(entry))
393                 info->next_index = index + 1;
394         return entry;
395 }
396
397 /*
398  * shmem_free_swp - free some swap entries in a directory
399  *
400  * @dir:   pointer to the directory
401  * @edir:  pointer after last entry of the directory
402  */
403 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
404 {
405         swp_entry_t *ptr;
406         int freed = 0;
407
408         for (ptr = dir; ptr < edir; ptr++) {
409                 if (ptr->val) {
410                         free_swap_and_cache(*ptr);
411                         *ptr = (swp_entry_t){0};
412                         freed++;
413                 }
414         }
415         return freed;
416 }
417
418 static void shmem_truncate(struct inode *inode)
419 {
420         struct shmem_inode_info *info = SHMEM_I(inode);
421         unsigned long idx;
422         unsigned long size;
423         unsigned long limit;
424         unsigned long stage;
425         struct page **dir;
426         struct page *subdir;
427         struct page *empty;
428         swp_entry_t *ptr;
429         int offset;
430         int freed;
431
432         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
433         idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
434         if (idx >= info->next_index)
435                 return;
436
437         spin_lock(&info->lock);
438         info->flags |= SHMEM_TRUNCATE;
439         limit = info->next_index;
440         info->next_index = idx;
441         if (info->swapped && idx < SHMEM_NR_DIRECT) {
442                 ptr = info->i_direct;
443                 size = limit;
444                 if (size > SHMEM_NR_DIRECT)
445                         size = SHMEM_NR_DIRECT;
446                 info->swapped -= shmem_free_swp(ptr+idx, ptr+size);
447         }
448         if (!info->i_indirect)
449                 goto done2;
450
451         BUG_ON(limit <= SHMEM_NR_DIRECT);
452         limit -= SHMEM_NR_DIRECT;
453         idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
454         offset = idx % ENTRIES_PER_PAGE;
455         idx -= offset;
456
457         empty = NULL;
458         dir = shmem_dir_map(info->i_indirect);
459         stage = ENTRIES_PER_PAGEPAGE/2;
460         if (idx < ENTRIES_PER_PAGEPAGE/2)
461                 dir += idx/ENTRIES_PER_PAGE;
462         else {
463                 dir += ENTRIES_PER_PAGE/2;
464                 dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
465                 while (stage <= idx)
466                         stage += ENTRIES_PER_PAGEPAGE;
467                 if (*dir) {
468                         subdir = *dir;
469                         size = ((idx - ENTRIES_PER_PAGEPAGE/2) %
470                                 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
471                         if (!size && !offset) {
472                                 empty = subdir;
473                                 *dir = NULL;
474                         }
475                         shmem_dir_unmap(dir);
476                         dir = shmem_dir_map(subdir) + size;
477                 } else {
478                         offset = 0;
479                         idx = stage;
480                 }
481         }
482
483         for (; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
484                 if (unlikely(idx == stage)) {
485                         shmem_dir_unmap(dir-1);
486                         dir = shmem_dir_map(info->i_indirect) +
487                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
488                         while (!*dir) {
489                                 dir++;
490                                 idx += ENTRIES_PER_PAGEPAGE;
491                                 if (idx >= limit)
492                                         goto done1;
493                         }
494                         stage = idx + ENTRIES_PER_PAGEPAGE;
495                         subdir = *dir;
496                         *dir = NULL;
497                         shmem_dir_unmap(dir);
498                         if (empty) {
499                                 shmem_dir_free(empty);
500                                 shmem_free_block(inode);
501                         }
502                         empty = subdir;
503                         cond_resched_lock(&info->lock);
504                         dir = shmem_dir_map(subdir);
505                 }
506                 subdir = *dir;
507                 if (subdir && subdir->nr_swapped) {
508                         ptr = shmem_swp_map(subdir);
509                         size = limit - idx;
510                         if (size > ENTRIES_PER_PAGE)
511                                 size = ENTRIES_PER_PAGE;
512                         freed = shmem_free_swp(ptr+offset, ptr+size);
513                         shmem_swp_unmap(ptr);
514                         info->swapped -= freed;
515                         subdir->nr_swapped -= freed;
516                         BUG_ON(subdir->nr_swapped > offset);
517                 }
518                 if (offset)
519                         offset = 0;
520                 else if (subdir) {
521                         *dir = NULL;
522                         shmem_dir_free(subdir);
523                         shmem_free_block(inode);
524                 }
525         }
526 done1:
527         shmem_dir_unmap(dir-1);
528         if (empty) {
529                 shmem_dir_free(empty);
530                 shmem_free_block(inode);
531         }
532         if (info->next_index <= SHMEM_NR_DIRECT) {
533                 shmem_dir_free(info->i_indirect);
534                 info->i_indirect = NULL;
535                 shmem_free_block(inode);
536         }
537 done2:
538         BUG_ON(info->swapped > info->next_index);
539         if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
540                 /*
541                  * Call truncate_inode_pages again: racing shmem_unuse_inode
542                  * may have swizzled a page in from swap since vmtruncate or
543                  * generic_delete_inode did it, before we lowered next_index.
544                  * Also, though shmem_getpage checks i_size before adding to
545                  * cache, no recheck after: so fix the narrow window there too.
546                  */
547                 spin_unlock(&info->lock);
548                 truncate_inode_pages(inode->i_mapping, inode->i_size);
549                 spin_lock(&info->lock);
550         }
551         info->flags &= ~SHMEM_TRUNCATE;
552         shmem_recalc_inode(inode);
553         spin_unlock(&info->lock);
554 }
555
556 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
557 {
558         struct inode *inode = dentry->d_inode;
559         struct page *page = NULL;
560         int error;
561
562         if (attr->ia_valid & ATTR_SIZE) {
563                 if (attr->ia_size < inode->i_size) {
564                         /*
565                          * If truncating down to a partial page, then
566                          * if that page is already allocated, hold it
567                          * in memory until the truncation is over, so
568                          * truncate_partial_page cannot miss it were
569                          * it assigned to swap.
570                          */
571                         if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
572                                 (void) shmem_getpage(inode,
573                                         attr->ia_size>>PAGE_CACHE_SHIFT,
574                                                 &page, SGP_READ, NULL);
575                         }
576                         /*
577                          * Reset SHMEM_PAGEIN flag so that shmem_truncate can
578                          * detect if any pages might have been added to cache
579                          * after truncate_inode_pages.  But we needn't bother
580                          * if it's being fully truncated to zero-length: the
581                          * nrpages check is efficient enough in that case.
582                          */
583                         if (attr->ia_size) {
584                                 struct shmem_inode_info *info = SHMEM_I(inode);
585                                 spin_lock(&info->lock);
586                                 info->flags &= ~SHMEM_PAGEIN;
587                                 spin_unlock(&info->lock);
588                         }
589                 }
590         }
591
592         error = inode_change_ok(inode, attr);
593         if (!error)
594                 error = inode_setattr(inode, attr);
595         if (page)
596                 page_cache_release(page);
597         return error;
598 }
599
600 static void shmem_delete_inode(struct inode *inode)
601 {
602         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
603         struct shmem_inode_info *info = SHMEM_I(inode);
604
605         if (inode->i_op->truncate == shmem_truncate) {
606                 shmem_unacct_size(info->flags, inode->i_size);
607                 inode->i_size = 0;
608                 shmem_truncate(inode);
609                 if (!list_empty(&info->swaplist)) {
610                         spin_lock(&shmem_swaplist_lock);
611                         list_del_init(&info->swaplist);
612                         spin_unlock(&shmem_swaplist_lock);
613                 }
614         }
615         if (sbinfo) {
616                 BUG_ON(inode->i_blocks);
617                 spin_lock(&sbinfo->stat_lock);
618                 sbinfo->free_inodes++;
619                 spin_unlock(&sbinfo->stat_lock);
620         }
621         clear_inode(inode);
622 }
623
624 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
625 {
626         swp_entry_t *ptr;
627
628         for (ptr = dir; ptr < edir; ptr++) {
629                 if (ptr->val == entry.val)
630                         return ptr - dir;
631         }
632         return -1;
633 }
634
635 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
636 {
637         struct inode *inode;
638         unsigned long idx;
639         unsigned long size;
640         unsigned long limit;
641         unsigned long stage;
642         struct page **dir;
643         struct page *subdir;
644         swp_entry_t *ptr;
645         int offset;
646
647         idx = 0;
648         ptr = info->i_direct;
649         spin_lock(&info->lock);
650         limit = info->next_index;
651         size = limit;
652         if (size > SHMEM_NR_DIRECT)
653                 size = SHMEM_NR_DIRECT;
654         offset = shmem_find_swp(entry, ptr, ptr+size);
655         if (offset >= 0) {
656                 shmem_swp_balance_unmap();
657                 goto found;
658         }
659         if (!info->i_indirect)
660                 goto lost2;
661         /* we might be racing with shmem_truncate */
662         if (limit <= SHMEM_NR_DIRECT)
663                 goto lost2;
664
665         dir = shmem_dir_map(info->i_indirect);
666         stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
667
668         for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
669                 if (unlikely(idx == stage)) {
670                         shmem_dir_unmap(dir-1);
671                         dir = shmem_dir_map(info->i_indirect) +
672                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
673                         while (!*dir) {
674                                 dir++;
675                                 idx += ENTRIES_PER_PAGEPAGE;
676                                 if (idx >= limit)
677                                         goto lost1;
678                         }
679                         stage = idx + ENTRIES_PER_PAGEPAGE;
680                         subdir = *dir;
681                         shmem_dir_unmap(dir);
682                         dir = shmem_dir_map(subdir);
683                 }
684                 subdir = *dir;
685                 if (subdir && subdir->nr_swapped) {
686                         ptr = shmem_swp_map(subdir);
687                         size = limit - idx;
688                         if (size > ENTRIES_PER_PAGE)
689                                 size = ENTRIES_PER_PAGE;
690                         offset = shmem_find_swp(entry, ptr, ptr+size);
691                         if (offset >= 0) {
692                                 shmem_dir_unmap(dir);
693                                 goto found;
694                         }
695                         shmem_swp_unmap(ptr);
696                 }
697         }
698 lost1:
699         shmem_dir_unmap(dir-1);
700 lost2:
701         spin_unlock(&info->lock);
702         return 0;
703 found:
704         idx += offset;
705         inode = &info->vfs_inode;
706         if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
707                 info->flags |= SHMEM_PAGEIN;
708                 shmem_swp_set(info, ptr + offset, 0);
709         }
710         shmem_swp_unmap(ptr);
711         spin_unlock(&info->lock);
712         /*
713          * Decrement swap count even when the entry is left behind:
714          * try_to_unuse will skip over mms, then reincrement count.
715          */
716         swap_free(entry);
717         return 1;
718 }
719
720 /*
721  * shmem_unuse() searches for a possibly swapped-out shmem page.
722  */
723 int shmem_unuse(swp_entry_t entry, struct page *page)
724 {
725         struct list_head *p, *next;
726         struct shmem_inode_info *info;
727         int found = 0;
728
729         spin_lock(&shmem_swaplist_lock);
730         list_for_each_safe(p, next, &shmem_swaplist) {
731                 info = list_entry(p, struct shmem_inode_info, swaplist);
732                 if (!info->swapped)
733                         list_del_init(&info->swaplist);
734                 else if (shmem_unuse_inode(info, entry, page)) {
735                         /* move head to start search for next from here */
736                         list_move_tail(&shmem_swaplist, &info->swaplist);
737                         found = 1;
738                         break;
739                 }
740         }
741         spin_unlock(&shmem_swaplist_lock);
742         return found;
743 }
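
/*
 * shmem_unuse is called by try_to_unuse (mm/swapfile.c) during swapoff,
 * for swap entries that turn out not to be mapped into any mm;
 * returning 1 reports that the entry belonged to tmpfs and its page is
 * now back in the file's page cache.
 */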
744
745 /*
746  * Move the page from the page cache to the swap cache.
747  */
748 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
749 {
750         struct shmem_inode_info *info;
751         swp_entry_t *entry, swap;
752         struct address_space *mapping;
753         unsigned long index;
754         struct inode *inode;
755
756         BUG_ON(!PageLocked(page));
757         BUG_ON(page_mapped(page));
758
759         mapping = page->mapping;
760         index = page->index;
761         inode = mapping->host;
762         info = SHMEM_I(inode);
763         if (info->flags & VM_LOCKED)
764                 goto redirty;
765         swap = get_swap_page();
766         if (!swap.val)
767                 goto redirty;
768
769         spin_lock(&info->lock);
770         shmem_recalc_inode(inode);
771         if (index >= info->next_index) {
772                 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
773                 goto unlock;
774         }
775         entry = shmem_swp_entry(info, index, NULL);
776         BUG_ON(!entry);
777         BUG_ON(entry->val);
778
779         if (move_to_swap_cache(page, swap) == 0) {
780                 shmem_swp_set(info, entry, swap.val);
781                 shmem_swp_unmap(entry);
782                 spin_unlock(&info->lock);
783                 if (list_empty(&info->swaplist)) {
784                         spin_lock(&shmem_swaplist_lock);
785                         /* move instead of add in case we're racing */
786                         list_move_tail(&info->swaplist, &shmem_swaplist);
787                         spin_unlock(&shmem_swaplist_lock);
788                 }
789                 unlock_page(page);
790                 return 0;
791         }
792
793         shmem_swp_unmap(entry);
794 unlock:
795         spin_unlock(&info->lock);
796         swap_free(swap);
797 redirty:
798         set_page_dirty(page);
799         return WRITEPAGE_ACTIVATE;      /* Return with the page locked */
800 }
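
/*
 * Note on WRITEPAGE_ACTIVATE above: tmpfs cannot write a page "in
 * place", so when no swap slot is available (or the info is VM_LOCKED)
 * the page is redirtied and vmscan is asked to move it back onto the
 * active list, rather than retrying it immediately.
 */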
801
802 #ifdef CONFIG_NUMA
803 static struct page *shmem_swapin_async(struct shared_policy *p,
804                                        swp_entry_t entry, unsigned long idx)
805 {
806         struct page *page;
807         struct vm_area_struct pvma;
808
809         /* Create a pseudo vma that just contains the policy */
810         memset(&pvma, 0, sizeof(struct vm_area_struct));
811         pvma.vm_end = PAGE_SIZE;
812         pvma.vm_pgoff = idx;
813         pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
814         page = read_swap_cache_async(entry, &pvma, 0);
815         mpol_free(pvma.vm_policy);
816         return page;
817 }
818
819 struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
820                           unsigned long idx)
821 {
822         struct shared_policy *p = &info->policy;
823         int i, num;
824         struct page *page;
825         unsigned long offset;
826
827         num = valid_swaphandles(entry, &offset);
828         for (i = 0; i < num; offset++, i++) {
829                 page = shmem_swapin_async(p,
830                                 swp_entry(swp_type(entry), offset), idx);
831                 if (!page)
832                         break;
833                 page_cache_release(page);
834         }
835         lru_add_drain();        /* Push any new pages onto the LRU now */
836         return shmem_swapin_async(p, entry, idx);
837 }
838
839 static struct page *
840 shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
841                  unsigned long idx)
842 {
843         struct vm_area_struct pvma;
844         struct page *page;
845
846         memset(&pvma, 0, sizeof(struct vm_area_struct));
847         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
848         pvma.vm_pgoff = idx;
849         pvma.vm_end = PAGE_SIZE;
850         page = alloc_page_vma(gfp, &pvma, 0);
851         mpol_free(pvma.vm_policy);
852         return page;
853 }
854 #else
855 static inline struct page *
856 shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
857 {
858         swapin_readahead(entry, 0, NULL);
859         return read_swap_cache_async(entry, NULL, 0);
860 }
861
862 static inline struct page *
863 shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
864                                  unsigned long idx)
865 {
866         return alloc_page(gfp);
867 }
868 #endif
869
870 /*
871  * shmem_getpage - either get the page from swap or allocate a new one
872  *
873  * If we allocate a new one we do not mark it dirty. That's up to the
874  * vm. If we swap it in we mark it dirty, since we also free the swap
875  * entry: a page cannot live in both the swap and page cache
876  */
877 static int shmem_getpage(struct inode *inode, unsigned long idx,
878                         struct page **pagep, enum sgp_type sgp, int *type)
879 {
880         struct address_space *mapping = inode->i_mapping;
881         struct shmem_inode_info *info = SHMEM_I(inode);
882         struct shmem_sb_info *sbinfo;
883         struct page *filepage = *pagep;
884         struct page *swappage;
885         swp_entry_t *entry;
886         swp_entry_t swap;
887         int error;
888
889         if (idx >= SHMEM_MAX_INDEX)
890                 return -EFBIG;
891         /*
892          * Normally, filepage is NULL on entry, and either found
893          * uptodate immediately, or allocated and zeroed, or read
894          * in under swappage, which is then assigned to filepage.
895          * But shmem_prepare_write passes in a locked filepage,
896          * which may be found not uptodate by other callers too,
897          * and may need to be copied from the swappage read in.
898          */
899 repeat:
900         if (!filepage)
901                 filepage = find_lock_page(mapping, idx);
902         if (filepage && PageUptodate(filepage))
903                 goto done;
904         error = 0;
905         if (sgp == SGP_QUICK)
906                 goto failed;
907
908         spin_lock(&info->lock);
909         shmem_recalc_inode(inode);
910         entry = shmem_swp_alloc(info, idx, sgp);
911         if (IS_ERR(entry)) {
912                 spin_unlock(&info->lock);
913                 error = PTR_ERR(entry);
914                 goto failed;
915         }
916         swap = *entry;
917
918         if (swap.val) {
919                 /* Look it up and read it in.. */
920                 swappage = lookup_swap_cache(swap);
921                 if (!swappage) {
922                         shmem_swp_unmap(entry);
923                         spin_unlock(&info->lock);
924                         /* here we actually do the io */
925                         if (type && *type == VM_FAULT_MINOR) {
926                                 inc_page_state(pgmajfault);
927                                 *type = VM_FAULT_MAJOR;
928                         }
929                         swappage = shmem_swapin(info, swap, idx);
930                         if (!swappage) {
931                                 spin_lock(&info->lock);
932                                 entry = shmem_swp_alloc(info, idx, sgp);
933                                 if (IS_ERR(entry))
934                                         error = PTR_ERR(entry);
935                                 else {
936                                         if (entry->val == swap.val)
937                                                 error = -ENOMEM;
938                                         shmem_swp_unmap(entry);
939                                 }
940                                 spin_unlock(&info->lock);
941                                 if (error)
942                                         goto failed;
943                                 goto repeat;
944                         }
945                         wait_on_page_locked(swappage);
946                         page_cache_release(swappage);
947                         goto repeat;
948                 }
949
950                 /* We have to do this with page locked to prevent races */
951                 if (TestSetPageLocked(swappage)) {
952                         shmem_swp_unmap(entry);
953                         spin_unlock(&info->lock);
954                         wait_on_page_locked(swappage);
955                         page_cache_release(swappage);
956                         goto repeat;
957                 }
958                 if (PageWriteback(swappage)) {
959                         shmem_swp_unmap(entry);
960                         spin_unlock(&info->lock);
961                         wait_on_page_writeback(swappage);
962                         unlock_page(swappage);
963                         page_cache_release(swappage);
964                         goto repeat;
965                 }
966                 if (!PageUptodate(swappage)) {
967                         shmem_swp_unmap(entry);
968                         spin_unlock(&info->lock);
969                         unlock_page(swappage);
970                         page_cache_release(swappage);
971                         error = -EIO;
972                         goto failed;
973                 }
974
975                 if (filepage) {
976                         shmem_swp_set(info, entry, 0);
977                         shmem_swp_unmap(entry);
978                         delete_from_swap_cache(swappage);
979                         spin_unlock(&info->lock);
980                         copy_highpage(filepage, swappage);
981                         unlock_page(swappage);
982                         page_cache_release(swappage);
983                         flush_dcache_page(filepage);
984                         SetPageUptodate(filepage);
985                         set_page_dirty(filepage);
986                         swap_free(swap);
987                 } else if (!(error = move_from_swap_cache(
988                                 swappage, idx, mapping))) {
989                         info->flags |= SHMEM_PAGEIN;
990                         shmem_swp_set(info, entry, 0);
991                         shmem_swp_unmap(entry);
992                         spin_unlock(&info->lock);
993                         filepage = swappage;
994                         swap_free(swap);
995                 } else {
996                         shmem_swp_unmap(entry);
997                         spin_unlock(&info->lock);
998                         unlock_page(swappage);
999                         page_cache_release(swappage);
1000                         if (error == -ENOMEM) {
1001                                 /* let kswapd refresh zone for GFP_ATOMICs */
1002                                 blk_congestion_wait(WRITE, HZ/50);
1003                         }
1004                         goto repeat;
1005                 }
1006         } else if (sgp == SGP_READ && !filepage) {
1007                 shmem_swp_unmap(entry);
1008                 filepage = find_get_page(mapping, idx);
1009                 if (filepage &&
1010                     (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
1011                         spin_unlock(&info->lock);
1012                         wait_on_page_locked(filepage);
1013                         page_cache_release(filepage);
1014                         filepage = NULL;
1015                         goto repeat;
1016                 }
1017                 spin_unlock(&info->lock);
1018         } else {
1019                 shmem_swp_unmap(entry);
1020                 sbinfo = SHMEM_SB(inode->i_sb);
1021                 if (sbinfo) {
1022                         spin_lock(&sbinfo->stat_lock);
1023                         if (sbinfo->free_blocks == 0 ||
1024                             shmem_acct_block(info->flags)) {
1025                                 spin_unlock(&sbinfo->stat_lock);
1026                                 spin_unlock(&info->lock);
1027                                 error = -ENOSPC;
1028                                 goto failed;
1029                         }
1030                         sbinfo->free_blocks--;
1031                         inode->i_blocks += BLOCKS_PER_PAGE;
1032                         spin_unlock(&sbinfo->stat_lock);
1033                 } else if (shmem_acct_block(info->flags)) {
1034                         spin_unlock(&info->lock);
1035                         error = -ENOSPC;
1036                         goto failed;
1037                 }
1038
1039                 if (!filepage) {
1040                         spin_unlock(&info->lock);
1041                         filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
1042                                                     info,
1043                                                     idx);
1044                         if (!filepage) {
1045                                 shmem_unacct_blocks(info->flags, 1);
1046                                 shmem_free_block(inode);
1047                                 error = -ENOMEM;
1048                                 goto failed;
1049                         }
1050
1051                         spin_lock(&info->lock);
1052                         entry = shmem_swp_alloc(info, idx, sgp);
1053                         if (IS_ERR(entry))
1054                                 error = PTR_ERR(entry);
1055                         else {
1056                                 swap = *entry;
1057                                 shmem_swp_unmap(entry);
1058                         }
1059                         if (error || swap.val || 0 != add_to_page_cache_lru(
1060                                         filepage, mapping, idx, GFP_ATOMIC)) {
1061                                 spin_unlock(&info->lock);
1062                                 page_cache_release(filepage);
1063                                 shmem_unacct_blocks(info->flags, 1);
1064                                 shmem_free_block(inode);
1065                                 filepage = NULL;
1066                                 if (error)
1067                                         goto failed;
1068                                 goto repeat;
1069                         }
1070                         info->flags |= SHMEM_PAGEIN;
1071                 }
1072
1073                 info->alloced++;
1074                 spin_unlock(&info->lock);
1075                 clear_highpage(filepage);
1076                 flush_dcache_page(filepage);
1077                 SetPageUptodate(filepage);
1078         }
1079 done:
1080         if (*pagep != filepage) {
1081                 unlock_page(filepage);
1082                 *pagep = filepage;
1083         }
1084         return 0;
1085
1086 failed:
1087         if (*pagep != filepage) {
1088                 unlock_page(filepage);
1089                 page_cache_release(filepage);
1090         }
1091         return error;
1092 }
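
/*
 * Note on the repeat loop above: shmem_getpage drops info->lock around
 * anything that can sleep (swap-in I/O, page allocation, waiting on a
 * locked page), then revalidates by repeating the lookup; each
 * "goto repeat" is a lost race with truncate, writepage or a concurrent
 * fault, not an error path.
 */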
1093
1094 struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
1095 {
1096         struct inode *inode = vma->vm_file->f_dentry->d_inode;
1097         struct page *page = NULL;
1098         unsigned long idx;
1099         int error;
1100
1101         idx = (address - vma->vm_start) >> PAGE_SHIFT;
1102         idx += vma->vm_pgoff;
1103         idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1104
1105         error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1106         if (error)
1107                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1108
1109         mark_page_accessed(page);
1110         return page;
1111 }
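
/*
 * Example of the index arithmetic above (PAGE_CACHE_SHIFT == PAGE_SHIFT,
 * 4K pages): a fault at vma->vm_start + 0x5000 in a mapping with
 * vm_pgoff == 3 resolves to file page idx == 5 + 3 == 8.
 */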
1112
1113 static int shmem_populate(struct vm_area_struct *vma,
1114         unsigned long addr, unsigned long len,
1115         pgprot_t prot, unsigned long pgoff, int nonblock)
1116 {
1117         struct inode *inode = vma->vm_file->f_dentry->d_inode;
1118         struct mm_struct *mm = vma->vm_mm;
1119         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1120         unsigned long size;
1121
1122         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1123         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1124                 return -EINVAL;
1125
1126         while ((long) len > 0) {
1127                 struct page *page = NULL;
1128                 int err;
1129                 /*
1130                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1131                  */
1132                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1133                 if (err)
1134                         return err;
1135                 if (page) {
1136                         mark_page_accessed(page);
1137                         err = install_page(mm, vma, addr, page, prot);
1138                         if (err) {
1139                                 page_cache_release(page);
1140                                 return err;
1141                         }
1142                 } else if (nonblock) {
1143                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1144                         if (err)
1145                                 return err;
1146                 }
1147
1148                 len -= PAGE_SIZE;
1149                 addr += PAGE_SIZE;
1150                 pgoff++;
1151         }
1152         return 0;
1153 }
1154
1155 #ifdef CONFIG_NUMA
1156 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1157 {
1158         struct inode *i = vma->vm_file->f_dentry->d_inode;
1159         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1160 }
1161
1162 struct mempolicy *
1163 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1164 {
1165         struct inode *i = vma->vm_file->f_dentry->d_inode;
1166         unsigned long idx;
1167
1168         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1169         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1170 }
1171 #endif
1172
1173 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1174 {
1175         struct inode *inode = file->f_dentry->d_inode;
1176         struct shmem_inode_info *info = SHMEM_I(inode);
1177         int retval = -ENOMEM;
1178
1179         spin_lock(&info->lock);
1180         if (lock && !(info->flags & VM_LOCKED)) {
1181                 if (!user_shm_lock(inode->i_size, user))
1182                         goto out_nomem;
1183                 info->flags |= VM_LOCKED;
1184         }
1185         if (!lock && (info->flags & VM_LOCKED) && user) {
1186                 user_shm_unlock(inode->i_size, user);
1187                 info->flags &= ~VM_LOCKED;
1188         }
1189         retval = 0;
1190 out_nomem:
1191         spin_unlock(&info->lock);
1192         return retval;
1193 }
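
/*
 * Illustrative caller (not in this file): the SysV ipc SHM_LOCK path in
 * ipc/shm.c pins a segment with shmem_lock(shp->shm_file, 1, user), and
 * the matching SHM_UNLOCK path passes lock == 0.
 */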
1194
1195 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1196 {
1197         file_accessed(file);
1198         vma->vm_ops = &shmem_vm_ops;
1199         return 0;
1200 }
1201
1202 static struct inode *
1203 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1204 {
1205         struct inode *inode;
1206         struct shmem_inode_info *info;
1207         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1208
1209         if (sbinfo) {
1210                 spin_lock(&sbinfo->stat_lock);
1211                 if (!sbinfo->free_inodes) {
1212                         spin_unlock(&sbinfo->stat_lock);
1213                         return NULL;
1214                 }
1215                 sbinfo->free_inodes--;
1216                 spin_unlock(&sbinfo->stat_lock);
1217         }
1218
1219         inode = new_inode(sb);
1220         if (inode) {
1221                 inode->i_mode = mode;
1222                 inode->i_uid = current->fsuid;
1223                 inode->i_gid = current->fsgid;
1224                 inode->i_blksize = PAGE_CACHE_SIZE;
1225                 inode->i_blocks = 0;
1226                 inode->i_mapping->a_ops = &shmem_aops;
1227                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1228                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1229                 info = SHMEM_I(inode);
1230                 memset(info, 0, (char *)inode - (char *)info);
1231                 spin_lock_init(&info->lock);
1232                 mpol_shared_policy_init(&info->policy);
1233                 INIT_LIST_HEAD(&info->swaplist);
1234
1235                 switch (mode & S_IFMT) {
1236                 default:
1237                         init_special_inode(inode, mode, dev);
1238                         break;
1239                 case S_IFREG:
1240                         inode->i_op = &shmem_inode_operations;
1241                         inode->i_fop = &shmem_file_operations;
1242                         break;
1243                 case S_IFDIR:
1244                         inode->i_nlink++;
1245                         /* Some things misbehave if size == 0 on a directory */
1246                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1247                         inode->i_op = &shmem_dir_inode_operations;
1248                         inode->i_fop = &simple_dir_operations;
1249                         break;
1250                 case S_IFLNK:
1251                         break;
1252                 }
1253         }
1254         return inode;
1255 }
1256
1257 #ifdef CONFIG_TMPFS
1258
1259 static int shmem_set_size(struct shmem_sb_info *sbinfo,
1260                           unsigned long max_blocks, unsigned long max_inodes)
1261 {
1262         int error;
1263         unsigned long blocks, inodes;
1264
1265         spin_lock(&sbinfo->stat_lock);
1266         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
1267         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
1268         error = -EINVAL;
1269         if (max_blocks < blocks)
1270                 goto out;
1271         if (max_inodes < inodes)
1272                 goto out;
1273         error = 0;
1274         sbinfo->max_blocks  = max_blocks;
1275         sbinfo->free_blocks = max_blocks - blocks;
1276         sbinfo->max_inodes  = max_inodes;
1277         sbinfo->free_inodes = max_inodes - inodes;
1278 out:
1279         spin_unlock(&sbinfo->stat_lock);
1280         return error;
1281 }
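
/*
 * Used at remount time: shrinking the fs below what is already in use
 * fails with -EINVAL above; e.g. a tmpfs with 100 blocks allocated
 * cannot be remounted with max_blocks == 64, which would otherwise
 * leave free_blocks to wrap negative.
 */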
1282
1283 static struct inode_operations shmem_symlink_inode_operations;
1284 static struct inode_operations shmem_symlink_inline_operations;
1285
1286 /*
1287  * Normally tmpfs makes no use of shmem_prepare_write, but it
1288  * lets a tmpfs file be used read-write below the loop driver.
1289  */
1290 static int
1291 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1292 {
1293         struct inode *inode = page->mapping->host;
1294         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1295 }
1296
1297 static ssize_t
1298 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1299 {
1300         struct inode    *inode = file->f_dentry->d_inode;
1301         loff_t          pos;
1302         unsigned long   written;
1303         ssize_t         err;
1304
1305         if ((ssize_t) count < 0)
1306                 return -EINVAL;
1307
1308         if (!access_ok(VERIFY_READ, buf, count))
1309                 return -EFAULT;
1310
1311         down(&inode->i_sem);
1312
1313         pos = *ppos;
1314         written = 0;
1315
1316         err = generic_write_checks(file, &pos, &count, 0);
1317         if (err || !count)
1318                 goto out;
1319
1320         err = remove_suid(file->f_dentry);
1321         if (err)
1322                 goto out;
1323
1324         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1325
1326         do {
1327                 struct page *page = NULL;
1328                 unsigned long bytes, index, offset;
1329                 char *kaddr;
1330                 int left;
1331
1332                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1333                 index = pos >> PAGE_CACHE_SHIFT;
1334                 bytes = PAGE_CACHE_SIZE - offset;
1335                 if (bytes > count)
1336                         bytes = count;
1337
1338                 /*
1339                  * We don't hold page lock across copy from user -
1340                  * what would it guard against? - so no deadlock here.
1341                  * But it still may be a good idea to prefault below.
1342                  */
1343
1344                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1345                 if (err)
1346                         break;
1347
1348                 left = bytes;
1349                 if (PageHighMem(page)) {
1350                         volatile unsigned char dummy;
1351                         __get_user(dummy, buf);
1352                         __get_user(dummy, buf + bytes - 1);
1353
1354                         kaddr = kmap_atomic(page, KM_USER0);
1355                         left = __copy_from_user_inatomic(kaddr + offset,
1356                                                         buf, bytes);
1357                         kunmap_atomic(kaddr, KM_USER0);
1358                 }
1359                 if (left) {
1360                         kaddr = kmap(page);
1361                         left = __copy_from_user(kaddr + offset, buf, bytes);
1362                         kunmap(page);
1363                 }
1364
1365                 written += bytes;
1366                 count -= bytes;
1367                 pos += bytes;
1368                 buf += bytes;
1369                 if (pos > inode->i_size)
1370                         i_size_write(inode, pos);
1371
1372                 flush_dcache_page(page);
1373                 set_page_dirty(page);
1374                 mark_page_accessed(page);
1375                 page_cache_release(page);
1376
1377                 if (left) {
1378                         pos -= left;
1379                         written -= left;
1380                         err = -EFAULT;
1381                         break;
1382                 }
1383
1384                 /*
1385                  * Our dirty pages are not counted in nr_dirty,
1386                  * and we do not attempt to balance dirty pages.
1387                  */
1388
1389                 cond_resched();
1390         } while (count);
1391
1392         *ppos = pos;
1393         if (written)
1394                 err = written;
1395 out:
1396         up(&inode->i_sem);
1397         return err;
1398 }
1399
1400 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1401 {
1402         struct inode *inode = filp->f_dentry->d_inode;
1403         struct address_space *mapping = inode->i_mapping;
1404         unsigned long index, offset;
1405
1406         index = *ppos >> PAGE_CACHE_SHIFT;
1407         offset = *ppos & ~PAGE_CACHE_MASK;
1408
1409         for (;;) {
1410                 struct page *page = NULL;
1411                 unsigned long end_index, nr, ret;
1412                 loff_t i_size = i_size_read(inode);
1413
1414                 end_index = i_size >> PAGE_CACHE_SHIFT;
1415                 if (index > end_index)
1416                         break;
1417                 if (index == end_index) {
1418                         nr = i_size & ~PAGE_CACHE_MASK;
1419                         if (nr <= offset)
1420                                 break;
1421                 }
1422
1423                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1424                 if (desc->error) {
1425                         if (desc->error == -EINVAL)
1426                                 desc->error = 0;
1427                         break;
1428                 }
1429
1430                 /*
1431                  * We must re-evaluate i_size after shmem_getpage, since reads
1432                  * (unlike writes) are called without i_sem protection against truncate.
1433                  */
1434                 nr = PAGE_CACHE_SIZE;
1435                 i_size = i_size_read(inode);
1436                 end_index = i_size >> PAGE_CACHE_SHIFT;
1437                 if (index == end_index) {
1438                         nr = i_size & ~PAGE_CACHE_MASK;
1439                         if (nr <= offset) {
1440                                 if (page)
1441                                         page_cache_release(page);
1442                                 break;
1443                         }
1444                 }
1445                 nr -= offset;
1446
1447                 if (page) {
1448                         /*
1449                          * If users can be writing to this page using arbitrary
1450                          * virtual addresses, take care about potential aliasing
1451                          * before reading the page on the kernel side.
1452                          */
1453                         if (mapping_writably_mapped(mapping))
1454                                 flush_dcache_page(page);
1455                         /*
1456                          * Mark the page accessed if we read the beginning.
1457                          */
1458                         if (!offset)
1459                                 mark_page_accessed(page);
1460                 } else
1461                         page = ZERO_PAGE(0);
1462
1463                 /*
1464                  * Ok, we have the page, and it's up-to-date, so
1465                  * now we can copy it to user space...
1466                  *
1467                  * The actor routine returns how many bytes were actually used.
1468                  * NOTE! This may not be the same as how much of the user buffer
1469                  * we filled up (we may be padding etc), so we can only update
1470                  * "pos" here (the actor routine has to update the user buffer
1471                  * pointers and the remaining count).
1472                  */
1473                 ret = actor(desc, page, offset, nr);
1474                 offset += ret;
1475                 index += offset >> PAGE_CACHE_SHIFT;
1476                 offset &= ~PAGE_CACHE_MASK;
1477
1478                 page_cache_release(page);
1479                 if (ret != nr || !desc->count)
1480                         break;
1481
1482                 cond_resched();
1483         }
1484
1485         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1486         file_accessed(filp);
1487 }
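
/*
 * Note (editorial): over a hole, shmem_getpage with SGP_READ leaves
 * *pagep NULL, and the loop above substitutes ZERO_PAGE(0) rather than
 * allocating: reading a sparse tmpfs file costs no new pages, the
 * zeroes are copied out of the shared zero page.
 */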
1488
1489 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1490 {
1491         read_descriptor_t desc;
1492
1493         if ((ssize_t) count < 0)
1494                 return -EINVAL;
1495         if (!access_ok(VERIFY_WRITE, buf, count))
1496                 return -EFAULT;
1497         if (!count)
1498                 return 0;
1499
1500         desc.written = 0;
1501         desc.count = count;
1502         desc.arg.buf = buf;
1503         desc.error = 0;
1504
1505         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1506         if (desc.written)
1507                 return desc.written;
1508         return desc.error;
1509 }
1510
1511 static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1512                          size_t count, read_actor_t actor, void *target)
1513 {
1514         read_descriptor_t desc;
1515
1516         if (!count)
1517                 return 0;
1518
1519         desc.written = 0;
1520         desc.count = count;
1521         desc.arg.data = target;
1522         desc.error = 0;
1523
1524         do_shmem_file_read(in_file, ppos, &desc, actor);
1525         if (desc.written)
1526                 return desc.written;
1527         return desc.error;
1528 }
1529
1530 static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
1531 {
1532         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1533
1534         buf->f_type = TMPFS_SUPER_MAGIC;
1535         buf->f_bsize = PAGE_CACHE_SIZE;
1536         buf->f_namelen = NAME_MAX;
1537         if (sbinfo) {
1538                 spin_lock(&sbinfo->stat_lock);
1539                 buf->f_blocks = sbinfo->max_blocks;
1540                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1541                 buf->f_files = sbinfo->max_inodes;
1542                 buf->f_ffree = sbinfo->free_inodes;
1543                 spin_unlock(&sbinfo->stat_lock);
1544         }
1545         /* else leave those fields 0 like simple_statfs */
1546         return 0;
1547 }
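
/*
 * Illustration (editorial): userspace sees these limits through
 * statfs(2); the mount point below is a hypothetical example.
 *
 *	struct statfs st;
 *	if (statfs("/dev/shm", &st) == 0)
 *		printf("%ld of %ld blocks free, block size %ld\n",
 *		       (long)st.f_bfree, (long)st.f_blocks,
 *		       (long)st.f_bsize);
 */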
1548
1549 /*
1550  * File creation. Allocate an inode, and we're done.
1551  */
1552 static int
1553 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1554 {
1555         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1556         int error = -ENOSPC;
1557
1558         if (inode) {
1559                 if (dir->i_mode & S_ISGID) {
1560                         inode->i_gid = dir->i_gid;
1561                         if (S_ISDIR(mode))
1562                                 inode->i_mode |= S_ISGID;
1563                 }
1564                 dir->i_size += BOGO_DIRENT_SIZE;
1565                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1566                 d_instantiate(dentry, inode);
1567                 dget(dentry); /* Extra count - pin the dentry in core */
1568                 error = 0;
1569         }
1570         return error;
1571 }
1572
1573 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1574 {
1575         int error;
1576
1577         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1578                 return error;
1579         dir->i_nlink++;
1580         return 0;
1581 }
1582
1583 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1584                 struct nameidata *nd)
1585 {
1586         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1587 }
1588
1589 /*
1590  * Link a file.
1591  */
1592 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1593 {
1594         struct inode *inode = old_dentry->d_inode;
1595         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1596
1597         /*
1598          * No ordinary (disk-based) filesystem counts links as inodes;
1599          * but each new link needs a new dentry, pinning lowmem, and
1600          * tmpfs dentries cannot be pruned until they are unlinked.
1601          */
1602         if (sbinfo) {
1603                 spin_lock(&sbinfo->stat_lock);
1604                 if (!sbinfo->free_inodes) {
1605                         spin_unlock(&sbinfo->stat_lock);
1606                         return -ENOSPC;
1607                 }
1608                 sbinfo->free_inodes--;
1609                 spin_unlock(&sbinfo->stat_lock);
1610         }
1611
1612         dir->i_size += BOGO_DIRENT_SIZE;
1613         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1614         inode->i_nlink++;
1615         atomic_inc(&inode->i_count);    /* New dentry reference */
1616         dget(dentry);           /* Extra pinning count for the created dentry */
1617         d_instantiate(dentry, inode);
1618         return 0;
1619 }
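
/*
 * Illustration (editorial): each link(2) on tmpfs thus debits
 * free_inodes exactly as a create would; shmem_unlink below credits it
 * back when a file with i_nlink > 1 loses a link.
 */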
1620
1621 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1622 {
1623         struct inode *inode = dentry->d_inode;
1624
1625         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1626                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1627                 if (sbinfo) {
1628                         spin_lock(&sbinfo->stat_lock);
1629                         sbinfo->free_inodes++;
1630                         spin_unlock(&sbinfo->stat_lock);
1631                 }
1632         }
1633
1634         dir->i_size -= BOGO_DIRENT_SIZE;
1635         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1636         inode->i_nlink--;
1637         dput(dentry);   /* Undo the count from "create" - this does all the work */
1638         return 0;
1639 }
1640
1641 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1642 {
1643         if (!simple_empty(dentry))
1644                 return -ENOTEMPTY;
1645
1646         dir->i_nlink--;
1647         return shmem_unlink(dir, dentry);
1648 }
1649
1650 /*
1651  * The VFS layer already does all the dentry stuff for rename;
1652  * we just have to decrement the usage count for the target if
1653  * it exists, so that the VFS layer correctly frees it when it
1654  * gets overwritten.
1655  */
1656 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1657 {
1658         struct inode *inode = old_dentry->d_inode;
1659         int they_are_dirs = S_ISDIR(inode->i_mode);
1660
1661         if (!simple_empty(new_dentry))
1662                 return -ENOTEMPTY;
1663
1664         if (new_dentry->d_inode) {
1665                 (void) shmem_unlink(new_dir, new_dentry);
1666                 if (they_are_dirs)
1667                         old_dir->i_nlink--;
1668         } else if (they_are_dirs) {
1669                 old_dir->i_nlink--;
1670                 new_dir->i_nlink++;
1671         }
1672
1673         old_dir->i_size -= BOGO_DIRENT_SIZE;
1674         new_dir->i_size += BOGO_DIRENT_SIZE;
1675         old_dir->i_ctime = old_dir->i_mtime =
1676         new_dir->i_ctime = new_dir->i_mtime =
1677         inode->i_ctime = CURRENT_TIME;
1678         return 0;
1679 }
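
/*
 * Note (editorial): for a directory rename the ".." entry moves with
 * the directory, so old_dir->i_nlink drops and new_dir->i_nlink rises;
 * when an (empty) target directory is overwritten, its own ".." was
 * already accounted to new_dir, so only old_dir is decremented above.
 */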
1680
1681 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1682 {
1683         int error;
1684         int len;
1685         struct inode *inode;
1686         struct page *page = NULL;
1687         char *kaddr;
1688         struct shmem_inode_info *info;
1689
1690         len = strlen(symname) + 1;
1691         if (len > PAGE_CACHE_SIZE)
1692                 return -ENAMETOOLONG;
1693
1694         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1695         if (!inode)
1696                 return -ENOSPC;
1697
1698         info = SHMEM_I(inode);
1699         inode->i_size = len-1;
1700         if (len <= (char *)inode - (char *)info) {
1701                 /* do it inline */
1702                 memcpy(info, symname, len);
1703                 inode->i_op = &shmem_symlink_inline_operations;
1704         } else {
1705                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1706                 if (error) {
1707                         iput(inode);
1708                         return error;
1709                 }
1710                 inode->i_op = &shmem_symlink_inode_operations;
1711                 kaddr = kmap_atomic(page, KM_USER0);
1712                 memcpy(kaddr, symname, len);
1713                 kunmap_atomic(kaddr, KM_USER0);
1714                 set_page_dirty(page);
1715                 page_cache_release(page);
1716         }
1717         if (dir->i_mode & S_ISGID)
1718                 inode->i_gid = dir->i_gid;
1719         dir->i_size += BOGO_DIRENT_SIZE;
1720         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1721         d_instantiate(dentry, inode);
1722         dget(dentry);
1723         return 0;
1724 }
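
/*
 * Note (editorial): short symlink targets are memcpy'd into the
 * shmem_inode_info itself, in the space preceding its embedded
 * vfs_inode ((char *)inode - (char *)info bytes); only a target too
 * long for that gap costs a data page via shmem_getpage above.
 */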
1725
1726 static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1727 {
1728         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1729         return 0;
1730 }
1731
1732 static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1733 {
1734         struct page *page = NULL;
1735         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1736         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1737         return 0;
1738 }
1739
1740 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
1741 {
1742         if (!IS_ERR(nd_get_link(nd))) {
1743                 struct page *page;
1744
1745                 page = find_get_page(dentry->d_inode->i_mapping, 0);
1746                 if (!page)
1747                         BUG();
1748                 kunmap(page);
1749                 mark_page_accessed(page);
1750                 page_cache_release(page);
1751                 page_cache_release(page);
1752         }
1753 }
1754
1755 static struct inode_operations shmem_symlink_inline_operations = {
1756         .readlink       = generic_readlink,
1757         .follow_link    = shmem_follow_link_inline,
1758 };
1759
1760 static struct inode_operations shmem_symlink_inode_operations = {
1761         .truncate       = shmem_truncate,
1762         .readlink       = generic_readlink,
1763         .follow_link    = shmem_follow_link,
1764         .put_link       = shmem_put_link,
1765 };
1766
1767 static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
1768 {
1769         char *this_char, *value, *rest;
1770
1771         while ((this_char = strsep(&options, ",")) != NULL) {
1772                 if (!*this_char)
1773                         continue;
1774                 if ((value = strchr(this_char,'=')) != NULL) {
1775                         *value++ = 0;
1776                 } else {
1777                         printk(KERN_ERR
1778                             "tmpfs: No value for mount option '%s'\n",
1779                             this_char);
1780                         return 1;
1781                 }
1782
1783                 if (!strcmp(this_char,"size")) {
1784                         unsigned long long size;
1785                         size = memparse(value,&rest);
1786                         if (*rest == '%') {
1787                                 size <<= PAGE_SHIFT;
1788                                 size *= totalram_pages;
1789                                 do_div(size, 100);
1790                                 rest++;
1791                         }
1792                         if (*rest)
1793                                 goto bad_val;
1794                         *blocks = size >> PAGE_CACHE_SHIFT;
1795                 } else if (!strcmp(this_char,"nr_blocks")) {
1796                         *blocks = memparse(value,&rest);
1797                         if (*rest)
1798                                 goto bad_val;
1799                 } else if (!strcmp(this_char,"nr_inodes")) {
1800                         *inodes = memparse(value,&rest);
1801                         if (*rest)
1802                                 goto bad_val;
1803                 } else if (!strcmp(this_char,"mode")) {
1804                         if (!mode)
1805                                 continue;
1806                         *mode = simple_strtoul(value,&rest,8);
1807                         if (*rest)
1808                                 goto bad_val;
1809                 } else if (!strcmp(this_char,"uid")) {
1810                         if (!uid)
1811                                 continue;
1812                         *uid = simple_strtoul(value,&rest,0);
1813                         if (*rest)
1814                                 goto bad_val;
1815                 } else if (!strcmp(this_char,"gid")) {
1816                         if (!gid)
1817                                 continue;
1818                         *gid = simple_strtoul(value,&rest,0);
1819                         if (*rest)
1820                                 goto bad_val;
1821                 } else {
1822                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1823                                this_char);
1824                         return 1;
1825                 }
1826         }
1827         return 0;
1828
1829 bad_val:
1830         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
1831                value, this_char);
1832         return 1;
1833
1834 }
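
/*
 * Illustration (editorial): a typical option string parsed above is
 * "size=50%,nr_inodes=8192,mode=1777,uid=0,gid=0"; "size" takes a
 * percentage of totalram_pages or a byte count with the k/m/g suffixes
 * memparse understands.  From C, a hypothetical mount might be:
 *
 *	mount("tmpfs", "/mnt", "tmpfs", 0, "size=50%,nr_inodes=8192");
 */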
1835
1836 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
1837 {
1838         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1839         unsigned long max_blocks = 0;
1840         unsigned long max_inodes = 0;
1841
1842         if (sbinfo) {
1843                 max_blocks = sbinfo->max_blocks;
1844                 max_inodes = sbinfo->max_inodes;
1845         }
1846         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes))
1847                 return -EINVAL;
1848         /* Keep it simple: disallow limited <-> unlimited remount */
1849         if ((max_blocks || max_inodes) == !sbinfo)
1850                 return -EINVAL;
1851         /* But allow the pointless unlimited -> unlimited remount */
1852         if (!sbinfo)
1853                 return 0;
1854         return shmem_set_size(sbinfo, max_blocks, max_inodes);
1855 }
1856 #endif
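
/*
 * Illustration (editorial): given the checks above, a mount created
 * with "size=0,nr_inodes=0" (no sbinfo) rejects a remount to
 * "size=128m", and a limited mount rejects "size=0,nr_inodes=0";
 * resizing between non-zero limits goes through shmem_set_size.
 */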
1857
1858 static void shmem_put_super(struct super_block *sb)
1859 {
1860         kfree(sb->s_fs_info);
1861         sb->s_fs_info = NULL;
1862 }
1863
1864 static int shmem_fill_super(struct super_block *sb,
1865                             void *data, int silent)
1866 {
1867         struct inode *inode;
1868         struct dentry *root;
1869         int mode   = S_IRWXUGO | S_ISVTX;
1870         uid_t uid = current->fsuid;
1871         gid_t gid = current->fsgid;
1872         int err = -ENOMEM;
1873
1874 #ifdef CONFIG_TMPFS
1875         unsigned long blocks = 0;
1876         unsigned long inodes = 0;
1877
1878         /*
1879          * By default we allow only half of the physical RAM per
1880          * tmpfs instance, limiting inodes to one per page of lowmem;
1881          * but the internal instance is left unlimited.
1882          */
1883         if (!(sb->s_flags & MS_NOUSER)) {
1884                 blocks = totalram_pages / 2;
1885                 inodes = totalram_pages - totalhigh_pages;
1886                 if (inodes > blocks)
1887                         inodes = blocks;
1888
1889                 if (shmem_parse_options(data, &mode,
1890                                         &uid, &gid, &blocks, &inodes))
1891                         return -EINVAL;
1892         }
1893
1894         if (blocks || inodes) {
1895                 struct shmem_sb_info *sbinfo;
1896                 sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
1897                 if (!sbinfo)
1898                         return -ENOMEM;
1899                 sb->s_fs_info = sbinfo;
1900                 spin_lock_init(&sbinfo->stat_lock);
1901                 sbinfo->max_blocks = blocks;
1902                 sbinfo->free_blocks = blocks;
1903                 sbinfo->max_inodes = inodes;
1904                 sbinfo->free_inodes = inodes;
1905         }
1906 #endif
1907
1908         sb->s_maxbytes = SHMEM_MAX_BYTES;
1909         sb->s_blocksize = PAGE_CACHE_SIZE;
1910         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1911         sb->s_magic = TMPFS_SUPER_MAGIC;
1912         sb->s_op = &shmem_ops;
1913         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
1914         if (!inode)
1915                 goto failed;
1916         inode->i_uid = uid;
1917         inode->i_gid = gid;
1918         root = d_alloc_root(inode);
1919         if (!root)
1920                 goto failed_iput;
1921         sb->s_root = root;
1922         return 0;
1923
1924 failed_iput:
1925         iput(inode);
1926 failed:
1927         shmem_put_super(sb);
1928         return err;
1929 }
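
/*
 * Illustration (editorial): on a machine with 1GB of RAM and 4kB pages
 * (totalram_pages about 262144), an option-less user mount defaults to
 * blocks = 131072 (512MB) and inodes = min(lowmem pages, blocks);
 * explicit size=/nr_inodes= options replace those defaults before
 * sbinfo is allocated.
 */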
1930
1931 static kmem_cache_t *shmem_inode_cachep;
1932
1933 static struct inode *shmem_alloc_inode(struct super_block *sb)
1934 {
1935         struct shmem_inode_info *p;
1936         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
1937         if (!p)
1938                 return NULL;
1939         return &p->vfs_inode;
1940 }
1941
1942 static void shmem_destroy_inode(struct inode *inode)
1943 {
1944         mpol_free_shared_policy(&SHMEM_I(inode)->policy);
1945         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
1946 }
1947
1948 static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
1949 {
1950         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
1951
1952         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
1953             SLAB_CTOR_CONSTRUCTOR) {
1954                 inode_init_once(&p->vfs_inode);
1955         }
1956 }
1957
1958 static int init_inodecache(void)
1959 {
1960         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
1961                                 sizeof(struct shmem_inode_info),
1962                                 0, 0, init_once, NULL);
1963         if (shmem_inode_cachep == NULL)
1964                 return -ENOMEM;
1965         return 0;
1966 }
1967
1968 static void destroy_inodecache(void)
1969 {
1970         if (kmem_cache_destroy(shmem_inode_cachep))
1971                 printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
1972 }
1973
1974 static struct address_space_operations shmem_aops = {
1975         .writepage      = shmem_writepage,
1976         .set_page_dirty = __set_page_dirty_nobuffers,
1977 #ifdef CONFIG_TMPFS
1978         .prepare_write  = shmem_prepare_write,
1979         .commit_write   = simple_commit_write,
1980 #endif
1981 };
1982
1983 static struct file_operations shmem_file_operations = {
1984         .mmap           = shmem_mmap,
1985 #ifdef CONFIG_TMPFS
1986         .llseek         = generic_file_llseek,
1987         .read           = shmem_file_read,
1988         .write          = shmem_file_write,
1989         .fsync          = simple_sync_file,
1990         .sendfile       = shmem_file_sendfile,
1991 #endif
1992 };
1993
1994 static struct inode_operations shmem_inode_operations = {
1995         .truncate       = shmem_truncate,
1996         .setattr        = shmem_notify_change,
1997 };
1998
1999 static struct inode_operations shmem_dir_inode_operations = {
2000 #ifdef CONFIG_TMPFS
2001         .create         = shmem_create,
2002         .lookup         = simple_lookup,
2003         .link           = shmem_link,
2004         .unlink         = shmem_unlink,
2005         .symlink        = shmem_symlink,
2006         .mkdir          = shmem_mkdir,
2007         .rmdir          = shmem_rmdir,
2008         .mknod          = shmem_mknod,
2009         .rename         = shmem_rename,
2010 #endif
2011 };
2012
2013 static struct super_operations shmem_ops = {
2014         .alloc_inode    = shmem_alloc_inode,
2015         .destroy_inode  = shmem_destroy_inode,
2016 #ifdef CONFIG_TMPFS
2017         .statfs         = shmem_statfs,
2018         .remount_fs     = shmem_remount_fs,
2019 #endif
2020         .delete_inode   = shmem_delete_inode,
2021         .drop_inode     = generic_delete_inode,
2022         .put_super      = shmem_put_super,
2023 };
2024
2025 static struct vm_operations_struct shmem_vm_ops = {
2026         .nopage         = shmem_nopage,
2027         .populate       = shmem_populate,
2028 #ifdef CONFIG_NUMA
2029         .set_policy     = shmem_set_policy,
2030         .get_policy     = shmem_get_policy,
2031 #endif
2032 };
2033
2034 static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
2035         int flags, const char *dev_name, void *data)
2036 {
2037         return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
2038 }
2039
2040 static struct file_system_type tmpfs_fs_type = {
2041         .owner          = THIS_MODULE,
2042         .name           = "tmpfs",
2043         .get_sb         = shmem_get_sb,
2044         .kill_sb        = kill_litter_super,
2045 };
2046 static struct vfsmount *shm_mnt;
2047
2048 static int __init init_tmpfs(void)
2049 {
2050         int error;
2051
2052         error = init_inodecache();
2053         if (error)
2054                 goto out3;
2055
2056         error = register_filesystem(&tmpfs_fs_type);
2057         if (error) {
2058                 printk(KERN_ERR "Could not register tmpfs\n");
2059                 goto out2;
2060         }
2061 #ifdef CONFIG_TMPFS
2062         devfs_mk_dir("shm");
2063 #endif
2064         shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
2065                                 tmpfs_fs_type.name, NULL);
2066         if (IS_ERR(shm_mnt)) {
2067                 error = PTR_ERR(shm_mnt);
2068                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2069                 goto out1;
2070         }
2071         return 0;
2072
2073 out1:
2074         unregister_filesystem(&tmpfs_fs_type);
2075 out2:
2076         destroy_inodecache();
2077 out3:
2078         shm_mnt = ERR_PTR(error);
2079         return error;
2080 }
2081 module_init(init_tmpfs)
2082
2083 /*
2084  * shmem_file_setup - get an unlinked file living in tmpfs
2085  *
2086  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2087  * @size: size to be set for the file
2088  *
2089  */
2090 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2091 {
2092         int error;
2093         struct file *file;
2094         struct inode *inode;
2095         struct dentry *dentry, *root;
2096         struct qstr this;
2097
2098         if (IS_ERR(shm_mnt))
2099                 return (void *)shm_mnt;
2100
2101         if (size < 0 || size > SHMEM_MAX_BYTES)
2102                 return ERR_PTR(-EINVAL);
2103
2104         if (shmem_acct_size(flags, size))
2105                 return ERR_PTR(-ENOMEM);
2106
2107         error = -ENOMEM;
2108         this.name = name;
2109         this.len = strlen(name);
2110         this.hash = 0; /* will go */
2111         root = shm_mnt->mnt_root;
2112         dentry = d_alloc(root, &this);
2113         if (!dentry)
2114                 goto put_memory;
2115
2116         error = -ENFILE;
2117         file = get_empty_filp();
2118         if (!file)
2119                 goto put_dentry;
2120
2121         error = -ENOSPC;
2122         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2123         if (!inode)
2124                 goto close_file;
2125
2126         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2127         d_instantiate(dentry, inode);
2128         inode->i_size = size;
2129         inode->i_nlink = 0;     /* It is unlinked */
2130         file->f_vfsmnt = mntget(shm_mnt);
2131         file->f_dentry = dentry;
2132         file->f_mapping = inode->i_mapping;
2133         file->f_op = &shmem_file_operations;
2134         file->f_mode = FMODE_WRITE | FMODE_READ;
2135         return file;
2136
2137 close_file:
2138         put_filp(file);
2139 put_dentry:
2140         dput(dentry);
2141 put_memory:
2142         shmem_unacct_size(flags, size);
2143         return ERR_PTR(error);
2144 }
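
/*
 * Illustration (editorial): a minimal in-kernel caller, along the lines
 * of what the SysV shm code does:
 *
 *	struct file *file = shmem_file_setup("example", 4096, VM_ACCOUNT);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...use file->f_mapping, or map the file...
 *	fput(file);	frees everything, since i_nlink is 0
 */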
2145
2146 /*
2147  * shmem_zero_setup - setup a shared anonymous mapping
2148  *
2149  * @vma: the vma to be mmapped, already prepared by do_mmap_pgoff
2150  */
2151 int shmem_zero_setup(struct vm_area_struct *vma)
2152 {
2153         struct file *file;
2154         loff_t size = vma->vm_end - vma->vm_start;
2155
2156         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2157         if (IS_ERR(file))
2158                 return PTR_ERR(file);
2159
2160         if (vma->vm_file)
2161                 fput(vma->vm_file);
2162         vma->vm_file = file;
2163         vma->vm_ops = &shmem_vm_ops;
2164         return 0;
2165 }
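
/*
 * Note (editorial): this backs shared anonymous memory: when userspace
 * calls mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS,
 * -1, 0), do_mmap_pgoff has no file, so the vma is quietly given this
 * unlinked "dev/zero" tmpfs file instead.
 */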
2166
2167 EXPORT_SYMBOL(shmem_file_setup);