1 /* cf-interface.c: CacheFiles to FS-Cache interface
3 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/file.h>
16 #include <linux/mount.h>
17 #include <linux/statfs.h>
18 #include <linux/buffer_head.h>
21 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
22 #define log2(n) ffz(~(n))
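/* note: ffz(~(n)) yields the index of the lowest set bit, so for the
 * power-of-two sizes this macro is used with, e.g. log2(4096) == 12 */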
24 /*****************************************************************************/
26 * look up the nominated node in this cache, creating it if necessary
28 static struct fscache_object *cachefiles_lookup_object(
29 struct fscache_cache *_cache,
30 struct fscache_object *_parent,
31 struct fscache_cookie *cookie)
33 struct cachefiles_object *parent, *object;
34 struct cachefiles_cache *cache;
35 struct cachefiles_xattr *auxdata;
36 unsigned keylen, auxlen;
43 cache = container_of(_cache, struct cachefiles_cache, cache);
44 parent = container_of(_parent, struct cachefiles_object, fscache);
46 _enter("{%s},%p,%p", cache->cache.identifier, parent, cookie);
48 /* create a new object record and a temporary leaf image */
49 object = kmem_cache_alloc(cachefiles_object_jar, SLAB_KERNEL);
53 atomic_set(&object->usage, 1);
54 atomic_set(&object->fscache_usage, 1);
56 fscache_object_init(&object->fscache);
57 object->fscache.cookie = cookie;
58 object->fscache.cache = parent->fscache.cache;
60 object->type = cookie->def->type;
62 /* get hold of the raw key
63 * - stick the length on the front and leave space on the back for the encoder */
66 buffer = kmalloc((2 + 512) + 3, GFP_KERNEL);
70 keylen = cookie->def->get_key(cookie->netfs_data, buffer + 2, 512);
71 ASSERTCMP(keylen, <, 512);
73 *(uint16_t *)buffer = keylen;
74 ((char *)buffer)[keylen + 2] = 0;
75 ((char *)buffer)[keylen + 3] = 0;
76 ((char *)buffer)[keylen + 4] = 0;
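/* sketch of the temporary key image built above (layout implied by the
 * stores, not a separate on-disk format):
 *
 *	buffer[0..1]			keylen as a host-endian uint16_t
 *	buffer[2..keylen+1]		the raw key bytes from the netfs
 *	buffer[keylen+2..keylen+4]	three NUL bytes of padding for the
 *					key encoder to work within
 */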
78 /* turn the raw key into something that can be used as a filename */
79 key = cachefiles_cook_key(buffer, keylen + 2, object->type);
83 /* get hold of the auxiliary data and prepend the object type */
86 if (cookie->def->get_aux) {
87 auxlen = cookie->def->get_aux(cookie->netfs_data,
89 ASSERTCMP(auxlen, <, 511);
92 auxdata->len = auxlen + 1;
93 auxdata->type = cookie->def->type;
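/* the xattr image thus consists of the object type byte followed by the
 * auxlen bytes of netfs auxiliary data, with ->len covering both (a note on
 * the intended layout) */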
95 /* look up the key, creating any missing bits */
96 ret = cachefiles_walk_to_object(parent, object, key, auxdata);
102 _leave(" = %p", &object->fscache);
103 return &object->fscache;
106 kmem_cache_free(cachefiles_object_jar, object);
109 _leave(" = %d", ret);
115 kmem_cache_free(cachefiles_object_jar, object);
117 _leave(" = -ENOMEM");
118 return ERR_PTR(-ENOMEM);
122 /*****************************************************************************/
124 * increment the usage count on an inode object (may fail if unmounting)
126 static struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
128 struct cachefiles_object *object;
130 _enter("%p", _object);
132 object = container_of(_object, struct cachefiles_object, fscache);
134 #ifdef CACHEFILES_DEBUG_SLAB
135 ASSERT((atomic_read(&object->fscache_usage) & 0xffff0000) != 0x6b6b0000);
138 atomic_inc(&object->fscache_usage);
139 return &object->fscache;
143 /*****************************************************************************/
145 * lock the semaphore on an object
147 static void cachefiles_lock_object(struct fscache_object *_object)
149 struct cachefiles_object *object;
151 _enter("%p", _object);
153 object = container_of(_object, struct cachefiles_object, fscache);
155 #ifdef CACHEFILES_DEBUG_SLAB
156 ASSERT((atomic_read(&object->fscache_usage) & 0xffff0000) != 0x6b6b0000);
159 down_write(&object->sem);
163 /*****************************************************************************/
165 * unlock the semaphore on an object
167 static void cachefiles_unlock_object(struct fscache_object *_object)
169 struct cachefiles_object *object;
171 _enter("%p", _object);
173 object = container_of(_object, struct cachefiles_object, fscache);
174 up_write(&object->sem);
178 /*****************************************************************************/
180 * update the auxiliary data for an object on disk
182 static void cachefiles_update_object(struct fscache_object *_object)
184 struct cachefiles_object *object;
185 struct cachefiles_cache *cache;
187 _enter("%p", _object);
189 object = container_of(_object, struct cachefiles_object, fscache);
190 cache = container_of(object->fscache.cache, struct cachefiles_cache, cache);
192 //cachefiles_tree_update_object(super, object);
196 /*****************************************************************************/
198 * dispose of a reference to an object
200 static void cachefiles_put_object(struct fscache_object *_object)
202 struct cachefiles_object *object;
203 struct cachefiles_cache *cache;
207 object = container_of(_object, struct cachefiles_object, fscache);
208 _enter("%p{%d}", object, atomic_read(&object->usage));
212 cache = container_of(object->fscache.cache,
213 struct cachefiles_cache, cache);
215 #ifdef CACHEFILES_DEBUG_SLAB
216 ASSERT((atomic_read(&object->fscache_usage) & 0xffff0000) != 0x6b6b0000);
219 if (!atomic_dec_and_test(&object->fscache_usage))
222 _debug("- kill object %p", object);
224 /* delete retired objects */
225 if (test_bit(FSCACHE_OBJECT_RECYCLING, &object->fscache.flags) &&
226 _object != cache->cache.fsdef
228 _debug("- retire object %p", object);
229 cachefiles_delete_object(cache, object);
232 /* close the filesystem stuff attached to the object */
233 if (object->backer != object->dentry) {
234 dput(object->backer);
235 object->backer = NULL;
238 /* note that an object is now inactive */
239 write_lock(&cache->active_lock);
240 rb_erase(&object->active_node, &cache->active_nodes);
241 write_unlock(&cache->active_lock);
243 dput(object->dentry);
244 object->dentry = NULL;
246 /* then dispose of the object */
247 kmem_cache_free(cachefiles_object_jar, object);
253 /*****************************************************************************/
257 static void cachefiles_sync_cache(struct fscache_cache *_cache)
259 struct cachefiles_cache *cache;
262 _enter("%p", _cache);
264 cache = container_of(_cache, struct cachefiles_cache, cache);
266 /* make sure all pages pinned by operations on behalf of the netfs are written to disc */
268 ret = fsync_super(cache->mnt->mnt_sb);
270 cachefiles_io_error(cache,
271 "Attempt to sync backing fs superblock"
272 " returned error %d",
277 /*****************************************************************************/
279 * set the data size on an object
281 static int cachefiles_set_i_size(struct fscache_object *_object, loff_t i_size)
283 struct cachefiles_object *object;
284 struct iattr newattrs;
287 _enter("%p,%llu", _object, i_size);
289 object = container_of(_object, struct cachefiles_object, fscache);
291 if (i_size == object->i_size)
297 ASSERT(S_ISREG(object->backer->d_inode->i_mode));
299 newattrs.ia_size = i_size;
300 newattrs.ia_valid = ATTR_SIZE;
302 mutex_lock(&object->backer->d_inode->i_mutex);
303 ret = notify_change(object->backer, &newattrs);
304 mutex_unlock(&object->backer->d_inode->i_mutex);
307 cachefiles_io_error_obj(object, "Size set failed");
311 _leave(" = %d", ret);
316 /*****************************************************************************/
318 * see if we have space for a number of pages in the cache
320 int cachefiles_has_space(struct cachefiles_cache *cache, unsigned nr)
322 struct kstatfs stats;
325 _enter("{%llu,%llu,%llu},%d",
326 cache->brun, cache->bcull, cache->bstop, nr);
328 /* find out how many pages of blockdev are available */
329 memset(&stats, 0, sizeof(stats));
331 ret = cache->mnt->mnt_sb->s_op->statfs(cache->mnt->mnt_root, &stats);
334 cachefiles_io_error(cache, "statfs failed");
338 stats.f_bavail >>= cache->bshift;
340 _debug("avail %llu", stats.f_bavail);
342 /* see if there is sufficient space */
343 stats.f_bavail -= nr;
346 if (stats.f_bavail < cache->bstop)
350 if (stats.f_bavail < cache->bcull)
353 if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
354 stats.f_bavail >= cache->brun
356 if (test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)) {
357 _debug("cease culling");
358 send_sigurg(&cache->cachefilesd->f_owner);
366 if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
367 _debug("### CULL CACHE ###");
368 send_sigurg(&cache->cachefilesd->f_owner);
371 _leave(" = %d", ret);
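/* Purely illustrative sketch of the policy above (helper name hypothetical,
 * not part of the driver): given the free block count remaining after the
 * request and thresholds configured so that brun > bcull > bstop, decide
 * whether to permit the allocation and whether culling should begin or
 * cease. */
static inline int cachefiles_space_policy(unsigned long long bavail,
					  unsigned long long brun,
					  unsigned long long bcull,
					  unsigned long long bstop,
					  int *begin_cull, int *cease_cull)
{
	*begin_cull = 0;
	*cease_cull = 0;

	if (bavail < bstop) {
		*begin_cull = 1;	/* too little space: cull and refuse */
		return -ENOBUFS;
	}
	if (bavail < bcull)
		*begin_cull = 1;	/* permit, but start making room */
	else if (bavail >= brun)
		*cease_cull = 1;	/* ample space again: stop culling */
	return 0;			/* allocation permitted */
}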
376 /*****************************************************************************/
378 * wait queue callback: detect the completion of a read on a backing file page
380 static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
381 int sync, void *_key)
383 struct cachefiles_one_read *monitor =
384 container_of(wait, struct cachefiles_one_read, monitor);
385 struct wait_bit_key *key = _key;
386 struct page *page = wait->private;
390 _enter("{%lu},%u,%d,{%p,%u}",
391 monitor->netfs_page->index, mode, sync,
392 key->flags, key->bit_nr);
394 if (key->flags != &page->flags ||
395 key->bit_nr != PG_locked)
398 _debug("--- monitor %p %lx ---", page, page->flags);
400 if (!PageUptodate(page) && !PageError(page))
403 /* remove from the waitqueue */
404 list_del(&wait->task_list);
406 /* move onto the action list and queue for keventd */
407 ASSERT(monitor->object);
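/* note: this callback may be run from the page I/O completion path (possibly
 * in interrupt context), which is why the process-context users of work_lock
 * elsewhere in this file take it with spin_lock_irq() while a plain
 * spin_lock() suffices here */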
409 spin_lock(&monitor->object->work_lock);
410 list_move(&monitor->obj_link, &monitor->object->read_list);
411 spin_unlock(&monitor->object->work_lock);
413 schedule_work(&monitor->object->read_work);
419 /*****************************************************************************/
421 * let keventd drive the copying of pages
423 void cachefiles_read_copier_work(void *_object)
425 struct cachefiles_one_read *monitor;
426 struct cachefiles_object *object = _object;
427 struct fscache_cookie *cookie = object->fscache.cookie;
428 struct pagevec pagevec;
431 _enter("{ino=%lu}", object->backer->d_inode->i_ino);
433 pagevec_init(&pagevec, 0);
436 spin_lock_irq(&object->work_lock);
438 while (!list_empty(&object->read_list)) {
439 monitor = list_entry(object->read_list.next,
440 struct cachefiles_one_read, obj_link);
441 list_del(&monitor->obj_link);
443 spin_unlock_irq(&object->work_lock);
445 _debug("- copy {%lu}", monitor->back_page->index);
448 if (PageUptodate(monitor->back_page)) {
449 copy_highpage(monitor->netfs_page, monitor->back_page);
451 pagevec_add(&pagevec, monitor->netfs_page);
452 cookie->def->mark_pages_cached(
454 monitor->netfs_page->mapping,
456 pagevec_reinit(&pagevec);
462 cachefiles_io_error_obj(
464 "readpage failed on backing file %lx",
465 (unsigned long) monitor->back_page->flags);
467 page_cache_release(monitor->back_page);
469 monitor->end_io_func(monitor->netfs_page,
473 page_cache_release(monitor->netfs_page);
474 fscache_put_context(cookie, monitor->context);
477 /* let keventd have some air occasionally */
479 if (max < 0 || need_resched()) {
480 if (!list_empty(&object->read_list))
481 schedule_work(&object->read_work);
482 _leave(" [maxed out]");
486 spin_lock_irq(&object->work_lock);
489 spin_unlock_irq(&object->work_lock);
495 /*****************************************************************************/
497 * read the page corresponding to the given netfs page from the backing file
498 * - an uncertain page is simply discarded, to be tried again another time
500 static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
501 fscache_rw_complete_t end_io_func,
503 struct page *netpage,
504 struct pagevec *lru_pvec)
506 struct cachefiles_one_read *monitor;
507 struct address_space *bmapping;
508 struct page *newpage, *backpage;
513 ASSERTCMP(pagevec_count(lru_pvec), ==, 0);
514 pagevec_reinit(lru_pvec);
516 _debug("read back %p{%lu,%d}",
517 netpage, netpage->index, page_count(netpage));
519 monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
523 monitor->netfs_page = netpage;
524 monitor->object = object;
525 monitor->end_io_func = end_io_func;
526 monitor->context = fscache_get_context(object->fscache.cookie,
529 init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
531 /* attempt to get hold of the backing page */
532 bmapping = object->backer->d_inode->i_mapping;
536 backpage = find_get_page(bmapping, netpage->index);
538 goto backing_page_already_present;
541 newpage = page_cache_alloc_cold(bmapping);
546 ret = add_to_page_cache(newpage, bmapping,
547 netpage->index, GFP_KERNEL);
549 goto installed_new_backing_page;
554 /* we've installed a new backing page, so now we need to add it
555 * to the LRU list and start it reading */
556 installed_new_backing_page:
557 _debug("- new %p", newpage);
562 page_cache_get(backpage);
563 pagevec_add(lru_pvec, backpage);
564 __pagevec_lru_add(lru_pvec);
566 ret = bmapping->a_ops->readpage(NULL, backpage);
570 /* set the monitor to transfer the data across */
571 monitor_backing_page:
572 _debug("- monitor add");
574 /* install the monitor */
575 page_cache_get(monitor->netfs_page);
576 page_cache_get(backpage);
577 monitor->back_page = backpage;
579 spin_lock_irq(&object->work_lock);
580 list_add_tail(&monitor->obj_link, &object->read_pend_list);
581 spin_unlock_irq(&object->work_lock);
583 monitor->monitor.private = backpage;
584 install_page_waitqueue_monitor(backpage, &monitor->monitor);
587 /* but the page may have been read before the monitor was
588 * installed, so the monitor may miss the event - so we have to
589 * ensure that we do get one in such a case */
590 if (!TestSetPageLocked(backpage))
591 unlock_page(backpage);
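/* (if the read has already finished, the page is unlocked, so taking and
 * immediately dropping the lock here replays the wake-up that the monitor
 * would otherwise have missed) */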
594 /* if the backing page is already present, it can be in one of
595 * three states: read in progress, read failed or read okay */
596 backing_page_already_present:
600 page_cache_release(newpage);
604 if (PageError(backpage))
607 if (PageUptodate(backpage))
608 goto backing_page_already_uptodate;
610 goto monitor_backing_page;
612 /* the backing page is already up to date, attach the netfs
613 * page to the pagecache and LRU and copy the data across */
614 backing_page_already_uptodate:
615 _debug("- uptodate");
617 copy_highpage(netpage, backpage);
618 end_io_func(netpage, context, 0);
626 page_cache_release(backpage);
628 fscache_put_context(object->fscache.cookie, monitor->context);
632 _leave(" = %d", ret);
636 _debug("read error %d", ret);
640 cachefiles_io_error_obj(object, "page read error on backing file");
645 page_cache_release(newpage);
647 fscache_put_context(object->fscache.cookie, monitor->context);
650 _leave(" = -ENOMEM");
655 /*****************************************************************************/
657 * read a page from the cache or allocate a block in which to store it
658 * - cache withdrawal is prevented by the caller
659 * - returns -EINTR if interrupted
660 * - returns -ENOMEM if ran out of memory
661 * - returns -ENOBUFS if no buffers can be made available
662 * - returns -ENOBUFS if page is beyond EOF
663 * - if the page is backed by a block in the cache:
664 * - a read will be started which will call the callback on completion
665 * - 0 will be returned
666 * - else if the page is unbacked:
667 * - the metadata will be retained
668 * - -ENODATA will be returned
670 static int cachefiles_read_or_alloc_page(struct fscache_object *_object,
672 fscache_rw_complete_t end_io_func,
676 struct cachefiles_object *object;
677 struct cachefiles_cache *cache;
678 struct fscache_cookie *cookie;
679 struct pagevec pagevec;
681 sector_t block0, block;
685 object = container_of(_object, struct cachefiles_object, fscache);
686 cache = container_of(object->fscache.cache, struct cachefiles_cache, cache);
688 _enter("{%p},{%lx},,,", object, page->index);
693 inode = object->backer->d_inode;
694 ASSERT(S_ISREG(inode->i_mode));
695 ASSERT(inode->i_mapping->a_ops->bmap);
696 ASSERT(inode->i_mapping->a_ops->readpages);
698 /* calculate the shift required to use bmap */
699 if (inode->i_sb->s_blocksize > PAGE_SIZE)
702 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
704 cookie = object->fscache.cookie;
706 pagevec_init(&pagevec, 0);
708 /* we assume the absence or presence of the first block is a good
709 * enough indication for the page as a whole
710 * - TODO: don't use bmap() for this as it is _not_ actually good
711 * enough for this as it doesn't indicate errors, but it's all we've got for the moment */
714 block0 = page->index;
717 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
718 _debug("%llx -> %llx", (unsigned long long) block0, (unsigned long long) block);
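/* for example, with 4KB pages over a backing fs using 1KB blocks the shift
 * is 2, so netfs page N probes backing block N << 2; bmap() returning 0
 * means no block is mapped there yet (rough illustration, subject to the
 * bmap() caveat noted above) */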
721 /* submit the apparently valid page to the backing fs to be read from disk */
723 ret = cachefiles_read_backing_file_one(object,
729 } else if (cachefiles_has_space(cache, 1) == 0) {
730 /* there's space in the cache we can use */
731 pagevec_add(&pagevec, page);
732 cookie->def->mark_pages_cached(cookie->netfs_data,
733 page->mapping, &pagevec);
739 _leave(" = %d", ret);
744 /*****************************************************************************/
746 * read the corresponding pages to the given set from the backing file
747 * - any uncertain pages are simply discarded, to be tried again another time
749 static int cachefiles_read_backing_file(struct cachefiles_object *object,
750 fscache_rw_complete_t end_io_func,
752 struct address_space *mapping,
753 struct list_head *list,
754 struct pagevec *lru_pvec)
756 struct cachefiles_one_read *monitor = NULL;
757 struct address_space *bmapping = object->backer->d_inode->i_mapping;
758 struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
763 ASSERTCMP(pagevec_count(lru_pvec), ==, 0);
764 pagevec_reinit(lru_pvec);
766 list_for_each_entry_safe(netpage, _n, list, lru) {
767 list_del(&netpage->lru);
769 _debug("read back %p{%lu,%d}",
770 netpage, netpage->index, page_count(netpage));
773 monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
777 monitor->object = object;
778 monitor->end_io_func = end_io_func;
779 monitor->context = fscache_get_context(
780 object->fscache.cookie, context);
782 init_waitqueue_func_entry(&monitor->monitor,
783 cachefiles_read_waiter);
787 backpage = find_get_page(bmapping, netpage->index);
789 goto backing_page_already_present;
792 newpage = page_cache_alloc_cold(bmapping);
797 ret = add_to_page_cache(newpage, bmapping,
798 netpage->index, GFP_KERNEL);
800 goto installed_new_backing_page;
805 /* we've installed a new backing page, so now we need to add it
806 * to the LRU list and start it reading */
807 installed_new_backing_page:
808 _debug("- new %p", newpage);
813 page_cache_get(backpage);
814 if (!pagevec_add(lru_pvec, backpage))
815 __pagevec_lru_add(lru_pvec);
818 ret = bmapping->a_ops->readpage(NULL, backpage);
822 /* add the netfs page to the pagecache and LRU, and set the
823 * monitor to transfer the data across */
824 monitor_backing_page:
825 _debug("- monitor add");
827 ret = add_to_page_cache(netpage, mapping, netpage->index,
830 if (ret == -EEXIST) {
831 page_cache_release(netpage);
837 page_cache_get(netpage);
838 if (!pagevec_add(lru_pvec, netpage))
839 __pagevec_lru_add(lru_pvec);
841 /* install a monitor */
842 page_cache_get(netpage);
843 monitor->netfs_page = netpage;
845 page_cache_get(backpage);
846 monitor->back_page = backpage;
848 spin_lock_irq(&object->work_lock);
849 list_add_tail(&monitor->obj_link, &object->read_pend_list);
850 spin_unlock_irq(&object->work_lock);
852 monitor->monitor.private = backpage;
853 install_page_waitqueue_monitor(backpage, &monitor->monitor);
856 /* but the page may have been read before the monitor was
857 * installed, so the monitor may miss the event - so we have to
858 * ensure that we do get one in such a case */
859 if (!TestSetPageLocked(backpage)) {
860 _debug("2unlock %p", backpage);
861 unlock_page(backpage);
864 page_cache_release(backpage);
867 page_cache_release(netpage);
871 /* if the backing page is already present, it can be in one of
872 * three states: read in progress, read failed or read okay */
873 backing_page_already_present:
874 _debug("- present %p", backpage);
876 if (PageError(backpage))
879 if (PageUptodate(backpage))
880 goto backing_page_already_uptodate;
882 _debug("- not ready %p{%lx}", backpage, backpage->flags);
884 if (TestSetPageLocked(backpage))
885 goto monitor_backing_page;
887 if (PageError(backpage)) {
888 unlock_page(backpage);
892 if (PageUptodate(backpage))
893 goto backing_page_already_uptodate_unlock;
895 /* we've locked a page that's neither up to date nor erroneous,
896 * so we need to attempt to read it again */
897 goto reread_backing_page;
899 /* the backing page is already up to date, attach the netfs
900 * page to the pagecache and LRU and copy the data across */
901 backing_page_already_uptodate_unlock:
902 unlock_page(backpage);
903 backing_page_already_uptodate:
904 _debug("- uptodate");
906 ret = add_to_page_cache(netpage, mapping, netpage->index,
909 if (ret == -EEXIST) {
910 page_cache_release(netpage);
916 copy_highpage(netpage, backpage);
918 page_cache_release(backpage);
921 page_cache_get(netpage);
922 if (!pagevec_add(lru_pvec, netpage))
923 __pagevec_lru_add(lru_pvec);
925 end_io_func(netpage, context, 0);
927 page_cache_release(netpage);
938 pagevec_lru_add(lru_pvec);
941 page_cache_release(newpage);
943 page_cache_release(netpage);
945 page_cache_release(backpage);
947 fscache_put_context(object->fscache.cookie, monitor->context);
951 list_for_each_entry_safe(netpage, _n, list, lru) {
952 list_del(&netpage->lru);
953 page_cache_release(netpage);
956 _leave(" = %d", ret);
965 _debug("read error %d", ret);
969 cachefiles_io_error_obj(object, "page read error on backing file");
975 /*****************************************************************************/
977 * read a list of pages from the cache or allocate blocks in which to store them
980 static int cachefiles_read_or_alloc_pages(struct fscache_object *_object,
981 struct address_space *mapping,
982 struct list_head *pages,
984 fscache_rw_complete_t end_io_func,
988 struct cachefiles_object *object;
989 struct cachefiles_cache *cache;
990 struct fscache_cookie *cookie;
991 struct list_head backpages;
992 struct pagevec pagevec;
994 struct page *page, *_n;
995 unsigned shift, nrbackpages;
996 int ret, ret2, space;
998 object = container_of(_object, struct cachefiles_object, fscache);
999 cache = container_of(object->fscache.cache, struct cachefiles_cache, cache);
1001 _enter("{%p},,%d,,", object, *nr_pages);
1003 if (!object->backer)
1007 if (cachefiles_has_space(cache, *nr_pages) < 0)
1010 inode = object->backer->d_inode;
1011 ASSERT(S_ISREG(inode->i_mode));
1012 ASSERT(inode->i_mapping->a_ops->bmap);
1013 ASSERT(inode->i_mapping->a_ops->readpages);
1015 /* calculate the shift required to use bmap */
1016 if (inode->i_sb->s_blocksize > PAGE_SIZE)
1019 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
1021 pagevec_init(&pagevec, 0);
1023 cookie = object->fscache.cookie;
1025 INIT_LIST_HEAD(&backpages);
1028 ret = space ? -ENODATA : -ENOBUFS;
1029 list_for_each_entry_safe(page, _n, pages, lru) {
1030 sector_t block0, block;
1032 /* we assume the absence or presence of the first block is a
1033 * good enough indication for the page as a whole
1034 * - TODO: don't use bmap() for this as it is _not_ actually
1035 * good enough for this as it doesn't indicate errors, but
1036 * it's all we've got for the moment
1038 block0 = page->index;
1041 block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
1043 _debug("%llx -> %llx", (unsigned long long) block0, (unsigned long long) block);
1046 /* we have data - add it to the list to give to the
1048 list_move(&page->lru, &backpages);
1051 } else if (space && pagevec_add(&pagevec, page) == 0) {
1052 cookie->def->mark_pages_cached(cookie->netfs_data,
1054 pagevec_reinit(&pagevec);
1059 if (pagevec_count(&pagevec) > 0) {
1060 cookie->def->mark_pages_cached(cookie->netfs_data,
1062 pagevec_reinit(&pagevec);
1065 if (list_empty(pages))
1068 /* submit the apparently valid pages to the backing fs to be read from disk */
1069 if (nrbackpages > 0) {
1070 ret2 = cachefiles_read_backing_file(object,
1077 ASSERTCMP(pagevec_count(&pagevec), ==, 0);
1079 if (ret2 == -ENOMEM || ret2 == -EINTR)
1083 _leave(" = %d [nr=%u%s]",
1084 ret, *nr_pages, list_empty(pages) ? " empty" : "");
1089 /*****************************************************************************/
1091 * allocate a block in the cache in which to store a page
1092 * - cache withdrawal is prevented by the caller
1093 * - returns -EINTR if interrupted
1094 * - returns -ENOMEM if ran out of memory
1095 * - returns -ENOBUFS if no buffers can be made available
1096 * - returns -ENOBUFS if page is beyond EOF
1098 * - the metadata will be retained
1099 * - 0 will be returned
1101 static int cachefiles_allocate_page(struct fscache_object *_object,
1105 struct cachefiles_object *object;
1106 struct cachefiles_cache *cache;
1108 object = container_of(_object, struct cachefiles_object, fscache);
1109 cache = container_of(object->fscache.cache,
1110 struct cachefiles_cache, cache);
1112 _enter("%p,{%lx},,,", object, page->index);
1114 return cachefiles_has_space(cache, 1);
1118 /*****************************************************************************/
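/*
 * let keventd drive the writing of queued pages to the backing file
 */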
1122 void cachefiles_write_work(void *_object)
1124 struct cachefiles_one_write *writer;
1125 struct cachefiles_object *object = _object;
1128 _enter("%p", object);
1130 ASSERT(!irqs_disabled());
1132 spin_lock_irq(&object->work_lock);
1135 while (!list_empty(&object->write_list)) {
1136 writer = list_entry(object->write_list.next,
1137 struct cachefiles_one_write, obj_link);
1138 list_del(&writer->obj_link);
1140 spin_unlock_irq(&object->work_lock);
1142 _debug("- store {%lu}", writer->netfs_page->index);
1144 ret = generic_file_buffered_write_one_kernel_page(
1145 object->backer->d_inode->i_mapping,
1146 writer->netfs_page->index,
1147 writer->netfs_page);
1149 if (ret == -ENOSPC) {
1151 } else if (ret == -EIO) {
1152 cachefiles_io_error_obj(object,
1153 "write page to backing file"
1158 _debug("- callback");
1159 writer->end_io_func(writer->netfs_page,
1163 _debug("- put net");
1164 page_cache_release(writer->netfs_page);
1165 fscache_put_context(object->fscache.cookie, writer->context);
1168 /* let keventd have some air occasionally */
1170 if (max < 0 || need_resched()) {
1171 if (!list_empty(&object->write_list))
1172 schedule_work(&object->write_work);
1173 _leave(" [maxed out]");
1178 spin_lock_irq(&object->work_lock);
1181 spin_unlock_irq(&object->work_lock);
1186 /*****************************************************************************/
1188 * request a page be stored in the cache
1189 * - cache withdrawal is prevented by the caller
1190 * - this request may be ignored if there's no cache block available, in which
1191 * case -ENOBUFS will be returned
1192 * - if the op is in progress, 0 will be returned
1194 static int cachefiles_write_page(struct fscache_object *_object,
1196 fscache_rw_complete_t end_io_func,
1200 // struct cachefiles_one_write *writer;
1201 struct cachefiles_object *object;
1204 object = container_of(_object, struct cachefiles_object, fscache);
1206 _enter("%p,%p{%lx},,,", object, page, page->index);
1208 if (!object->backer)
1211 ASSERT(S_ISREG(object->backer->d_inode->i_mode));
1213 #if 0 // set to 1 for deferred writing
1214 /* queue the operation for deferred processing by keventd */
1215 writer = kzalloc(sizeof(*writer), GFP_KERNEL);
1219 page_cache_get(page);
1220 writer->netfs_page = page;
1221 writer->object = object;
1222 writer->end_io_func = end_io_func;
1223 writer->context = fscache_get_context(object->fscache.cookie, context);
1225 spin_lock_irq(&object->work_lock);
1226 list_add_tail(&writer->obj_link, &object->write_list);
1227 spin_unlock_irq(&object->work_lock);
1229 schedule_work(&object->write_work);
1233 /* copy the page to the backing filesystem and let it store it in its own time */
1234 ret = generic_file_buffered_write_one_kernel_page(
1235 object->backer->d_inode->i_mapping, page->index, page);
1239 cachefiles_io_error_obj(object,
1240 "write page to backing file"
1245 end_io_func(page, context, ret);
1248 _leave(" = %d", ret);
1253 /*****************************************************************************/
1255 * detach backing blocks from a set of pages
1256 * - cache withdrawal is prevented by the caller
1258 static void cachefiles_uncache_pages(struct fscache_object *_object,
1259 struct pagevec *pagevec)
1261 struct cachefiles_object *object;
1262 struct cachefiles_cache *cache;
1264 object = container_of(_object, struct cachefiles_object, fscache);
1265 cache = container_of(object->fscache.cache,
1266 struct cachefiles_cache, cache);
1268 _enter("%p,{%lu,%lx},,,",
1269 object, pagevec->nr, pagevec->pages[0]->index);
1273 /*****************************************************************************/
1275 * dissociate a cache from all the pages it was backing
1277 static void cachefiles_dissociate_pages(struct fscache_cache *cache)
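/*
 * the operations table handed to FS-Cache when the cachefilesd daemon binds
 * this cache; lookup_object and put_object bracket an object's lifetime,
 * while the read, write and uncache operations carry the per-page traffic
 */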
1283 struct fscache_cache_ops cachefiles_cache_ops = {
1284 .name = "cachefiles",
1285 .lookup_object = cachefiles_lookup_object,
1286 .grab_object = cachefiles_grab_object,
1287 .lock_object = cachefiles_lock_object,
1288 .unlock_object = cachefiles_unlock_object,
1289 .update_object = cachefiles_update_object,
1290 .put_object = cachefiles_put_object,
1291 .sync_cache = cachefiles_sync_cache,
1292 .set_i_size = cachefiles_set_i_size,
1293 .read_or_alloc_page = cachefiles_read_or_alloc_page,
1294 .read_or_alloc_pages = cachefiles_read_or_alloc_pages,
1295 .allocate_page = cachefiles_allocate_page,
1296 .write_page = cachefiles_write_page,
1297 .uncache_pages = cachefiles_uncache_pages,
1298 .dissociate_pages = cachefiles_dissociate_pages,