X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fhfsplus%2Fbnode.c;h=29da6574ba77941b22c15b4e959cd4c84b8bbee1;hb=refs%2Fheads%2Fvserver;hp=5ce5d7713013344d897e3832733ccbc4873455c8;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 5ce5d7713..29da6574b 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -13,13 +13,10 @@
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 #include <linux/swap.h>
-#include <linux/version.h>
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
-#define REF_PAGES	0
-
 /* Copy a specified range of bytes from the raw data of a node */
 void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
@@ -44,7 +41,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
 {
-	u16 data;
+	__be16 data;
 	// optimize later...
 	hfs_bnode_read(node, &data, off, 2);
 	return be16_to_cpu(data);
@@ -55,7 +52,7 @@ u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
 	u8 data;
 	// optimize later...
 	hfs_bnode_read(node, &data, off, 1);
-	return be16_to_cpu(data);
+	return data;
 }
 
 void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
@@ -98,9 +95,9 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 
 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
 {
-	data = cpu_to_be16(data);
+	__be16 v = cpu_to_be16(data);
 	// optimize later...
-	hfs_bnode_write(node, &data, off, 2);
+	hfs_bnode_write(node, &v, off, 2);
 }
 
 void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
@@ -297,7 +294,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 void hfs_bnode_dump(struct hfs_bnode *node)
 {
 	struct hfs_bnode_desc desc;
-	u32 cnid;
+	__be32 cnid;
 	int i, off, key_off;
 
 	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
@@ -334,7 +331,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 {
 	struct hfs_btree *tree;
 	struct hfs_bnode *tmp;
-	u32 cnid;
+	__be32 cnid;
 
 	tree = node->tree;
 	if (node->prev) {
@@ -361,7 +358,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 
 	// move down?
 	if (!node->prev && !node->next) {
-		printk("hfs_btree_del_level\n");
+		printk(KERN_DEBUG "hfs_btree_del_level\n");
 	}
 	if (!node->parent) {
 		tree->root = 0;
@@ -382,7 +379,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
 		return NULL;
 	}
 
@@ -405,17 +402,16 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
 		return NULL;
 	}
 
 	sb = tree->inode->i_sb;
 	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
 		sizeof(struct page *);
-	node = kmalloc(size, GFP_KERNEL);
+	node = kzalloc(size, GFP_KERNEL);
 	if (!node)
 		return NULL;
-	memset(node, 0, size);
 	node->tree = tree;
 	node->this = cnid;
 	set_bit(HFS_BNODE_NEW, &node->flags);
@@ -443,12 +439,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	block = off >> PAGE_CACHE_SHIFT;
 	node->page_offset = off & ~PAGE_CACHE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
-		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
-#if !REF_PAGES
+		if (PageError(page)) {
+			page_cache_release(page);
+			goto fail;
+		}
 		page_cache_release(page);
-#endif
 		node->page[i] = page;
 	}
 
@@ -467,8 +465,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
 		;
-	if (!*p)
-		BUG();
+	BUG_ON(!*p);
 	*p = node->next_hash;
 	node->tree->node_hash_cnt--;
 }
@@ -487,12 +484,16 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
 		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		if (test_bit(HFS_BNODE_ERROR, &node->flags))
+			goto node_error;
 		return node;
 	}
 	spin_unlock(&tree->hash_lock);
 	node = __hfs_bnode_create(tree, num);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags))
+		goto node_error;
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
@@ -573,12 +574,17 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 	node = hfs_bnode_findhash(tree, num);
 	spin_unlock(&tree->hash_lock);
 	if (node) {
-		printk("new node %u already hashed?\n", num);
-		BUG();
+		printk(KERN_CRIT "new node %u already hashed?\n", num);
+		WARN_ON(1);
+		return node;
 	}
 	node = __hfs_bnode_create(tree, num);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
+	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
+		hfs_bnode_put(node);
+		return ERR_PTR(-EIO);
+	}
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
@@ -600,13 +606,6 @@ void hfs_bnode_get(struct hfs_bnode *node)
 {
 	if (node) {
 		atomic_inc(&node->refcnt);
-#if REF_PAGES
-		{
-			int i;
-			for (i = 0; i < node->tree->pages_per_bnode; i++)
-				get_page(node->page[i]);
-		}
-#endif
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
 		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
 	}
@@ -621,20 +620,13 @@ void hfs_bnode_put(struct hfs_bnode *node)
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
 		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
-		if (!atomic_read(&node->refcnt))
-			BUG();
-		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
-#if REF_PAGES
-			for (i = 0; i < tree->pages_per_bnode; i++)
-				put_page(node->page[i]);
-#endif
+		BUG_ON(!atomic_read(&node->refcnt));
+		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
-		}
 		for (i = 0; i < tree->pages_per_bnode; i++) {
+			if (!node->page[i])
+				continue;
 			mark_page_accessed(node->page[i]);
-#if REF_PAGES
-			put_page(node->page[i]);
-#endif
 		}
 
 		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
@@ -648,14 +640,3 @@ void hfs_bnode_put(struct hfs_bnode *node)
 	}
 }
 
-void hfsplus_lock_bnode(struct hfs_bnode *node)
-{
-	wait_event(node->lock_wq, !test_and_set_bit(HFS_BNODE_LOCK, &node->flags));
-}
-
-void hfsplus_unlock_bnode(struct hfs_bnode *node)
-{
-	clear_bit(HFS_BNODE_LOCK, &node->flags);
-	if (waitqueue_active(&node->lock_wq))
-		wake_up(&node->lock_wq);
-}
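
Note on the recurring u16/u32 -> __be16/__be32 hunks above: these are sparse endianness annotations. HFS+ stores b-tree fields big-endian on disk, so multi-byte accessors must convert with be16_to_cpu()/cpu_to_be16(), while a single byte has no byte order and must be returned as-is (the hfs_bnode_read_u8 fix). The following standalone sketch is a hypothetical userspace illustration only, not part of the patch; read_be16, read_u8 and node_raw are invented names for the example.

/*
 * Minimal sketch of the endianness rule the fixed accessors follow:
 * a big-endian 16-bit field is assembled into CPU order (what
 * be16_to_cpu() does in the kernel), a single byte is passed through.
 */
#include <stdint.h>
#include <stdio.h>

static const uint8_t node_raw[] = { 0x12, 0x34, 0xab };	/* made-up node bytes */

static uint16_t read_be16(const uint8_t *buf, int off)
{
	/* big-endian on disk: high byte first, then low byte */
	return (uint16_t)((buf[off] << 8) | buf[off + 1]);
}

static uint8_t read_u8(const uint8_t *buf, int off)
{
	return buf[off];	/* one byte: no conversion needed */
}

int main(void)
{
	printf("be16 at 0: 0x%04x\n", read_be16(node_raw, 0));	/* prints 0x1234 */
	printf("u8   at 2: 0x%02x\n", read_u8(node_raw, 2));	/* prints 0xab */
	return 0;
}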