return n;
}
-static inline int block_to_cpu(struct sysv_sb_info *sbi, u32 nr)
+static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
{
return sbi->s_block_base + fs32_to_cpu(sbi, nr);
}
/*
 * One step of an indirect-block lookup chain.
 */
typedef struct {
	sysv_zone_t *p;		/* address of the zone slot this step read */
	sysv_zone_t key;	/* snapshot of *p taken when the step was recorded */
	struct buffer_head *bh;	/* buffer holding the slot; may be NULL (see splice path) */
} Indirect;
/*
 * Protects the indirect-chain pointers: readers take it around
 * get_branch(); writers hold it while splicing/truncating branches.
 */
static DEFINE_RWLOCK(pointers_lock);
/*
 * Record one link of the lookup chain: p->p remembers the slot address,
 * p->key caches the value read from it, p->bh the buffer it lives in.
 */
static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
	/*
	 * NOTE(review): lines are elided in this excerpt; the statement below
	 * is the tail of verify_chain(), not part of add_chain().
	 */
	return (from > to);
}
-static inline u32 *block_end(struct buffer_head *bh)
+static inline sysv_zone_t *block_end(struct buffer_head *bh)
{
- return (u32*)((char*)bh->b_data + bh->b_size);
+ return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}
/*
 * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
 *
 * Walks the chain of indirect blocks for a file block.  Returns NULL once
 * every step resolved; otherwise the last step reached.
 * NOTE(review): interior lines of this function are elided in this excerpt.
 */
static Indirect *get_branch(struct inode *inode,
			    int depth,
			    int offsets[],
	/* NOTE(review): remaining parameters and loop head elided here. */
		bh = sb_bread(sb, block);	/* read the next indirect block */
		if (!bh)
			goto failure;
		/* Re-validate: a concurrent writer may have moved the chain. */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;	/* hole: zone slot is zero */
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;	/* chain changed while we were walking it */
	goto no_block;
		/*
		 * NOTE(review): fragment — interior of the branch-allocation
		 * path; the enclosing function is elided in this excerpt.
		 */
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);	/* new indirect block starts all-holes */
		branch[n].bh = bh;
		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;		/* link the next level in */
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
	/* NOTE(review): fragment of the splice path; enclosing function elided. */
	*where->p = where->key;		/* publish the new branch to the chain */
	write_unlock(&pointers_lock);

	inode->i_ctime = CURRENT_TIME_SEC;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		goto out;
reread:
	/* get_branch() must run under pointers_lock (see its comment above). */
	read_lock(&pointers_lock);
	partial = get_branch(inode, depth, offsets, chain, &err);
	read_unlock(&pointers_lock);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		/* NOTE(review): lines elided here in this excerpt. */
		goto reread;
	}
/*
 * True if every zone slot in [p, q) is zero (i.e. the range is all holes).
 * NOTE(review): the tail of this helper is elided in this excerpt.
 */
static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
{
	while (p < q)
		if (*p++)
	/* NOTE(review): fragment — the function name and first parameter of
	 * this definition are elided in this excerpt (truncation helper that
	 * locates the topmost branch shared with blocks being freed). */
			   int depth,
			   int offsets[],
			   Indirect chain[],
			   sysv_zone_t *top)
{
	Indirect *partial, *p;
	int k, err;
	/* NOTE(review): lines elided here in this excerpt. */
		write_unlock(&pointers_lock);
		goto no_top;
	}
	/* Step back over trailing steps whose blocks are entirely holes. */
	for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 */
	/* NOTE(review): comment above is cut mid-sentence by an elision. */
	return partial;
}
/*
 * Clear every zone slot in [p, q) and free the block each one named.
 * NOTE(review): the function's closing brace falls on an elided line.
 */
static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
{
	for ( ; p < q ; p++) {
		sysv_zone_t nr = *p;
		if (nr) {
			*p = 0;	/* clear the slot before releasing the block */
			sysv_free_block(inode->i_sb, nr);
		}
	}
/*
 * Recursively free a subtree of indirect blocks: at depth > 0 each slot
 * names another indirect block whose contents are freed first; depth 0
 * (elided here) frees data blocks.
 * NOTE(review): interior lines of this function are elided in this excerpt.
 */
static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
{
	struct buffer_head * bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			sysv_zone_t nr = *p;
			if (!nr)
				continue;
			*p = 0;	/* detach before freeing the subtree */
			/* NOTE(review): lines elided here in this excerpt. */
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (sysv_zone_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);	/* contents are dead; don't write them back */
			sysv_free_block(sb, nr);
/*
 * Truncate the inode's data to i_size, freeing direct and indirect
 * blocks past the new end.
 * NOTE(review): interior lines of this function are elided in this excerpt.
 */
void sysv_truncate (struct inode * inode)
{
	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	sysv_zone_t nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	/* NOTE(review): lines elided here in this excerpt. */
		}
		n++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		sysv_sync_inode (inode);	/* synchronous inode: write it now */
	else
/* NOTE(review): body of the bmap address_space op; its signature is elided
 * in this excerpt.  Delegates block mapping to the generic helper. */
{
	return generic_block_bmap(mapping,block,get_block);
}
/*
 * Address-space operations for sysv regular files.
 * NOTE(review): the initializer continues past the end of this excerpt.
 */
const struct address_space_operations sysv_aops = {
	.readpage = sysv_readpage,
	.writepage = sysv_writepage,
	.sync_page = block_sync_page,