#include "xfs_mac.h"
#include "xfs_acl.h"
-#include <linux/vserver/xid.h>
+#include <linux/vs_tag.h>
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
xfs_dinode_core_t *mem_core = (xfs_dinode_core_t *)dip;
xfs_arch_t arch = ARCH_CONVERT;
uint32_t uid = 0, gid = 0;
- uint16_t xid = 0;
+ uint16_t tag = 0;
ASSERT(dir);
if (dir < 0) {
- xid = mem_core->di_xid;
+ tag = mem_core->di_tag;
/* FIXME: supposed to use superblock flag */
- uid = XIDINO_UID(1, mem_core->di_uid, xid);
- gid = XIDINO_GID(1, mem_core->di_gid, xid);
- xid = XIDINO_XID(1, xid);
+ uid = TAGINO_UID(1, mem_core->di_uid, tag);
+ gid = TAGINO_GID(1, mem_core->di_gid, tag);
+ tag = TAGINO_TAG(1, tag);
}
INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
INT_XLATE(buf_core->di_uid, uid, dir, arch);
INT_XLATE(buf_core->di_gid, gid, dir, arch);
- INT_XLATE(buf_core->di_xid, xid, dir, arch);
+ INT_XLATE(buf_core->di_tag, tag, dir, arch);
INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
if (dir > 0) {
/* FIXME: supposed to use superblock flag */
- mem_core->di_uid = INOXID_UID(1, uid, gid);
- mem_core->di_gid = INOXID_GID(1, uid, gid);
- mem_core->di_xid = INOXID_XID(1, uid, gid, xid);
+ mem_core->di_uid = INOTAG_UID(1, uid, gid);
+ mem_core->di_gid = INOTAG_GID(1, uid, gid);
+ mem_core->di_tag = INOTAG_TAG(1, uid, gid, tag);
memcpy(mem_core->di_pad, buf_core->di_pad,
sizeof(buf_core->di_pad));
} else {
xfs_trans_t *tp,
xfs_ino_t ino,
xfs_inode_t **ipp,
- xfs_daddr_t bno)
+ xfs_daddr_t bno,
+ uint imap_flags)
{
xfs_buf_t *bp;
xfs_dinode_t *dip;
ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
ip->i_ino = ino;
ip->i_mount = mp;
+ spin_lock_init(&ip->i_flags_lock);
/*
* Get pointer's to the on-disk inode and the buffer containing it.
* return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
* know that this is a new incore inode.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
+ error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
if (error) {
kmem_zone_free(xfs_inode_zone, ip);
return error;
* to prevent others from looking at until we're done.
*/
error = xfs_trans_iget(tp->t_mountp, tp, ino,
- IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+ XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
if (error != 0) {
return error;
}
ASSERT(ip->i_d.di_nlink == nlink);
ip->i_d.di_uid = current_fsuid(cr);
ip->i_d.di_gid = current_fsgid(cr);
- ip->i_d.di_xid = current_fsxid(cr, vp);
+ ip->i_d.di_tag = current_fstag(cr, vp);
ip->i_d.di_projid = prid;
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
/* Inode not in memory or we found it already,
* nothing to do
*/
- if (!ip || (ip->i_flags & XFS_ISTALE)) {
+ if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
read_unlock(&ih->ih_lock);
continue;
}
if (ip == free_ip) {
if (xfs_iflock_nowait(ip)) {
- ip->i_flags |= XFS_ISTALE;
-
+ xfs_iflags_set(ip, XFS_ISTALE);
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
} else {
if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
if (xfs_iflock_nowait(ip)) {
- ip->i_flags |= XFS_ISTALE;
+ xfs_iflags_set(ip, XFS_ISTALE);
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
AIL_LOCK(mp,s);
iip->ili_flush_lsn = iip->ili_item.li_lsn;
AIL_UNLOCK(mp, s);
- iip->ili_inode->i_flags |= XFS_ISTALE;
+ xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
pre_flushed++;
}
lip = lip->li_bio_list;
{
ASSERT(atomic_read(&ip->i_pincount) > 0);
- if (atomic_dec_and_test(&ip->i_pincount)) {
+ if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
+
/*
- * If the inode is currently being reclaimed, the
- * linux inode _and_ the xfs vnode may have been
- * freed so we cannot reference either of them safely.
- * Hence we should not try to do anything to them
- * if the xfs inode is currently in the reclaim
- * path.
+ * If the inode is currently being reclaimed, the link between
+ * the bhv_vnode and the xfs_inode will be broken after the
+ * XFS_IRECLAIM* flag is set. Hence, if these flags are not
+ * set, then we can move forward and mark the linux inode dirty
+ * knowing that it is still valid as it won't be freed until after
+ * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
+ * i_flags_lock is used to synchronise the setting of the
+ * XFS_IRECLAIM* flags and the breaking of the link, and so we
+ * can execute atomically w.r.t. reclaim by holding this lock
+ * here.
*
- * However, we still need to issue the unpin wakeup
- * call as the inode reclaim may be blocked waiting for
- * the inode to become unpinned.
+ * However, we still need to issue the unpin wakeup call as the
+ * inode reclaim may be blocked waiting for the inode to become
+ * unpinned.
*/
- if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
+
+ if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
+ struct inode *inode = NULL;
- /* make sync come back and flush this inode */
- if (vp) {
- struct inode *inode = vn_to_inode(vp);
+ BUG_ON(vp == NULL);
+ inode = vn_to_inode(vp);
+ BUG_ON(inode->i_state & I_CLEAR);
- if (!(inode->i_state &
- (I_NEW|I_FREEING|I_CLEAR)))
- mark_inode_dirty_sync(inode);
- }
+ /* make sync come back and flush this inode */
+ if (!(inode->i_state & (I_NEW|I_FREEING)))
+ mark_inode_dirty_sync(inode);
}
+ spin_unlock(&ip->i_flags_lock);
wake_up(&ip->i_ipin_wait);
}
}