fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] fs/xfs/xfs_iget.c
index 8e380a1..acf9535 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
@@ -52,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
        __uint64_t      icount;
-       uint            i, flags = KM_SLEEP | KM_MAYFAIL;
+       uint            i;
 
        if (!mp->m_ihsize) {
                icount = mp->m_maxicount ? mp->m_maxicount :
@@ -63,14 +61,13 @@ xfs_ihash_init(xfs_mount_t *mp)
                                        (64 * NBPP) / sizeof(xfs_ihash_t));
        }
 
-       while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
-                                               sizeof(xfs_ihash_t), flags))) {
-               if ((mp->m_ihsize >>= 1) <= NBPP)
-                       flags = KM_SLEEP;
-       }
-       for (i = 0; i < mp->m_ihsize; i++) {
+       mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
+                                        NBPC * sizeof(xfs_ihash_t),
+                                        mp->m_ihsize * sizeof(xfs_ihash_t),
+                                        KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+       mp->m_ihsize /= sizeof(xfs_ihash_t);
+       for (i = 0; i < mp->m_ihsize; i++)
                rwlock_init(&(mp->m_ihash[i].ih_lock));
-       }
 }
 
 /*
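
The hunk above replaces the open-coded shrink-until-it-fits loop with kmem_zalloc_greedy(): ask for the ideal hash table size and accept progressively smaller allocations instead of looping indefinitely on a failed request. Below is a small userspace sketch of that fallback pattern; it works in element counts with a made-up helper name, not the kernel interface's byte sizes and KM_* flags.

    #include <stdlib.h>

    /*
     * Allocate a zeroed array of up to 'maxelem' elements of 'size' bytes,
     * halving the request on failure but never dropping below 'minelem'.
     * On return *nelem holds the element count actually obtained (0 only
     * if even the minimum request failed).
     */
    static void *zalloc_greedy(size_t *nelem, size_t minelem,
                               size_t maxelem, size_t size)
    {
            size_t n = maxelem;
            void *p;

            for (;;) {
                    p = calloc(n, size);
                    if (p || n <= minelem)
                            break;          /* success, or nothing smaller to try */
                    n /= 2;
                    if (n < minelem)
                            n = minelem;    /* clamp to the guaranteed minimum */
            }
            *nelem = p ? n : 0;
            return p;
    }

The new "mp->m_ihsize /= sizeof(xfs_ihash_t)" line is the matching step on the XFS side: the final table size is derived from however much memory was actually obtained.
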
@@ -79,7 +76,7 @@ xfs_ihash_init(xfs_mount_t *mp)
 void
 xfs_ihash_free(xfs_mount_t *mp)
 {
-       kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
+       kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
        mp->m_ihash = NULL;
 }
 
@@ -97,7 +94,7 @@ xfs_chash_init(xfs_mount_t *mp)
        mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
        mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
                                                 * sizeof(xfs_chash_t),
-                                                KM_SLEEP);
+                                                KM_SLEEP | KM_LARGE);
        for (i = 0; i < mp->m_chsize; i++) {
                spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
        }
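
KM_LARGE, passed to the chash allocation above and to kmem_zalloc_greedy() earlier, marks these as intentionally multi-page allocations; in the XFS kmem wrappers, requests that large are satisfied by vmalloc() rather than kmalloc(), since a physically contiguous region of that size may simply not be available. A rough sketch of that kind of dual-path allocator follows (illustrative names, not the XFS kmem API; recent kernels package the same idea as kvmalloc()/kvfree()).

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /*
     * Zeroed allocation that prefers the slab for small requests and falls
     * back to vmalloc() for large ones, which only needs virtually
     * contiguous pages.
     */
    static void *demo_zalloc_large(size_t size)
    {
            if (size <= 2 * PAGE_SIZE)
                    return kzalloc(size, GFP_KERNEL);
            return vzalloc(size);
    }

    static void demo_free_large(void *ptr)
    {
            if (is_vmalloc_addr(ptr))       /* which allocator produced this? */
                    vfree(ptr);
            else
                    kfree(ptr);
    }
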
@@ -186,7 +183,7 @@ xfs_ihash_promote(
  */
 STATIC int
 xfs_iget_core(
-       vnode_t         *vp,
+       bhv_vnode_t     *vp,
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
@@ -198,7 +195,7 @@ xfs_iget_core(
        xfs_ihash_t     *ih;
        xfs_inode_t     *ip;
        xfs_inode_t     *iq;
-       vnode_t         *inode_vp;
+       bhv_vnode_t     *inode_vp;
        ulong           version;
        int             error;
        /* REFERENCED */
@@ -218,7 +215,7 @@ again:
                         * If INEW is set this inode is being set up
                         * we need to pause and try again.
                         */
-                       if (ip->i_flags & XFS_INEW) {
+                       if (xfs_iflags_test(ip, XFS_INEW)) {
                                read_unlock(&ih->ih_lock);
                                delay(1);
                                XFS_STATS_INC(xs_ig_frecycle);
@@ -233,20 +230,50 @@ again:
                                 * on its way out of the system,
                                 * we need to pause and try again.
                                 */
-                               if (ip->i_flags & XFS_IRECLAIM) {
+                               if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
                                        read_unlock(&ih->ih_lock);
                                        delay(1);
                                        XFS_STATS_INC(xs_ig_frecycle);
 
                                        goto again;
                                }
+                               ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+
+                               /*
+                                * If lookup is racing with unlink, then we
+                                * should return an error immediately so we
+                                * don't remove it from the reclaim list and
+                                * potentially leak the inode.
+                                */
+                               if ((ip->i_d.di_mode == 0) &&
+                                   !(flags & XFS_IGET_CREATE)) {
+                                       read_unlock(&ih->ih_lock);
+                                       return ENOENT;
+                               }
+
+                               /*
+                                * There may be transactions sitting in the
+                                * incore log buffers or being flushed to disk
+                                * at this time.  We can't clear the
+                                * XFS_IRECLAIMABLE flag until these
+                                * transactions have hit the disk, otherwise we
+                                * will void the guarantee the flag provides
+                                * will void the guarantee the flag provides to
+                                * xfs_iunpin().
+                               if (xfs_ipincount(ip)) {
+                                       read_unlock(&ih->ih_lock);
+                                       xfs_log_force(mp, 0,
+                                               XFS_LOG_FORCE|XFS_LOG_SYNC);
+                                       XFS_STATS_INC(xs_ig_frecycle);
+                                       goto again;
+                               }
 
                                vn_trace_exit(vp, "xfs_iget.alloc",
                                        (inst_t *)__return_address);
 
                                XFS_STATS_INC(xs_ig_found);
 
-                               ip->i_flags &= ~XFS_IRECLAIMABLE;
+                               xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
                                version = ih->ih_version;
                                read_unlock(&ih->ih_lock);
                                xfs_ihash_promote(ih, ip, version);
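
A change that recurs throughout this file is the switch from touching ip->i_flags directly ("ip->i_flags & XFS_INEW", "ip->i_flags &= ~XFS_IRECLAIMABLE", ...) to the xfs_iflags_test()/xfs_iflags_set()/xfs_iflags_clear() helpers, so that every access to the flag word goes through one serialised accessor rather than an unlocked read-modify-write. A minimal sketch of helpers of that shape, using an illustrative structure rather than the real xfs_inode layout:

    #include <linux/spinlock.h>

    /*
     * Stand-in for the inode flag state; initialise i_flags_lock with
     * spin_lock_init() before use.
     */
    struct demo_inode {
            spinlock_t      i_flags_lock;   /* serialises i_flags updates */
            unsigned short  i_flags;
    };

    static inline void demo_iflags_set(struct demo_inode *ip, unsigned short flags)
    {
            spin_lock(&ip->i_flags_lock);
            ip->i_flags |= flags;
            spin_unlock(&ip->i_flags_lock);
    }

    static inline void demo_iflags_clear(struct demo_inode *ip, unsigned short flags)
    {
            spin_lock(&ip->i_flags_lock);
            ip->i_flags &= ~flags;
            spin_unlock(&ip->i_flags_lock);
    }

    static inline int demo_iflags_test(struct demo_inode *ip, unsigned short flags)
    {
            int ret;

            spin_lock(&ip->i_flags_lock);
            ret = (ip->i_flags & flags) != 0;
            spin_unlock(&ip->i_flags_lock);
            return ret;
    }

The ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)) added above depends on exactly this property: a flag read that cannot tear against a concurrent set or clear.
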
@@ -258,7 +285,7 @@ again:
                                goto finish_inode;
 
                        } else if (vp != inode_vp) {
-                               struct inode *inode = LINVFS_GET_IP(inode_vp);
+                               struct inode *inode = vn_to_inode(inode_vp);
 
                                /* The inode is being torn down, pause and
                                 * try again.
@@ -292,16 +319,15 @@ again:
 
 finish_inode:
                        if (ip->i_d.di_mode == 0) {
-                               if (!(flags & IGET_CREATE))
+                               if (!(flags & XFS_IGET_CREATE))
                                        return ENOENT;
                                xfs_iocore_inode_reinit(ip);
                        }
-       
+
                        if (lock_flags != 0)
                                xfs_ilock(ip, lock_flags);
 
-                       ip->i_flags &= ~XFS_ISTALE;
-
+                       xfs_iflags_clear(ip, XFS_ISTALE);
                        vn_trace_exit(vp, "xfs_iget.found",
                                                (inst_t *)__return_address);
                        goto return_ip;
@@ -322,21 +348,20 @@ finish_inode:
         * Read the disk inode attributes into a new inode structure and get
         * a new vnode for it. This should also initialize i_ino and i_mount.
         */
-       error = xfs_iread(mp, tp, ino, &ip, bno);
-       if (error) {
+       error = xfs_iread(mp, tp, ino, &ip, bno,
+                         (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
+       if (error)
                return error;
-       }
 
        vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);
 
        xfs_inode_lock_init(ip, vp);
        xfs_iocore_inode_init(ip);
 
-       if (lock_flags != 0) {
+       if (lock_flags)
                xfs_ilock(ip, lock_flags);
-       }
-               
-       if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
+
+       if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                xfs_idestroy(ip);
                return ENOENT;
        }
@@ -371,8 +396,7 @@ finish_inode:
        ih->ih_next = ip;
        ip->i_udquot = ip->i_gdquot = NULL;
        ih->ih_version++;
-       ip->i_flags |= XFS_INEW;
-
+       xfs_iflags_set(ip, XFS_INEW);
        write_unlock(&ih->ih_lock);
 
        /*
@@ -421,7 +445,10 @@ finish_inode:
                        ip->i_chash = chlnew;
                        chlnew->chl_ip = ip;
                        chlnew->chl_blkno = ip->i_blkno;
+                       if (ch->ch_list)
+                               ch->ch_list->chl_prev = chlnew;
                        chlnew->chl_next = ch->ch_list;
+                       chlnew->chl_prev = NULL;
                        ch->ch_list = chlnew;
                        chlnew = NULL;
                }
@@ -465,7 +492,7 @@ finish_inode:
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
-       VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
+       bhv_vfs_init_vnode(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
 
        return 0;
 }
@@ -486,7 +513,7 @@ xfs_iget(
        xfs_daddr_t     bno)
 {
        struct inode    *inode;
-       vnode_t         *vp = NULL;
+       bhv_vnode_t     *vp = NULL;
        int             error;
 
        XFS_STATS_INC(xs_ig_attempts);
@@ -495,7 +522,7 @@ retry:
        if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
                xfs_inode_t     *ip;
 
-               vp = LINVFS_GET_VP(inode);
+               vp = vn_from_inode(inode);
                if (inode->i_state & I_NEW) {
                        vn_initialize(inode);
                        error = xfs_iget_core(vp, mp, tp, ino, flags,
@@ -509,7 +536,7 @@ retry:
                } else {
                        /*
                         * If the inode is not fully constructed due to
-                        * filehandle mistmatches wait for the inode to go
+                        * filehandle mismatches wait for the inode to go
                         * away and try again.
                         *
                         * iget_locked will call __wait_on_freeing_inode
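
The retry logic in xfs_iget() is built on the stock VFS iget_locked() contract: the call returns either a cached, fully initialised inode, or a freshly allocated one that comes back locked with I_NEW set, which the caller must populate and then release with unlock_new_inode() (the code above layers vn_initialize() and xfs_iget_core() on top of that contract). A generic sketch of the pattern outside XFS; demo_fill_inode() is a hypothetical read-from-disk helper:

    #include <linux/fs.h>

    static int demo_fill_inode(struct inode *inode);    /* hypothetical: read on-disk state */

    static struct inode *demo_iget(struct super_block *sb, unsigned long ino)
    {
            struct inode *inode;

            inode = iget_locked(sb, ino);
            if (!inode)
                    return NULL;                    /* allocation failure */
            if (!(inode->i_state & I_NEW))
                    return inode;                   /* cache hit, already set up */

            if (demo_fill_inode(inode)) {
                    /* tear the half-built inode back down */
                    make_bad_inode(inode);
                    unlock_new_inode(inode);
                    iput(inode);
                    return NULL;
            }
            unlock_new_inode(inode);                /* wakes anyone waiting on I_NEW */
            return inode;
    }

The comment above describes the corner case the retry exists for: an inode that is only partially torn down must go away before a fresh lookup of the same inode number can proceed.
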
@@ -540,14 +567,14 @@ retry:
 void
 xfs_inode_lock_init(
        xfs_inode_t     *ip,
-       vnode_t         *vp)
+       bhv_vnode_t     *vp)
 {
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", (long)vp->v_number);
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
        init_waitqueue_head(&ip->i_ipin_wait);
        atomic_set(&ip->i_pincount, 0);
-       init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
+       initnsema(&ip->i_flock, 1, "xfsfino");
 }
 
 /*
@@ -600,12 +627,10 @@ void
 xfs_iput(xfs_inode_t   *ip,
         uint           lock_flags)
 {
-       vnode_t *vp = XFS_ITOV(ip);
+       bhv_vnode_t     *vp = XFS_ITOV(ip);
 
        vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);
-
        xfs_iunlock(ip, lock_flags);
-
        VN_RELE(vp);
 }
 
@@ -616,13 +641,13 @@ void
 xfs_iput_new(xfs_inode_t       *ip,
             uint               lock_flags)
 {
-       vnode_t         *vp = XFS_ITOV(ip);
-       struct inode    *inode = LINVFS_GET_IP(vp);
+       bhv_vnode_t     *vp = XFS_ITOV(ip);
+       struct inode    *inode = vn_to_inode(vp);
 
        vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
 
        if ((ip->i_d.di_mode == 0)) {
-               ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
+               ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
                vn_mark_bad(vp);
        }
        if (inode->i_state & I_NEW)
@@ -642,7 +667,7 @@ xfs_iput_new(xfs_inode_t    *ip,
 void
 xfs_ireclaim(xfs_inode_t *ip)
 {
-       vnode_t         *vp;
+       bhv_vnode_t     *vp;
 
        /*
         * Remove from old hash list and mount list.
@@ -677,9 +702,10 @@ xfs_ireclaim(xfs_inode_t *ip)
                vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
        }
 
        /*
         * Free all memory associated with the inode.
         */
+       xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_idestroy(ip);
 }
 
@@ -723,23 +751,15 @@ xfs_iextract(
                ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
                ASSERT(ip->i_chash != NULL);
                chm=NULL;
-               for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
-                       if (chl->chl_blkno == ip->i_blkno) {
-                               if (chm == NULL) {
-                                       /* first item on the list */
-                                       ch->ch_list = chl->chl_next;
-                               } else {
-                                       chm->chl_next = chl->chl_next;
-                               }
-                               kmem_zone_free(xfs_chashlist_zone, chl);
-                               break;
-                       } else {
-                               ASSERT(chl->chl_ip != ip);
-                               chm = chl;
-                       }
-               }
-               ASSERT_ALWAYS(chl != NULL);
-       } else {
+               chl = ip->i_chash;
+               if (chl->chl_prev)
+                       chl->chl_prev->chl_next = chl->chl_next;
+               else
+                       ch->ch_list = chl->chl_next;
+               if (chl->chl_next)
+                       chl->chl_next->chl_prev = chl->chl_prev;
+               kmem_zone_free(xfs_chashlist_zone, chl);
+       } else {
                /* delete one inode from a non-empty list */
                iq = ip->i_cnext;
                iq->i_cprev = ip->i_cprev;
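
Together with the chl_prev initialisation added in the xfs_iget_core() hunk earlier, the rewrite above turns the cluster hash chain into a doubly linked list: instead of walking ch->ch_list to find the entry (and its predecessor) whose chl_blkno matches, xfs_iextract() can now unlink ip->i_chash directly in constant time. The same transformation in miniature, with a generic node type rather than xfs_chashlist_t:

    #include <stddef.h>

    struct node {
            struct node     *next;
            struct node     *prev;  /* the back pointer that removes the search */
    };

    /* Push 'n' onto the front of the list, mirroring the insertion hunk. */
    static void list_push(struct node **head, struct node *n)
    {
            if (*head)
                    (*head)->prev = n;
            n->next = *head;
            n->prev = NULL;
            *head = n;
    }

    /* Unlink 'n' from the list whose head pointer is *head, in O(1). */
    static void list_unlink(struct node **head, struct node *n)
    {
            if (n->prev)
                    n->prev->next = n->next;
            else
                    *head = n->next;        /* n was the first element */
            if (n->next)
                    n->next->prev = n->prev;
            n->next = n->prev = NULL;
    }
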
@@ -1038,6 +1058,6 @@ xfs_iflock_nowait(xfs_inode_t *ip)
 void
 xfs_ifunlock(xfs_inode_t *ip)
 {
-       ASSERT(valusema(&(ip->i_flock)) <= 0);
+       ASSERT(issemalocked(&(ip->i_flock)));
        vsema(&(ip->i_flock));
 }
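
The final hunk swaps the IRIX-era valusema() test for issemalocked(): the inode flush lock is a binary semaphore (see the initnsema(..., 1, "xfsfino") call above), and the assertion only needs to know that it is currently held before vsema() releases it. A minimal sketch of a flush lock built the same way on a Linux struct semaphore (illustrative helper names, not the XFS psema/vsema wrappers):

    #include <linux/semaphore.h>

    struct demo_flushlock {
            struct semaphore        sem;    /* counts 1 when free, 0 when held */
    };

    static inline void demo_flock_init(struct demo_flushlock *fl)
    {
            sema_init(&fl->sem, 1);
    }

    static inline void demo_flock(struct demo_flushlock *fl)
    {
            down(&fl->sem);                 /* sleep until the lock is ours */
    }

    /* Returns nonzero only if the lock was taken without sleeping. */
    static inline int demo_flock_nowait(struct demo_flushlock *fl)
    {
            return down_trylock(&fl->sem) == 0;
    }

    static inline void demo_funlock(struct demo_flushlock *fl)
    {
            up(&fl->sem);
    }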
 }