VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b6dc7d9..ed8abf2 100644
@@ -65,7 +65,8 @@
  */
 
 STATIC kmem_cache_t *pagebuf_cache;
-STATIC void pagebuf_daemon_wakeup(void);
+STATIC kmem_shaker_t pagebuf_shake;
+STATIC int pagebuf_daemon_wakeup(int, unsigned int);
 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
 STATIC struct workqueue_struct *pagebuf_logio_workqueue;
 STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
@@ -384,13 +385,13 @@ _pagebuf_lookup_pages(
                         * But until all the XFS lowlevel code is revamped to
                         * handle buffer allocation failures we can't do much.
                         */
-                       if (!(++retries % 100)) {
-                               printk(KERN_ERR "possibly deadlocking in %s\n",
-                                               __FUNCTION__);
-                       }
+                       if (!(++retries % 100))
+                               printk(KERN_ERR
+                                       "possible deadlock in %s (mode:0x%x)\n",
+                                       __FUNCTION__, gfp_mask);
 
                        XFS_STATS_INC(pb_page_retries);
-                       pagebuf_daemon_wakeup();
+                       pagebuf_daemon_wakeup(0, gfp_mask);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(10);
                        goto retry;
@@ -1566,11 +1567,20 @@ void
 pagebuf_delwri_dequeue(
        xfs_buf_t               *pb)
 {
-       PB_TRACE(pb, "delwri_uq", 0);
+       int                     dequeued = 0;
+
        spin_lock(&pbd_delwrite_lock);
-       list_del_init(&pb->pb_list);
+       if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
+               list_del_init(&pb->pb_list);
+               dequeued = 1;
+       }
        pb->pb_flags &= ~PBF_DELWRI;
        spin_unlock(&pbd_delwrite_lock);
+
+       if (dequeued)
+               pagebuf_rele(pb);
+
+       PB_TRACE(pb, "delwri_dq", (long)dequeued);
 }
 
 STATIC void
@@ -1586,12 +1596,16 @@ STATIC struct task_struct *pagebuf_daemon_task;
 STATIC int pagebuf_daemon_active;
 STATIC int force_flush;
 
-STATIC void
-pagebuf_daemon_wakeup(void)
+
+STATIC int
+pagebuf_daemon_wakeup(
+       int                     priority,
+       unsigned int            mask)
 {
        force_flush = 1;
        barrier();
        wake_up_process(pagebuf_daemon_task);
+       return 0;
 }
 
 STATIC int
@@ -1600,6 +1614,7 @@ pagebuf_daemon(
 {
        struct list_head        tmp;
        unsigned long           age;
+       xfs_buftarg_t           *target;
        xfs_buf_t               *pb, *n;
 
        /*  Set up the thread  */
@@ -1642,9 +1657,12 @@ pagebuf_daemon(
 
                while (!list_empty(&tmp)) {
                        pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+                       target = pb->pb_target;
+
                        list_del_init(&pb->pb_list);
                        pagebuf_iostrategy(pb);
-                       blk_run_address_space(pb->pb_target->pbr_mapping);
+
+                       blk_run_address_space(target->pbr_mapping);
                }
 
                if (as_list_len > 0)
@@ -1775,21 +1793,28 @@ pagebuf_init(void)
        pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
                        SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (pagebuf_cache == NULL) {
-               printk("pagebuf: couldn't init pagebuf cache\n");
+               printk("XFS: couldn't init xfs_buf_t cache\n");
                pagebuf_terminate();
                return -ENOMEM;
        }
 
-       for (i = 0; i < NHASH; i++) {
-               spin_lock_init(&pbhash[i].pb_hash_lock);
-               INIT_LIST_HEAD(&pbhash[i].pb_hash);
-       }
-
 #ifdef PAGEBUF_TRACE
        pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
        pagebuf_daemon_start();
+
+       pagebuf_shake = kmem_shake_register(pagebuf_daemon_wakeup);
+       if (pagebuf_shake == NULL) {
+               pagebuf_terminate();
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < NHASH; i++) {
+               spin_lock_init(&pbhash[i].pb_hash_lock);
+               INIT_LIST_HEAD(&pbhash[i].pb_hash);
+       }
+
        return 0;
 }
 
 
@@ -1808,5 +1833,6 @@ pagebuf_terminate(void)
        ktrace_free(pagebuf_trace_buf);
 #endif
 
-       kmem_cache_destroy(pagebuf_cache);
+       kmem_zone_destroy(pagebuf_cache);
+       kmem_shake_deregister(pagebuf_shake);
 }
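
Note (not part of the patch): below is a minimal sketch of the registration pattern the hunks above wire up, assuming the kmem_shaker_t / kmem_shake_register() / kmem_shake_deregister() wrappers declared in this tree's fs/xfs/linux-2.6/kmem.h. All example_* identifiers are hypothetical; only the callback shape, an int (*)(int, unsigned int) that returns 0, mirrors the new pagebuf_daemon_wakeup() prototype.

/*
 * Illustrative only -- hypothetical example_* names. The real patch
 * registers pagebuf_daemon_wakeup() from pagebuf_init() and
 * deregisters the shaker in pagebuf_terminate().
 */
#include "kmem.h"			/* kmem_shaker_t, kmem_shake_*() (assumed include) */
#include <linux/sched.h>		/* wake_up_process() */

static kmem_shaker_t example_shake;
static struct task_struct *example_daemon_task;

/* Invoked by the VM under memory pressure; just kick the flush daemon. */
static int
example_shake_wakeup(
	int		priority,	/* unused, as in pagebuf_daemon_wakeup */
	unsigned int	gfp_mask)	/* allocation context of the caller */
{
	wake_up_process(example_daemon_task);
	return 0;			/* nothing reclaimed synchronously */
}

static int
example_shake_init(void)
{
	example_shake = kmem_shake_register(example_shake_wakeup);
	if (example_shake == NULL)
		return -ENOMEM;		/* mirrors pagebuf_init()'s error path */
	return 0;
}

static void
example_shake_exit(void)
{
	kmem_shake_deregister(example_shake);
}

Returning 0 from the callback matches the patch's own pagebuf_daemon_wakeup(): the daemon does the actual flushing asynchronously, so the shaker reports nothing reclaimed at call time.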