/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}
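
/*
 * Editorial sketch (an addition, not part of the original source): the
 * handler above is installed on each temporary buffer immediately before
 * it is submitted in commit phase 3 below:
 *
 *      set_buffer_locked(bh);
 *      clear_buffer_dirty(bh);
 *      set_buffer_uptodate(bh);
 *      bh->b_end_io = journal_end_buffer_io_sync;
 *      submit_bh(WRITE, bh);
 *
 * journal_end_buffer_io_sync() then runs at IO completion time, records
 * whether the write succeeded, and unlocks the buffer, which is what the
 * later wait_on_buffer() calls block on.
 */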

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (TestSetPageLocked(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
        if (!jbd_trylock_bh_state(bh)) {
                spin_unlock(&journal->j_list_lock);
                schedule();
                return 0;
        }
        return 1;
}
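
/*
 * Editorial note (an addition, not part of the original source): because
 * inverted_lock() drops j_list_lock on failure, every caller must be able
 * to restart from a point that retakes j_list_lock.  The canonical
 * pattern, as used by the data writeout loop below, looks like:
 *
 *      spin_lock(&journal->j_list_lock);
 *      ...
 *      if (!inverted_lock(journal, bh))
 *              goto write_out_data;    (the label retakes j_list_lock)
 *      ... work while holding both bh_state and j_list_lock ...
 *      jbd_unlock_bh_state(bh);
 */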

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
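/*
 * Editorial overview (an addition, keyed to the jbd_debug() phase
 * messages in the body below):
 *
 *   phase 1: lock down the running transaction, wait for updates to
 *            drain, discard unused reserved buffers, clean checkpoints;
 *   phase 2: flush the ordered-mode data buffers, wait for that IO,
 *            and write the revoke records;
 *   phase 3: write descriptor and metadata blocks to the log;
 *   phase 4: wait for the temporary metadata (t_iobuf_list) IO;
 *   phase 5: wait for the control-buffer (t_log_list) IO;
 *   phase 6: write the commit record synchronously;
 *   phase 7: move forgotten/committed buffers to the checkpoint lists;
 *   phase 8: mark the transaction T_FINISHED and queue it for
 *            checkpointing.
 */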
void journal_commit_transaction(journal_t *journal)
{
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head *wbuf[64];
        int bufs;
        int flags;
        int err;
        unsigned long blocknr;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif

        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);
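
        /*
         * Editorial note (an addition): the loop above is the standard
         * prepare_to_wait()/finish_wait() idiom.  t_updates is rechecked
         * under t_handle_lock after the task has been queued on
         * j_wait_updates, so the wakeup issued by the final journal_stop()
         * cannot slip in between the test and the schedule() and be lost.
         */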

        J_ASSERT (commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A journal_get_undo_access()+journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        if (jh->b_committed_data) {
                                kfree(jh->b_committed_data);
                                jh->b_committed_data = NULL;
                        }
                        jbd_unlock_bh_state(bh);
                }
                journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug (3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        journal_switch_revoke_table(journal);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug (3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */

        err = 0;
        /*
         * Whenever we unlock the journal and sleep, things can get added
         * onto ->t_sync_datalist, so we have to keep looping back to
         * write_out_data until we *know* that the list is empty.
         */
        bufs = 0;
        /*
         * Cleanup any flushed data buffers from the data list.  Even in
         * abort mode, we want to flush this out as soon as possible.
         */
write_out_data:
        cond_resched();
        spin_lock(&journal->j_list_lock);

        while (commit_transaction->t_sync_datalist) {
                struct buffer_head *bh;

                jh = commit_transaction->t_sync_datalist;
                commit_transaction->t_sync_datalist = jh->b_tnext;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        BUFFER_TRACE(bh, "locked");
                        if (!inverted_lock(journal, bh))
                                goto write_out_data;
                        __journal_unfile_buffer(jh);
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (need_resched()) {
                                spin_unlock(&journal->j_list_lock);
                                goto write_out_data;
                        }
                } else {
                        if (buffer_dirty(bh)) {
                                BUFFER_TRACE(bh, "start journal writeout");
                                get_bh(bh);
                                wbuf[bufs++] = bh;
                                if (bufs == ARRAY_SIZE(wbuf)) {
                                        jbd_debug(2, "submit %d writes\n",
                                                        bufs);
                                        spin_unlock(&journal->j_list_lock);
                                        ll_rw_block(WRITE, bufs, wbuf);
                                        journal_brelse_array(wbuf, bufs);
                                        bufs = 0;
                                        goto write_out_data;
                                }
                        } else {
                                BUFFER_TRACE(bh, "writeout complete: unfile");
                                if (!inverted_lock(journal, bh))
                                        goto write_out_data;
                                __journal_unfile_buffer(jh);
                                jbd_unlock_bh_state(bh);
                                journal_remove_journal_head(bh);
                                put_bh(bh);
                                if (need_resched()) {
                                        spin_unlock(&journal->j_list_lock);
                                        goto write_out_data;
                                }
                        }
                }
        }

        if (bufs) {
                spin_unlock(&journal->j_list_lock);
                ll_rw_block(WRITE, bufs, wbuf);
                journal_brelse_array(wbuf, bufs);
                spin_lock(&journal->j_list_lock);
        }

        /*
         * Wait for all previously submitted IO to complete.
         */
        while (commit_transaction->t_locked_list) {
                struct buffer_head *bh;

                jh = commit_transaction->t_locked_list->b_tprev;
                bh = jh2bh(jh);
                get_bh(bh);
                if (buffer_locked(bh)) {
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        if (unlikely(!buffer_uptodate(bh)))
                                err = -EIO;
                        spin_lock(&journal->j_list_lock);
                }
                if (!inverted_lock(journal, bh)) {
                        put_bh(bh);
                        spin_lock(&journal->j_list_lock);
                        continue;
                }
                if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        journal_remove_journal_head(bh);
                        put_bh(bh);
                } else {
                        jbd_unlock_bh_state(bh);
                }
                put_bh(bh);
                if (need_resched()) {
                        spin_unlock(&journal->j_list_lock);
                        cond_resched();
                        spin_lock(&journal->j_list_lock);
                }
        }
        spin_unlock(&journal->j_list_lock);

        journal_write_revoke_records(journal, commit_transaction);

        jbd_debug(3, "JBD: commit phase 2\n");

        /*
         * If we found any dirty or locked buffers, then we should have
         * looped back up to the write_out_data label.  If there weren't
         * any then journal_clean_data_list should have wiped the list
         * clean by now, so check that it is in fact empty.
         */
        J_ASSERT (commit_transaction->t_sync_datalist == NULL);

        jbd_debug (3, "JBD: commit phase 3\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        commit_transaction->t_state = T_COMMIT;

        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it for background writing. */

                if (is_journal_aborted(journal)) {
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                __journal_abort_hard(journal);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = htonl(JFS_MAGIC_NUMBER);
                        header->h_blocktype = htonl(JFS_DESCRIPTOR_BLOCK);
                        header->h_sequence  = htonl(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_bit(BH_JWrite, &bh->b_state);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        __journal_abort_hard(journal);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer).  new_bh goes on BJ_IO. */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JFS_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JFS_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = htonl(jh2bh(jh)->b_blocknr);
                tag->t_flags = htonl(tag_flag);
                tagp += sizeof(journal_block_tag_t);
                space_left -= sizeof(journal_block_tag_t);

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == ARRAY_SIZE(wbuf) ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < sizeof(journal_block_tag_t) + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= htonl(JFS_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                set_buffer_locked(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(WRITE, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }
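
        /*
         * Editorial sketch (an addition, not part of the original source):
         * the descriptor block assembled above has the following on-disk
         * layout, which is why space_left starts at
         * b_size - sizeof(journal_header_t) and why 16 extra bytes are
         * consumed after the first tag:
         *
         *      +--------------------------+
         *      | journal_header_t         |  h_magic, h_blocktype, h_sequence
         *      +--------------------------+
         *      | journal_block_tag_t      |  first tag: t_blocknr, t_flags
         *      | 16-byte journal UUID     |  follows the first tag only
         *      +--------------------------+
         *      | journal_block_tag_t      |  later tags set JFS_FLAG_SAME_UUID
         *      |           ...            |
         *      +--------------------------+
         *      | final tag                |  marked with JFS_FLAG_LAST_TAG
         *      +--------------------------+
         */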

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 4\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_buffer(bh);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 5\n");

        /* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                journal_unfile_buffer(journal, jh);
                journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        jbd_debug(3, "JBD: commit phase 6\n");

        if (is_journal_aborted(journal))
                goto skip_commit;

        /* Done it all: now write the commit record.  We should have
         * cleaned up our previous buffers by now, so if we are in abort
         * mode we can now just skip the rest of the journal write
         * entirely. */

        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor) {
                __journal_abort_hard(journal);
                goto skip_commit;
        }

        /* Write the commit header into each 512-byte sector of the
         * descriptor block, offsetting by `i' each time round. */
        for (i = 0; i < jh2bh(descriptor)->b_size; i += 512) {
                journal_header_t *tmp =
                        (journal_header_t *)(jh2bh(descriptor)->b_data + i);
                tmp->h_magic = htonl(JFS_MAGIC_NUMBER);
                tmp->h_blocktype = htonl(JFS_COMMIT_BLOCK);
                tmp->h_sequence = htonl(commit_transaction->t_tid);
        }

        JBUFFER_TRACE(descriptor, "write commit block");
        {
                struct buffer_head *bh = jh2bh(descriptor);
                set_buffer_uptodate(bh);
                sync_dirty_buffer(bh);
                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;
                put_bh(bh);             /* One for getblk() */
                journal_put_journal_head(descriptor);
        }
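
        /*
         * Editorial note (an addition): the commit record is written with
         * sync_dirty_buffer(), i.e. synchronously, so control does not
         * reach the checkpoint processing below until the commit block is
         * on disk -- or has failed, in which case err forces an abort at
         * skip_commit.
         */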

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list they were
           previously on. */

skip_commit: /* The journal should be unlocked by now. */

        if (err)
                __journal_abort_hard(journal);

        /*
         * Call any callbacks that had been registered for handles in this
         * transaction.  It is up to the callback to free any allocated
         * memory.
         *
         * The spinlocking (t_jcb_lock) here is surely unnecessary...
         */
        spin_lock(&commit_transaction->t_jcb_lock);
        if (!list_empty(&commit_transaction->t_jcb)) {
                struct list_head *p, *n;
                int error = is_journal_aborted(journal);

                list_for_each_safe(p, n, &commit_transaction->t_jcb) {
                        struct journal_callback *jcb;

                        jcb = list_entry(p, struct journal_callback, jcb_list);
                        list_del(p);
                        spin_unlock(&commit_transaction->t_jcb_lock);
                        jcb->jcb_func(jcb, error);
                        spin_lock(&commit_transaction->t_jcb_lock);
                }
        }
        spin_unlock(&commit_transaction->t_jcb_lock);

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
                        kfree(jh->b_committed_data);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        kfree(jh->b_frozen_data);
                        jh->b_frozen_data = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        __journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future now that the last use has
                 * been committed.  That's not only a performance gain,
                 * it also stops aliasing problems if the buffer is left
                 * behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh)) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __journal_insert_checkpoint(jh, commit_transaction);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        journal_remove_journal_head(bh);  /* needs a brelse */
                        release_buffer_page(bh);
                }
                spin_unlock(&journal->j_list_lock);
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 8\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT);

        /*
         * This is a bit sleazy.  We borrow j_list_lock to protect
         * journal->j_committing_transaction in __journal_remove_checkpoint.
         * Really, __journal_remove_checkpoint should be using j_state_lock,
         * but it's a bit of a hassle to hold that across
         * __journal_remove_checkpoint.
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        spin_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        wake_up(&journal->j_wait_done_commit);
}