fs/xfs/xfs_log_recover.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_trans.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_imap.h"
#include "xfs_inode_item.h"
#include "xfs_inode.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_alloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_bit.h"
#include "xfs_quota.h"
#include "xfs_rw.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void     xlog_recover_insert_item_backq(xlog_recover_item_t **q,
                                               xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
STATIC void     xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
#else
#define xlog_recover_check_summary(log)
#define xlog_recover_check_ail(mp, lip, gen)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)   \
        ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
        ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)   ((bno) & ~(log)->l_sectbb_mask)

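/*
 * A worked example of the macros above (illustrative only, not from the
 * original source): on a device with 4096-byte sectors each sector spans
 * 8 basic blocks, so l_sectbb_mask == 7.  Then:
 *
 *      XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 10)
 *              == (10 + 7 + 1) & ~7 == 16      (round 10 BBs up to 2 sectors)
 *      XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 16)
 *              == 16                           (already sector aligned)
 *      XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 13)
 *              == 13 & ~7 == 8                 (start of the enclosing sector)
 *
 * On a 512-byte sector device l_sectbb_mask is 0 and both macros are
 * identity operations.
 */
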
xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             num_bblks)
{
        ASSERT(num_bblks > 0);

        if (log->l_sectbb_log) {
                if (num_bblks > 1)
                        num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
                num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
        }
        return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
        ASSERT(bp);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        if ((error = xfs_iowait(bp)))
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}

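/*
 * Example of how xlog_bread() and xlog_align() cooperate (illustrative
 * figures, again assuming l_sectbb_mask == 7): a caller asking for 1 BB
 * at block 13 causes xlog_bread() to round the request out to the
 * enclosing sector, i.e. 8 BBs starting at block 8.  xlog_align() then
 * returns XFS_BUF_PTR(bp) + BBTOB(13 & 7), i.e. the buffer start plus
 * 5 blocks, so the caller still sees a pointer to the block it asked for.
 */
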
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        int                     b;

        printk("%s:  SB : uuid = ", __FUNCTION__);
        for (b = 0; b < 16; b++)
                printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
        printk(", fmt = %d\n", XLOG_FMT);
        printk("    log : uuid = ");
        for (b = 0; b < 16; b++)
                printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
        printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        xfs_mount_t     *mp;

        ASSERT(XFS_BUF_FSPRIVATE(bp, void *));

        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
                xfs_ioerror_alert("xlog_recover_iodone",
                                  mp, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
        }
        XFS_BUF_SET_FSPRIVATE(bp, NULL);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                if ((error = xlog_bread(log, mid_blk, 1, bp)))
                        return error;
                offset = xlog_align(log, mid_blk, 1, bp);
                mid_cycle = GET_CYCLE(offset, ARCH_CONVERT);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

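/*
 * A short trace of the search above (hypothetical cycle values): suppose
 * the per-block cycle numbers between first_blk == 0 and *last_blk == 5
 * are { 9, 9, 9, 8, 8, 8 } and we are looking for cycle 8.
 *
 *      mid_blk = 2, cycle 9 != 8  ->  first_blk = 2
 *      mid_blk = 3, cycle 8 == 8  ->  *last_blk = 3
 *      mid_blk = BLK_AVG(2, 3) == 2 == first_blk  ->  loop terminates
 *
 * *last_blk ends up at 3, the first block stamped with cycle 8.  The
 * result is only approximate when partial writes have left the cycle
 * numbers non-monotonic, which is why the callers re-verify it below.
 */
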
/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        bufblks = 1 << ffs(nbblks);

        while (!(bp = xlog_get_bp(log, bufblks))) {
                /* can't get enough memory to do everything in one big buffer */
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                if ((error = xlog_bread(log, i, bcount, bp)))
                        goto out;

                buf = xlog_align(log, i, bcount, bp);
                for (j = 0; j < bcount; j++) {
                        cycle = GET_CYCLE(buf, ARCH_CONVERT);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                if ((error = xlog_bread(log, start_blk, num_blks, bp)))
                        goto out;
                offset = xlog_align(log, start_blk, num_blks, bp);
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto out;
                        offset = xlog_align(log, i, 1, bp);
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(head->h_magicno, ARCH_CONVERT))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                uint    h_size = INT_GET(head->h_size, ARCH_CONVERT);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks
                        != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

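/*
 * To make the final length check above concrete (numbers are made up):
 * say the header found at block i advertises a payload of
 * BTOBB(h_len) == 5 blocks plus one header block (xhdrs == 1), so a
 * complete record spans 6 blocks.  If *last_blk - i + extra_bblks == 6,
 * *last_blk sits exactly on the block after a whole record and is left
 * alone; any other distance means *last_blk landed inside a partially
 * written record, so it is pulled back to the header block i.
 */
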
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        if ((error = xlog_bread(log, last_blk, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, last_blk, 1, bp);
        last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to find log head");
        return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk,
        int                     readonly)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                if ((error = xlog_bread(log, 0, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, 0, 1, bp);
                if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                if ((error = xlog_bread(log, i, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, i, 1, bp);
                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto bread_err;
                        offset = xlog_align(log, i, 1, bp);
                        if (XLOG_HEADER_MAGIC_NUM ==
                            INT_GET(*(uint*)offset, ARCH_CONVERT)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                xlog_put_bp(bp);        /* don't leak the buffer on this path */
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
        log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                int     h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
                int     h_version = INT_GET(rhead->h_version, ARCH_CONVERT);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
                        goto bread_err;
                }
                offset = xlog_align(log, umount_data_blk, 1, bp);
                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle,
                                        after_umount_blk);
                        ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
                                        after_umount_blk);
                        *tail_blk = after_umount_blk;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

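/*
 * Unmount-record detection above, by the numbers (hypothetical values):
 * suppose the last record header was found at block i == 100 with one
 * header block (hblks == 1) and a payload of BTOBB(h_len) == 2 blocks.
 * Then after_umount_blk == (100 + 1 + 2) % l_logBBsize == 103.  If the
 * head is exactly 103 and the record holds a single log operation, the
 * candidate unmount record body sits at umount_data_blk == 101, and its
 * op header is checked for XLOG_UNMOUNT_TRANS to confirm a clean unmount.
 */
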
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, log_bbnum-1, 1, bp);
        last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                error = XFS_ERROR(EINVAL);
                goto bp_err;            /* free bp instead of leaking it */
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
        INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
        INT_SET(recp->h_version, ARCH_CONVERT,
                        XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
        ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block);
        ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block);
        INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
        if (balign != start_block) {
                if ((error = xlog_bread(log, start_block, 1, bp))) {
                        xlog_put_bp(bp);
                        return error;
                }
                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
                        if ((error = xlog_bread(log, ealign, sectbb, bp)))
                                break;
                        XFS_BUF_SET_PTR(bp, offset, bufblks);
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }
        xlog_put_bp(bp);
        return error;
}

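/*
 * Why the read-modify-write above matters, sketched with made-up numbers:
 * with 8-BB sectors (l_sectbb_mask == 7), a request to stamp blocks
 * 13..20 cannot simply start writing at block 13, because xlog_bwrite()
 * always writes whole sectors.  So the enclosing sector is first read
 * back in (balign == 8, j == 5) and the stamped blocks are laid down
 * after the preserved blocks 8..12, keeping the sector's existing
 * contents intact.  The symmetric read at ealign protects the tail end
 * of the final sector.
 */
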
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = MIN(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}

1325 /******************************************************************************
1326  *
1327  *              Log recover routines
1328  *
1329  ******************************************************************************
1330  */
1331
1332 STATIC xlog_recover_t *
1333 xlog_recover_find_tid(
1334         xlog_recover_t          *q,
1335         xlog_tid_t              tid)
1336 {
1337         xlog_recover_t          *p = q;
1338
1339         while (p != NULL) {
1340                 if (p->r_log_tid == tid)
1341                         break;
1342                 p = p->r_next;
1343         }
1344         return p;
1345 }
1346
1347 STATIC void
1348 xlog_recover_put_hashq(
1349         xlog_recover_t          **q,
1350         xlog_recover_t          *trans)
1351 {
1352         trans->r_next = *q;
1353         *q = trans;
1354 }
1355
1356 STATIC void
1357 xlog_recover_add_item(
1358         xlog_recover_item_t     **itemq)
1359 {
1360         xlog_recover_item_t     *item;
1361
1362         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1363         xlog_recover_insert_item_backq(itemq, item);
1364 }
1365
1366 STATIC int
1367 xlog_recover_add_to_cont_trans(
1368         xlog_recover_t          *trans,
1369         xfs_caddr_t             dp,
1370         int                     len)
1371 {
1372         xlog_recover_item_t     *item;
1373         xfs_caddr_t             ptr, old_ptr;
1374         int                     old_len;
1375
1376         item = trans->r_itemq;
1377         if (item == NULL) {
1378                 /* finish copying rest of trans header */
1379                 xlog_recover_add_item(&trans->r_itemq);
1380                 ptr = (xfs_caddr_t) &trans->r_theader +
1381                                 sizeof(xfs_trans_header_t) - len;
1382                 memcpy(ptr, dp, len); /* dest, src, len */
1383                 return 0;
1384         }
1385         item = item->ri_prev;
1386
1387         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1388         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1389
1390         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0);
1391         memcpy(&ptr[old_len], dp, len); /* dest, src, len */
1392         item->ri_buf[item->ri_cnt-1].i_len += len;
1393         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1394         return 0;
1395 }
1396
1397 /*
1398  * The next region to add is the start of a new region.  It could be
1399  * a whole region or just the first part of one.  Because
1400  * of this, the assumption here is that the type and size fields of all
1401  * format structures fit into the first 32 bits of the structure.
1402  *
1403  * This works because all regions must be 32 bit aligned.  Therefore, we
1404  * either have both fields or we have neither.  If we have neither field,
1405  * the data part of the region is zero length.  We only have
1406  * a log_op_header and can throw away the header since a new one will appear
1407  * later.  If we have at least 4 bytes, then we can determine how many regions
1408  * will appear in the current log item.
1409  */
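/*
 * Illustrative sketch (not part of the original code): once the first
 * 32 bits of a region have been copied, the shared size field can be
 * peeked to size the item's region array, as the function below does:
 *
 *	in_f = (xfs_inode_log_format_t *)ptr;	-- any format will do
 *	item->ri_total = in_f->ilf_size;	-- regions in this item
 */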
1410 STATIC int
1411 xlog_recover_add_to_trans(
1412         xlog_recover_t          *trans,
1413         xfs_caddr_t             dp,
1414         int                     len)
1415 {
1416         xfs_inode_log_format_t  *in_f;                  /* any will do */
1417         xlog_recover_item_t     *item;
1418         xfs_caddr_t             ptr;
1419
1420         if (!len)
1421                 return 0;
1422         item = trans->r_itemq;
1423         if (item == NULL) {
1424                 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
1425                 if (len == sizeof(xfs_trans_header_t))
1426                         xlog_recover_add_item(&trans->r_itemq);
1427                 memcpy(&trans->r_theader, dp, len); /* dest, src, len */
1428                 return 0;
1429         }
1430
1431         ptr = kmem_alloc(len, KM_SLEEP);
1432         memcpy(ptr, dp, len);
1433         in_f = (xfs_inode_log_format_t *)ptr;
1434
1435         if (item->ri_prev->ri_total != 0 &&
1436              item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
1437                 xlog_recover_add_item(&trans->r_itemq);
1438         }
1439         item = trans->r_itemq;
1440         item = item->ri_prev;
1441
1442         if (item->ri_total == 0) {              /* first region to be added */
1443                 item->ri_total  = in_f->ilf_size;
1444                 ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
1445                 item->ri_buf = kmem_zalloc((item->ri_total *
1446                                             sizeof(xfs_log_iovec_t)), KM_SLEEP);
1447         }
1448         ASSERT(item->ri_total > item->ri_cnt);
1449         /* Description region is ri_buf[0] */
1450         item->ri_buf[item->ri_cnt].i_addr = ptr;
1451         item->ri_buf[item->ri_cnt].i_len  = len;
1452         item->ri_cnt++;
1453         return 0;
1454 }
1455
1456 STATIC void
1457 xlog_recover_new_tid(
1458         xlog_recover_t          **q,
1459         xlog_tid_t              tid,
1460         xfs_lsn_t               lsn)
1461 {
1462         xlog_recover_t          *trans;
1463
1464         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1465         trans->r_log_tid   = tid;
1466         trans->r_lsn       = lsn;
1467         xlog_recover_put_hashq(q, trans);
1468 }
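
/*
 * Usage sketch (caller assumed, for illustration only): the record
 * processing loop looks a transaction up by tid and opens a new one
 * on the hash queue when none is found:
 *
 *	if (xlog_recover_find_tid(q, tid) == NULL)
 *		xlog_recover_new_tid(&q, tid, lsn);
 */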
1469
1470 STATIC int
1471 xlog_recover_unlink_tid(
1472         xlog_recover_t          **q,
1473         xlog_recover_t          *trans)
1474 {
1475         xlog_recover_t          *tp;
1476         int                     found = 0;
1477
1478         ASSERT(trans != NULL);
1479         if (trans == *q) {
1480                 *q = (*q)->r_next;
1481         } else {
1482                 tp = *q;
1483                 while (tp != NULL) {
1484                         if (tp->r_next == trans) {
1485                                 found = 1;
1486                                 break;
1487                         }
1488                         tp = tp->r_next;
1489                 }
1490                 if (!found) {
1491                         xlog_warn(
1492                              "XFS: xlog_recover_unlink_tid: trans not found");
1493                         ASSERT(0);
1494                         return XFS_ERROR(EIO);
1495                 }
1496                 tp->r_next = tp->r_next->r_next;
1497         }
1498         return 0;
1499 }
1500
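/*
 * The item queues handled below are circular doubly linked lists: *q
 * points at the head and (*q)->ri_prev is the tail, so a "back" insert
 * links the new item in just before the head and leaves *q unchanged.
 */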
1501 STATIC void
1502 xlog_recover_insert_item_backq(
1503         xlog_recover_item_t     **q,
1504         xlog_recover_item_t     *item)
1505 {
1506         if (*q == NULL) {
1507                 item->ri_prev = item->ri_next = item;
1508                 *q = item;
1509         } else {
1510                 item->ri_next           = *q;
1511                 item->ri_prev           = (*q)->ri_prev;
1512                 (*q)->ri_prev           = item;
1513                 item->ri_prev->ri_next  = item;
1514         }
1515 }
1516
1517 STATIC void
1518 xlog_recover_insert_item_frontq(
1519         xlog_recover_item_t     **q,
1520         xlog_recover_item_t     *item)
1521 {
1522         xlog_recover_insert_item_backq(q, item);
1523         *q = item;
1524 }
1525
1526 STATIC int
1527 xlog_recover_reorder_trans(
1528         xlog_t                  *log,
1529         xlog_recover_t          *trans)
1530 {
1531         xlog_recover_item_t     *first_item, *itemq, *itemq_next;
1532         xfs_buf_log_format_t    *buf_f;
1533         xfs_buf_log_format_v1_t *obuf_f;
1534         ushort                  flags = 0;
1535
1536         first_item = itemq = trans->r_itemq;
1537         trans->r_itemq = NULL;
1538         do {
1539                 itemq_next = itemq->ri_next;
1540                 buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
1541                 switch (ITEM_TYPE(itemq)) {
1542                 case XFS_LI_BUF:
1543                         flags = buf_f->blf_flags;
1544                         break;
1545                 case XFS_LI_6_1_BUF:
1546                 case XFS_LI_5_3_BUF:
1547                         obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
1548                         flags = obuf_f->blf_flags;
1549                         break;
1550                 }
1551
1552                 switch (ITEM_TYPE(itemq)) {
1553                 case XFS_LI_BUF:
1554                 case XFS_LI_6_1_BUF:
1555                 case XFS_LI_5_3_BUF:
1556                         if (!(flags & XFS_BLI_CANCEL)) {
1557                                 xlog_recover_insert_item_frontq(&trans->r_itemq,
1558                                                                 itemq);
1559                                 break;
1560                         }
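                        /* cancelled buffers fall through to the back of the queue */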
1561                 case XFS_LI_INODE:
1562                 case XFS_LI_6_1_INODE:
1563                 case XFS_LI_5_3_INODE:
1564                 case XFS_LI_DQUOT:
1565                 case XFS_LI_QUOTAOFF:
1566                 case XFS_LI_EFD:
1567                 case XFS_LI_EFI:
1568                         xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
1569                         break;
1570                 default:
1571                         xlog_warn(
1572         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1573                         ASSERT(0);
1574                         return XFS_ERROR(EIO);
1575                 }
1576                 itemq = itemq_next;
1577         } while (first_item != itemq);
1578         return 0;
1579 }
1580
1581 /*
1582  * Build up the table of buf cancel records so that we don't replay
1583  * cancelled data in the second pass.  For buffer records that are
1584  * not cancel records, there is nothing to do here so we just return.
1585  *
1586  * If we get a cancel record which is already in the table, this indicates
1587  * that the buffer was cancelled multiple times.  In order to ensure
1588  * that during pass 2 we keep the record in the table until we reach its
1589  * last occurrence in the log, we keep a reference count in the cancel
1590  * record in the table to tell us how many times we expect to see this
1591  * record during the second pass.
1592  */
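/*
 * Worked example (counts assumed): a buffer cancelled three times in
 * the log yields a single table entry with bc_refcount == 3; pass 2
 * then decrements the count at each matching cancel item and frees the
 * entry on the last reference (see xlog_check_buffer_cancelled() below).
 */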
1593 STATIC void
1594 xlog_recover_do_buffer_pass1(
1595         xlog_t                  *log,
1596         xfs_buf_log_format_t    *buf_f)
1597 {
1598         xfs_buf_cancel_t        *bcp;
1599         xfs_buf_cancel_t        *nextp;
1600         xfs_buf_cancel_t        *prevp;
1601         xfs_buf_cancel_t        **bucket;
1602         xfs_buf_log_format_v1_t *obuf_f;
1603         xfs_daddr_t             blkno = 0;
1604         uint                    len = 0;
1605         ushort                  flags = 0;
1606
1607         switch (buf_f->blf_type) {
1608         case XFS_LI_BUF:
1609                 blkno = buf_f->blf_blkno;
1610                 len = buf_f->blf_len;
1611                 flags = buf_f->blf_flags;
1612                 break;
1613         case XFS_LI_6_1_BUF:
1614         case XFS_LI_5_3_BUF:
1615                 obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
1616                 blkno = (xfs_daddr_t) obuf_f->blf_blkno;
1617                 len = obuf_f->blf_len;
1618                 flags = obuf_f->blf_flags;
1619                 break;
1620         }
1621
1622         /*
1623          * If this isn't a cancel buffer item, then just return.
1624          */
1625         if (!(flags & XFS_BLI_CANCEL))
1626                 return;
1627
1628         /*
1629          * Insert an xfs_buf_cancel record into the hash table of
1630          * them.  If there is already an identical record, bump
1631          * its reference count.
1632          */
1633         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1634                                           XLOG_BC_TABLE_SIZE];
1635         /*
1636          * If the hash bucket is empty then just insert a new record into
1637          * the bucket.
1638          */
1639         if (*bucket == NULL) {
1640                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1641                                                      KM_SLEEP);
1642                 bcp->bc_blkno = blkno;
1643                 bcp->bc_len = len;
1644                 bcp->bc_refcount = 1;
1645                 bcp->bc_next = NULL;
1646                 *bucket = bcp;
1647                 return;
1648         }
1649
1650         /*
1651          * The hash bucket is not empty, so search for duplicates of our
1652          * record.  If we find one then just bump its refcount.  If not,
1653          * add a new record at the end of the list.
1654          */
1655         prevp = NULL;
1656         nextp = *bucket;
1657         while (nextp != NULL) {
1658                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1659                         nextp->bc_refcount++;
1660                         return;
1661                 }
1662                 prevp = nextp;
1663                 nextp = nextp->bc_next;
1664         }
1665         ASSERT(prevp != NULL);
1666         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1667                                              KM_SLEEP);
1668         bcp->bc_blkno = blkno;
1669         bcp->bc_len = len;
1670         bcp->bc_refcount = 1;
1671         bcp->bc_next = NULL;
1672         prevp->bc_next = bcp;
1673 }
1674
1675 /*
1676  * Check to see whether the buffer being recovered has a corresponding
1677  * entry in the buffer cancel record table.  If it does then return 1
1678  * so that it will be cancelled, otherwise return 0.  If the buffer is
1679  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1680  * the refcount on the entry in the table and remove it from the table
1681  * if this is the last reference.
1682  *
1683  * We remove the cancel record from the table when we encounter its
1684  * last occurrence in the log so that if the same buffer is re-used
1685  * again after its last cancellation we actually replay the changes
1686  * made at that point.
1687  */
1688 STATIC int
1689 xlog_check_buffer_cancelled(
1690         xlog_t                  *log,
1691         xfs_daddr_t             blkno,
1692         uint                    len,
1693         ushort                  flags)
1694 {
1695         xfs_buf_cancel_t        *bcp;
1696         xfs_buf_cancel_t        *prevp;
1697         xfs_buf_cancel_t        **bucket;
1698
1699         if (log->l_buf_cancel_table == NULL) {
1700                 /*
1701                  * There is nothing in the table built in pass one,
1702                  * so this buffer must not be cancelled.
1703                  */
1704                 ASSERT(!(flags & XFS_BLI_CANCEL));
1705                 return 0;
1706         }
1707
1708         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1709                                           XLOG_BC_TABLE_SIZE];
1710         bcp = *bucket;
1711         if (bcp == NULL) {
1712                 /*
1713                  * There is no corresponding entry in the table built
1714                  * in pass one, so this buffer has not been cancelled.
1715                  */
1716                 ASSERT(!(flags & XFS_BLI_CANCEL));
1717                 return 0;
1718         }
1719
1720         /*
1721          * Search for an entry in the buffer cancel table that
1722          * matches our buffer.
1723          */
1724         prevp = NULL;
1725         while (bcp != NULL) {
1726                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1727                         /*
1728                          * We've got a match, so return 1 so that the
1729                          * recovery of this buffer is cancelled.
1730                          * If this buffer is actually a buffer cancel
1731                          * log item, then decrement the refcount on the
1732                          * one in the table and remove it if this is the
1733                          * last reference.
1734                          */
1735                         if (flags & XFS_BLI_CANCEL) {
1736                                 bcp->bc_refcount--;
1737                                 if (bcp->bc_refcount == 0) {
1738                                         if (prevp == NULL) {
1739                                                 *bucket = bcp->bc_next;
1740                                         } else {
1741                                                 prevp->bc_next = bcp->bc_next;
1742                                         }
1743                                         kmem_free(bcp,
1744                                                   sizeof(xfs_buf_cancel_t));
1745                                 }
1746                         }
1747                         return 1;
1748                 }
1749                 prevp = bcp;
1750                 bcp = bcp->bc_next;
1751         }
1752         /*
1753          * We didn't find a corresponding entry in the table, so
1754          * return 0 so that the buffer is NOT cancelled.
1755          */
1756         ASSERT(!(flags & XFS_BLI_CANCEL));
1757         return 0;
1758 }
1759
1760 STATIC int
1761 xlog_recover_do_buffer_pass2(
1762         xlog_t                  *log,
1763         xfs_buf_log_format_t    *buf_f)
1764 {
1765         xfs_buf_log_format_v1_t *obuf_f;
1766         xfs_daddr_t             blkno = 0;
1767         ushort                  flags = 0;
1768         uint                    len = 0;
1769
1770         switch (buf_f->blf_type) {
1771         case XFS_LI_BUF:
1772                 blkno = buf_f->blf_blkno;
1773                 flags = buf_f->blf_flags;
1774                 len = buf_f->blf_len;
1775                 break;
1776         case XFS_LI_6_1_BUF:
1777         case XFS_LI_5_3_BUF:
1778                 obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
1779                 blkno = (xfs_daddr_t) obuf_f->blf_blkno;
1780                 flags = obuf_f->blf_flags;
1781                 len = obuf_f->blf_len;
1782                 break;
1783         }
1784
1785         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1786 }
1787
1788 /*
1789  * Perform recovery for a buffer full of inodes.  In these buffers,
1790  * the only data which should be recovered is that which corresponds
1791  * to the di_next_unlinked pointers in the on disk inode structures.
1792  * The rest of the data for the inodes is always logged through the
1793  * inodes themselves rather than the inode buffer and is recovered
1794  * in xlog_recover_do_inode_trans().
1795  *
1796  * The only time when buffers full of inodes are fully recovered is
1797  * when the buffer is full of newly allocated inodes.  In this case
1798  * the buffer will not be marked as an inode buffer and so will be
1799  * sent to xlog_recover_do_reg_buffer() below during recovery.
1800  */
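/*
 * Worked example (assuming 256 byte inodes): the logged copy of the
 * di_next_unlinked field for the i-th inode in the buffer lives at
 * byte offset
 *
 *	i * 256 + offsetof(xfs_dinode_t, di_next_unlinked)
 *
 * and only that field is patched into the on-disk buffer below.
 */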
1801 STATIC int
1802 xlog_recover_do_inode_buffer(
1803         xfs_mount_t             *mp,
1804         xlog_recover_item_t     *item,
1805         xfs_buf_t               *bp,
1806         xfs_buf_log_format_t    *buf_f)
1807 {
1808         int                     i;
1809         int                     item_index;
1810         int                     bit;
1811         int                     nbits;
1812         int                     reg_buf_offset;
1813         int                     reg_buf_bytes;
1814         int                     next_unlinked_offset;
1815         int                     inodes_per_buf;
1816         xfs_agino_t             *logged_nextp;
1817         xfs_agino_t             *buffer_nextp;
1818         xfs_buf_log_format_v1_t *obuf_f;
1819         unsigned int            *data_map = NULL;
1820         unsigned int            map_size = 0;
1821
1822         switch (buf_f->blf_type) {
1823         case XFS_LI_BUF:
1824                 data_map = buf_f->blf_data_map;
1825                 map_size = buf_f->blf_map_size;
1826                 break;
1827         case XFS_LI_6_1_BUF:
1828         case XFS_LI_5_3_BUF:
1829                 obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
1830                 data_map = obuf_f->blf_data_map;
1831                 map_size = obuf_f->blf_map_size;
1832                 break;
1833         }
1834         /*
1835          * Set the variables corresponding to the current region to
1836          * 0 so that we'll initialize them on the first pass through
1837          * the loop.
1838          */
1839         reg_buf_offset = 0;
1840         reg_buf_bytes = 0;
1841         bit = 0;
1842         nbits = 0;
1843         item_index = 0;
1844         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1845         for (i = 0; i < inodes_per_buf; i++) {
1846                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1847                         offsetof(xfs_dinode_t, di_next_unlinked);
1848
1849                 while (next_unlinked_offset >=
1850                        (reg_buf_offset + reg_buf_bytes)) {
1851                         /*
1852                          * The next di_next_unlinked field is beyond
1853                          * the current logged region.  Find the next
1854                          * logged region that contains or is beyond
1855                          * the current di_next_unlinked field.
1856                          */
1857                         bit += nbits;
1858                         bit = xfs_next_bit(data_map, map_size, bit);
1859
1860                         /*
1861                          * If there are no more logged regions in the
1862                          * buffer, then we're done.
1863                          */
1864                         if (bit == -1) {
1865                                 return 0;
1866                         }
1867
1868                         nbits = xfs_contig_bits(data_map, map_size,
1869                                                          bit);
1870                         ASSERT(nbits > 0);
1871                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1872                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1873                         item_index++;
1874                 }
1875
1876                 /*
1877                  * If the current logged region starts after the current
1878                  * di_next_unlinked field, then move on to the next
1879                  * di_next_unlinked field.
1880                  */
1881                 if (next_unlinked_offset < reg_buf_offset) {
1882                         continue;
1883                 }
1884
1885                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1886                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1887                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1888
1889                 /*
1890                  * The current logged region contains a copy of the
1891                  * current di_next_unlinked field.  Extract its value
1892                  * and copy it to the buffer copy.
1893                  */
1894                 logged_nextp = (xfs_agino_t *)
1895                                ((char *)(item->ri_buf[item_index].i_addr) +
1896                                 (next_unlinked_offset - reg_buf_offset));
1897                 if (unlikely(*logged_nextp == 0)) {
1898                         xfs_fs_cmn_err(CE_ALERT, mp,
1899                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1900                                 item, bp);
1901                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1902                                          XFS_ERRLEVEL_LOW, mp);
1903                         return XFS_ERROR(EFSCORRUPTED);
1904                 }
1905
1906                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1907                                               next_unlinked_offset);
1908                 INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp);
1909         }
1910
1911         return 0;
1912 }
1913
1914 /*
1915  * Perform a 'normal' buffer recovery.  Each logged region of the
1916  * buffer should be copied over the corresponding region in the
1917  * given buffer.  The bitmap in the buf log format structure indicates
1918  * where to place the logged data.
1919  */
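/*
 * Bitmap encoding in miniature: each map bit covers one chunk of
 * 1 << XFS_BLI_SHIFT bytes, so a run of nbits set bits starting at
 * bit describes one logged region,
 *
 *	offset = bit << XFS_BLI_SHIFT;
 *	length = nbits << XFS_BLI_SHIFT;
 *
 * and the regions appear in ri_buf[] in the same order, starting at
 * index 1 (index 0 holds the buf format structure).
 */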
1920 /*ARGSUSED*/
1921 STATIC void
1922 xlog_recover_do_reg_buffer(
1923         xfs_mount_t             *mp,
1924         xlog_recover_item_t     *item,
1925         xfs_buf_t               *bp,
1926         xfs_buf_log_format_t    *buf_f)
1927 {
1928         int                     i;
1929         int                     bit;
1930         int                     nbits;
1931         xfs_buf_log_format_v1_t *obuf_f;
1932         unsigned int            *data_map = NULL;
1933         unsigned int            map_size = 0;
1934         int                     error;
1935
1936         switch (buf_f->blf_type) {
1937         case XFS_LI_BUF:
1938                 data_map = buf_f->blf_data_map;
1939                 map_size = buf_f->blf_map_size;
1940                 break;
1941         case XFS_LI_6_1_BUF:
1942         case XFS_LI_5_3_BUF:
1943                 obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
1944                 data_map = obuf_f->blf_data_map;
1945                 map_size = obuf_f->blf_map_size;
1946                 break;
1947         }
1948         bit = 0;
1949         i = 1;  /* 0 is the buf format structure */
1950         while (1) {
1951                 bit = xfs_next_bit(data_map, map_size, bit);
1952                 if (bit == -1)
1953                         break;
1954                 nbits = xfs_contig_bits(data_map, map_size, bit);
1955                 ASSERT(nbits > 0);
1956                 ASSERT(item->ri_buf[i].i_addr != NULL);
1957                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1958                 ASSERT(XFS_BUF_COUNT(bp) >=
1959                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1960
1961                 /*
1962                  * Do a sanity check if this is a dquot buffer. Just checking
1963                  * the first dquot in the buffer should do. XXX: this is
1964                  * probably a good thing to do for other buf types also.
1965                  */
1966                 error = 0;
1967                 if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1968                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1969                                                item->ri_buf[i].i_addr,
1970                                                -1, 0, XFS_QMOPT_DOWARN,
1971                                                "dquot_buf_recover");
1972                 }
1973                 if (!error)
1974                         memcpy(xfs_buf_offset(bp,
1975                                 (uint)bit << XFS_BLI_SHIFT),    /* dest */
1976                                 item->ri_buf[i].i_addr,         /* source */
1977                                 nbits<<XFS_BLI_SHIFT);          /* length */
1978                 i++;
1979                 bit += nbits;
1980         }
1981
1982         /* Shouldn't be any more regions */
1983         ASSERT(i == item->ri_total);
1984 }
1985
1986 /*
1987  * Do some primitive error checking on ondisk dquot data structures.
1988  */
1989 int
1990 xfs_qm_dqcheck(
1991         xfs_disk_dquot_t *ddq,
1992         xfs_dqid_t       id,
1993         uint             type,    /* used only when IO_dorepair is true */
1994         uint             flags,
1995         char             *str)
1996 {
1997         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1998         int             errs = 0;
1999
2000         /*
2001          * We can encounter an uninitialized dquot buffer for 2 reasons:
2002          * 1. If we crash while deleting the quotainode(s), and those blks got
2003          *    used for user data. This is because we take the path of regular
2004          *    file deletion; however, the size field of quotainodes is never
2005          *    updated, so all the tricks that we play in itruncate_finish
2006          *    don't quite matter.
2007          *
2008          * 2. We don't play the quota buffers when there's a quotaoff logitem.
2009          *    But the allocation will be replayed so we'll end up with an
2010          *    uninitialized quota block.
2011          *
2012          * This is all fine; things are still consistent, and we haven't lost
2013          * any quota information. Just don't complain about bad dquot blks.
2014          */
2015         if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
2016                 if (flags & XFS_QMOPT_DOWARN)
2017                         cmn_err(CE_ALERT,
2018                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2019                         str, id,
2020                         INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
2021                 errs++;
2022         }
2023         if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
2024                 if (flags & XFS_QMOPT_DOWARN)
2025                         cmn_err(CE_ALERT,
2026                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2027                         str, id,
2028                         INT_GET(ddq->d_version, ARCH_CONVERT), XFS_DQUOT_VERSION);
2029                 errs++;
2030         }
2031
2032         if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
2033             INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
2034                 if (flags & XFS_QMOPT_DOWARN)
2035                         cmn_err(CE_ALERT,
2036                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2037                         str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
2038                 errs++;
2039         }
2040
2041         if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
2042                 if (flags & XFS_QMOPT_DOWARN)
2043                         cmn_err(CE_ALERT,
2044                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2045                         "0x%x expected, found id 0x%x",
2046                         str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
2047                 errs++;
2048         }
2049
2050         if (!errs && ddq->d_id) {
2051                 if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
2052                     INT_GET(ddq->d_bcount, ARCH_CONVERT) >=
2053                                 INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
2054                         if (!ddq->d_btimer) {
2055                                 if (flags & XFS_QMOPT_DOWARN)
2056                                         cmn_err(CE_ALERT,
2057                                         "%s : Dquot ID 0x%x (0x%p) "
2058                                         "BLK TIMER NOT STARTED",
2059                                         str, (int)
2060                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2061                                 errs++;
2062                         }
2063                 }
2064                 if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
2065                     INT_GET(ddq->d_icount, ARCH_CONVERT) >=
2066                                 INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
2067                         if (!ddq->d_itimer) {
2068                                 if (flags & XFS_QMOPT_DOWARN)
2069                                         cmn_err(CE_ALERT,
2070                                         "%s : Dquot ID 0x%x (0x%p) "
2071                                         "INODE TIMER NOT STARTED",
2072                                         str, (int)
2073                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2074                                 errs++;
2075                         }
2076                 }
2077                 if (INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT) &&
2078                     INT_GET(ddq->d_rtbcount, ARCH_CONVERT) >=
2079                                 INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT)) {
2080                         if (!ddq->d_rtbtimer) {
2081                                 if (flags & XFS_QMOPT_DOWARN)
2082                                         cmn_err(CE_ALERT,
2083                                         "%s : Dquot ID 0x%x (0x%p) "
2084                                         "RTBLK TIMER NOT STARTED",
2085                                         str, (int)
2086                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2087                                 errs++;
2088                         }
2089                 }
2090         }
2091
2092         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2093                 return errs;
2094
2095         if (flags & XFS_QMOPT_DOWARN)
2096                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2097
2098         /*
2099          * Typically, a repair is only requested by quotacheck.
2100          */
2101         ASSERT(id != -1);
2102         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2103         memset(d, 0, sizeof(xfs_dqblk_t));
2104         INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
2105         INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
2106         INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
2107         INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);
2108
2109         return errs;
2110 }
2111
2112 /*
2113  * Perform a dquot buffer recovery.
2114  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2115  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2116  * Else, treat it as a regular buffer and do recovery.
2117  */
2118 STATIC void
2119 xlog_recover_do_dquot_buffer(
2120         xfs_mount_t             *mp,
2121         xlog_t                  *log,
2122         xlog_recover_item_t     *item,
2123         xfs_buf_t               *bp,
2124         xfs_buf_log_format_t    *buf_f)
2125 {
2126         uint                    type;
2127
2128         /*
2129          * Filesystems are required to send in quota flags at mount time.
2130          */
2131         if (mp->m_qflags == 0) {
2132                 return;
2133         }
2134
2135         type = 0;
2136         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2137                 type |= XFS_DQ_USER;
2138         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2139                 type |= XFS_DQ_GROUP;
2140         /*
2141          * This type of quotas was turned off, so ignore this buffer
2142          */
2143         if (log->l_quotaoffs_flag & type)
2144                 return;
2145
2146         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2147 }
2148
2149 /*
2150  * This routine replays a modification made to a buffer at runtime.
2151  * There are actually two types of buffer, regular and inode, which
2152  * are handled differently.  Inode buffers are special in that we
2153  * only recover a specific set of data from them, namely
2154  * the inode di_next_unlinked fields.  This is because all other inode
2155  * data is actually logged via inode records and any data we replay
2156  * here which overlaps that may be stale.
2157  *
2158  * When meta-data buffers are freed at run time we log a buffer item
2159  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2160  * of the buffer in the log should not be replayed at recovery time.
2161  * This is so that if the blocks covered by the buffer are reused for
2162  * file data before we crash we don't end up replaying old, freed
2163  * meta-data into a user's file.
2164  *
2165  * To handle the cancellation of buffer log items, we make two passes
2166  * over the log during recovery.  During the first we build a table of
2167  * those buffers which have been cancelled, and during the second we
2168  * only replay those buffers which do not have corresponding cancel
2169  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2170  * for more details on the implementation of the table of cancel records.
2171  */
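/*
 * The two passes in miniature (illustrative):
 *
 *	pass 1:	xlog_recover_do_buffer_pass1()	- note XFS_BLI_CANCEL items
 *	pass 2:	xlog_recover_do_buffer_pass2()	- skip replay if cancelled
 */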
2172 STATIC int
2173 xlog_recover_do_buffer_trans(
2174         xlog_t                  *log,
2175         xlog_recover_item_t     *item,
2176         int                     pass)
2177 {
2178         xfs_buf_log_format_t    *buf_f;
2179         xfs_buf_log_format_v1_t *obuf_f;
2180         xfs_mount_t             *mp;
2181         xfs_buf_t               *bp;
2182         int                     error;
2183         int                     cancel;
2184         xfs_daddr_t             blkno;
2185         int                     len;
2186         ushort                  flags;
2187
2188         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2189
2190         if (pass == XLOG_RECOVER_PASS1) {
2191                 /*
2192                  * In this pass we're only looking for buf items
2193                  * with the XFS_BLI_CANCEL bit set.
2194                  */
2195                 xlog_recover_do_buffer_pass1(log, buf_f);
2196                 return 0;
2197         } else {
2198                 /*
2199                  * In this pass we want to recover all the buffers
2200                  * which have not been cancelled and are not
2201                  * cancellation buffers themselves.  The routine
2202                  * we call here will tell us whether or not to
2203                  * continue with the replay of this buffer.
2204                  */
2205                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2206                 if (cancel) {
2207                         return 0;
2208                 }
2209         }
2210         switch (buf_f->blf_type) {
2211         case XFS_LI_BUF:
2212                 blkno = buf_f->blf_blkno;
2213                 len = buf_f->blf_len;
2214                 flags = buf_f->blf_flags;
2215                 break;
2216         case XFS_LI_6_1_BUF:
2217         case XFS_LI_5_3_BUF:
2218                 obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
2219                 blkno = obuf_f->blf_blkno;
2220                 len = obuf_f->blf_len;
2221                 flags = obuf_f->blf_flags;
2222                 break;
2223         default:
2224                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2225                         "xfs_log_recover: unknown buffer type 0x%x, dev %s",
2226                         buf_f->blf_type, XFS_BUFTARG_NAME(log->l_targ));
2227                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2228                                  XFS_ERRLEVEL_LOW, log->l_mp);
2229                 return XFS_ERROR(EFSCORRUPTED);
2230         }
2231
2232         mp = log->l_mp;
2233         if (flags & XFS_BLI_INODE_BUF) {
2234                 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2235                                                                 XFS_BUF_LOCK);
2236         } else {
2237                 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2238         }
2239         if (XFS_BUF_ISERROR(bp)) {
2240                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2241                                   bp, blkno);
2242                 error = XFS_BUF_GETERROR(bp);
2243                 xfs_buf_relse(bp);
2244                 return error;
2245         }
2246
2247         error = 0;
2248         if (flags & XFS_BLI_INODE_BUF) {
2249                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2250         } else if (flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) {
2251                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2252         } else {
2253                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2254         }
2255         if (error)
2256                 return XFS_ERROR(error);
2257
2258         /*
2259          * Perform delayed write on the buffer.  Asynchronous writes will be
2260          * slower when taking into account all the buffers to be flushed.
2261          *
2262          * Also make sure that only inode buffers with good sizes stay in
2263          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2264          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2265          * buffers in the log can be a different size if the log was generated
2266          * by an older kernel using unclustered inode buffers or a newer kernel
2267          * running with a different inode cluster size.  Regardless, if
2268          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2269          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2270          * the buffer out of the buffer cache so that the buffer won't
2271          * overlap with future reads of those inodes.
2272          */
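        /*
         * For example (sizes assumed): with 4k filesystem blocks and an
         * 8k inode cluster, the only inode buffer size acceptable here
         * is 8192 bytes; any other size is staled and written out
         * synchronously so it drops out of the buffer cache.
         */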
2273         if (XFS_DINODE_MAGIC ==
2274             INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
2275             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2276                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2277                 XFS_BUF_STALE(bp);
2278                 error = xfs_bwrite(mp, bp);
2279         } else {
2280                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2281                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2282                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2283                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2284                 xfs_bdwrite(mp, bp);
2285         }
2286
2287         return (error);
2288 }
2289
2290 STATIC int
2291 xlog_recover_do_inode_trans(
2292         xlog_t                  *log,
2293         xlog_recover_item_t     *item,
2294         int                     pass)
2295 {
2296         xfs_inode_log_format_t  *in_f;
2297         xfs_mount_t             *mp;
2298         xfs_buf_t               *bp;
2299         xfs_imap_t              imap;
2300         xfs_dinode_t            *dip;
2301         xfs_ino_t               ino;
2302         int                     len;
2303         xfs_caddr_t             src;
2304         xfs_caddr_t             dest;
2305         int                     error;
2306         int                     attr_index;
2307         uint                    fields;
2308         xfs_dinode_core_t       *dicp;
2309
2310         if (pass == XLOG_RECOVER_PASS1) {
2311                 return 0;
2312         }
2313
2314         in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2315         ino = in_f->ilf_ino;
2316         mp = log->l_mp;
2317         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2318                 imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
2319                 imap.im_len = in_f->ilf_len;
2320                 imap.im_boffset = in_f->ilf_boffset;
2321         } else {
2322                 /*
2323                  * It's an old inode format record.  We don't know where
2324                  * its cluster is located on disk, and we can't allow
2325                  * xfs_imap() to figure it out because the inode btrees
2326                  * are not ready to be used.  Therefore do not pass the
2327                  * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
2328                  * us only the single block in which the inode lives
2329                  * rather than its cluster, so we must make sure to
2330                  * invalidate the buffer when we write it out below.
2331                  */
2332                 imap.im_blkno = 0;
2333                 xfs_imap(log->l_mp, NULL, ino, &imap, 0);
2334         }
2335
2336         /*
2337          * Inode buffers can be freed, look out for it,
2338          * and do not replay the inode.
2339          */
2340         if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0))
2341                 return 0;
2342
2343         bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
2344                                                                 XFS_BUF_LOCK);
2345         if (XFS_BUF_ISERROR(bp)) {
2346                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2347                                   bp, imap.im_blkno);
2348                 error = XFS_BUF_GETERROR(bp);
2349                 xfs_buf_relse(bp);
2350                 return error;
2351         }
2352         error = 0;
2353         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2354         dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
2355
2356         /*
2357          * Make sure the place we're flushing out to really looks
2358          * like an inode!
2359          */
2360         if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) {
2361                 xfs_buf_relse(bp);
2362                 xfs_fs_cmn_err(CE_ALERT, mp,
2363                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2364                         dip, bp, ino);
2365                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2366                                  XFS_ERRLEVEL_LOW, mp);
2367                 return XFS_ERROR(EFSCORRUPTED);
2368         }
2369         dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr);
2370         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2371                 xfs_buf_relse(bp);
2372                 xfs_fs_cmn_err(CE_ALERT, mp,
2373                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2374                         item, ino);
2375                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2376                                  XFS_ERRLEVEL_LOW, mp);
2377                 return XFS_ERROR(EFSCORRUPTED);
2378         }
2379
2380         /* Skip replay when the on disk inode is newer than the log one */
2381         if (dicp->di_flushiter <
2382             INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)) {
2383                 /*
2384                  * Deal with the wrap case: once the counter wraps,
2385                  * DI_MAX_FLUSH is treated as less than small numbers
2386                  */
2387                 if ((INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)
2388                                                         == DI_MAX_FLUSH) &&
2389                     (dicp->di_flushiter < (DI_MAX_FLUSH>>1))) {
2390                         /* do nothing */
2391                 } else {
2392                         xfs_buf_relse(bp);
2393                         return 0;
2394                 }
2395         }
2396         /* Take the opportunity to reset the flush iteration count */
2397         dicp->di_flushiter = 0;
2398
2399         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2400                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2401                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2402                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2403                                          XFS_ERRLEVEL_LOW, mp, dicp);
2404                         xfs_buf_relse(bp);
2405                         xfs_fs_cmn_err(CE_ALERT, mp,
2406                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2407                                 item, dip, bp, ino);
2408                         return XFS_ERROR(EFSCORRUPTED);
2409                 }
2410         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2411                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2412                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2413                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2414                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2415                                              XFS_ERRLEVEL_LOW, mp, dicp);
2416                         xfs_buf_relse(bp);
2417                         xfs_fs_cmn_err(CE_ALERT, mp,
2418                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2419                                 item, dip, bp, ino);
2420                         return XFS_ERROR(EFSCORRUPTED);
2421                 }
2422         }
2423         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2424                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2425                                      XFS_ERRLEVEL_LOW, mp, dicp);
2426                 xfs_buf_relse(bp);
2427                 xfs_fs_cmn_err(CE_ALERT, mp,
2428                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2429                         item, dip, bp, ino,
2430                         dicp->di_nextents + dicp->di_anextents,
2431                         dicp->di_nblocks);
2432                 return XFS_ERROR(EFSCORRUPTED);
2433         }
2434         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2435                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2436                                      XFS_ERRLEVEL_LOW, mp, dicp);
2437                 xfs_buf_relse(bp);
2438                 xfs_fs_cmn_err(CE_ALERT, mp,
2439                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2440                         item, dip, bp, ino, dicp->di_forkoff);
2441                 return XFS_ERROR(EFSCORRUPTED);
2442         }
2443         if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
2444                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2445                                      XFS_ERRLEVEL_LOW, mp, dicp);
2446                 xfs_buf_relse(bp);
2447                 xfs_fs_cmn_err(CE_ALERT, mp,
2448                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2449                         item->ri_buf[1].i_len, item);
2450                 return XFS_ERROR(EFSCORRUPTED);
2451         }
2452
2453         /* The core is in in-core format */
2454         xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
2455                               (xfs_dinode_core_t*)item->ri_buf[1].i_addr, -1);
2456
2457         /* the rest is in on-disk format */
2458         if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
2459                 memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
2460                         item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
2461                         item->ri_buf[1].i_len  - sizeof(xfs_dinode_core_t));
2462         }
2463
2464         fields = in_f->ilf_fields;
2465         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2466         case XFS_ILOG_DEV:
2467                 INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev);
2468
2469                 break;
2470         case XFS_ILOG_UUID:
2471                 dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
2472                 break;
2473         }
2474
2475         if (in_f->ilf_size == 2)
2476                 goto write_inode_buffer;
2477         len = item->ri_buf[2].i_len;
2478         src = item->ri_buf[2].i_addr;
2479         ASSERT(in_f->ilf_size <= 4);
2480         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2481         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2482                (len == in_f->ilf_dsize));
2483
2484         switch (fields & XFS_ILOG_DFORK) {
2485         case XFS_ILOG_DDATA:
2486         case XFS_ILOG_DEXT:
2487                 memcpy(&dip->di_u, src, len);
2488                 break;
2489
2490         case XFS_ILOG_DBROOT:
2491                 xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2492                                  &(dip->di_u.di_bmbt),
2493                                  XFS_DFORK_DSIZE(dip, mp));
2494                 break;
2495
2496         default:
2497                 /*
2498                  * There are no data fork flags set.
2499                  */
2500                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2501                 break;
2502         }
2503
2504         /*
2505          * If we logged any attribute data, recover it.  There may or
2506          * may not have been any other non-core data logged in this
2507          * transaction.
2508          */
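        /*
         * Region layout, for illustration: ri_buf[0] holds the log
         * format header, ri_buf[1] the inode core, ri_buf[2] the data
         * fork when one was logged, and the attr fork follows whichever
         * of those exist -- hence index 3 with a data fork, else 2.
         */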
2509         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2510                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2511                         attr_index = 3;
2512                 } else {
2513                         attr_index = 2;
2514                 }
2515                 len = item->ri_buf[attr_index].i_len;
2516                 src = item->ri_buf[attr_index].i_addr;
2517                 ASSERT(len == in_f->ilf_asize);
2518
2519                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2520                 case XFS_ILOG_ADATA:
2521                 case XFS_ILOG_AEXT:
2522                         dest = XFS_DFORK_APTR(dip);
2523                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2524                         memcpy(dest, src, len);
2525                         break;
2526
2527                 case XFS_ILOG_ABROOT:
2528                         dest = XFS_DFORK_APTR(dip);
2529                         xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2530                                          (xfs_bmdr_block_t*)dest,
2531                                          XFS_DFORK_ASIZE(dip, mp));
2532                         break;
2533
2534                 default:
2535                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2536                         ASSERT(0);
2537                         xfs_buf_relse(bp);
2538                         return XFS_ERROR(EIO);
2539                 }
2540         }
2541
2542 write_inode_buffer:
2543         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2544                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2545                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2546                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2547                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2548                 xfs_bdwrite(mp, bp);
2549         } else {
2550                 XFS_BUF_STALE(bp);
2551                 error = xfs_bwrite(mp, bp);
2552         }
2553
2554         return (error);
2555 }
2556
2557 /*
2558  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2559  * structure, so that we know not to do any dquot item or dquot buffer
2560  * recovery of that type.
2561  */
2562 STATIC int
2563 xlog_recover_do_quotaoff_trans(
2564         xlog_t                  *log,
2565         xlog_recover_item_t     *item,
2566         int                     pass)
2567 {
2568         xfs_qoff_logformat_t    *qoff_f;
2569
2570         if (pass == XLOG_RECOVER_PASS2) {
2571                 return (0);
2572         }
2573
2574         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2575         ASSERT(qoff_f);
2576
2577         /*
2578          * The logitem format's flag tells us if this was user quotaoff,
2579          * group quotaoff or both.
2580          */
2581         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2582                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2583         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2584                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2585
2586         return (0);
2587 }
2588
2589 /*
2590  * Recover a dquot record
2591  */
2592 STATIC int
2593 xlog_recover_do_dquot_trans(
2594         xlog_t                  *log,
2595         xlog_recover_item_t     *item,
2596         int                     pass)
2597 {
2598         xfs_mount_t             *mp;
2599         xfs_buf_t               *bp;
2600         struct xfs_disk_dquot   *ddq, *recddq;
2601         int                     error;
2602         xfs_dq_logformat_t      *dq_f;
2603         uint                    type;
2604
2605         if (pass == XLOG_RECOVER_PASS1) {
2606                 return 0;
2607         }
2608         mp = log->l_mp;
2609
2610         /*
2611          * Filesystems are required to send in quota flags at mount time.
2612          */
2613         if (mp->m_qflags == 0)
2614                 return (0);
2615
2616         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2617         ASSERT(recddq);
2618         /*
2619          * This type of quotas was turned off, so ignore this record.
2620          */
2621         type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
2622                         (XFS_DQ_USER | XFS_DQ_GROUP);
2623         ASSERT(type);
2624         if (log->l_quotaoffs_flag & type)
2625                 return (0);
2626
2627         /*
2628          * At this point we know that quota was _not_ turned off.
2629          * Since the mount flags do not indicate otherwise, this
2630          * must mean that quota is on, and the dquot needs to be replayed.
2631          * Remember that we may not have fully recovered the superblock yet,
2632          * so we can't do the usual trick of looking at the SB quota bits.
2633          *
2634          * The other possibility, of course, is that the quota subsystem was
2635          * removed since the last mount - ENOSYS.
2636          */
2637         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2638         ASSERT(dq_f);
2639         if ((error = xfs_qm_dqcheck(recddq,
2640                            dq_f->qlf_id,
2641                            0, XFS_QMOPT_DOWARN,
2642                            "xlog_recover_do_dquot_trans (log copy)"))) {
2643                 return XFS_ERROR(EIO);
2644         }
2645         ASSERT(dq_f->qlf_len == 1);
2646
2647         error = xfs_read_buf(mp, mp->m_ddev_targp,
2648                              dq_f->qlf_blkno,
2649                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2650                              0, &bp);
2651         if (error) {
2652                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2653                                   bp, dq_f->qlf_blkno);
2654                 return error;
2655         }
2656         ASSERT(bp);
2657         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2658
2659         /*
2660          * At least the magic num portion should be on disk because this
2661          * was among a chunk of dquots created earlier, and we did some
2662          * minimal initialization then.
2663          */
2664         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2665                            "xlog_recover_do_dquot_trans")) {
2666                 xfs_buf_relse(bp);
2667                 return XFS_ERROR(EIO);
2668         }
2669
2670         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2671
2672         ASSERT(dq_f->qlf_size == 2);
2673         ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2674                XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2675         XFS_BUF_SET_FSPRIVATE(bp, mp);
2676         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2677         xfs_bdwrite(mp, bp);
2678
2679         return (0);
2680 }
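
/*
 * The dquot replay above follows a "validate both copies, then overwrite
 * in place" pattern: the logged image and the on-disk image are each
 * sanity checked, and only then does the logged image replace the on-disk
 * one.  A stripped-down sketch of that pattern, with an illustrative
 * record type (not the real dquot layout):
 */
#include <string.h>

struct sketch_rec {
	unsigned int	magic;
	char		payload[60];
};

static int
sketch_replay_record(const struct sketch_rec *logged, struct sketch_rec *ondisk,
		     unsigned int expect_magic)
{
	/* refuse the replay if either copy fails its sanity check */
	if (logged->magic != expect_magic || ondisk->magic != expect_magic)
		return -1;
	memcpy(ondisk, logged, sizeof(*ondisk));	/* logged copy wins */
	return 0;
}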
2681
2682 /*
2683  * This routine is called to create an in-core extent free intent
2684  * item from the efi format structure which was logged on disk.
2685  * It allocates an in-core efi, copies the extents from the format
2686  * structure into it, and adds the efi to the AIL with the given
2687  * LSN.
2688  */
2689 STATIC void
2690 xlog_recover_do_efi_trans(
2691         xlog_t                  *log,
2692         xlog_recover_item_t     *item,
2693         xfs_lsn_t               lsn,
2694         int                     pass)
2695 {
2696         xfs_mount_t             *mp;
2697         xfs_efi_log_item_t      *efip;
2698         xfs_efi_log_format_t    *efi_formatp;
2699         SPLDECL(s);
2700
2701         if (pass == XLOG_RECOVER_PASS1) {
2702                 return;
2703         }
2704
2705         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2706         ASSERT(item->ri_buf[0].i_len ==
2707                (sizeof(xfs_efi_log_format_t) +
2708                 ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))));
2709
2710         mp = log->l_mp;
2711         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2712         memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
2713               sizeof(xfs_efi_log_format_t) +
2714               ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
2715         efip->efi_next_extent = efi_formatp->efi_nextents;
2716         efip->efi_flags |= XFS_EFI_COMMITTED;
2717
2718         AIL_LOCK(mp,s);
2719         /*
2720          * xfs_trans_update_ail() drops the AIL lock.
2721          */
2722         xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
2723 }
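
/*
 * The size assertion and memcpy above rely on the old C idiom of a
 * one-element trailing array: the format structure declares a single
 * extent and the logged record carries (nextents - 1) additional extents
 * immediately after it.  A sketch of that sizing arithmetic, with
 * illustrative types in place of the real EFI format:
 */
#include <stdlib.h>
#include <string.h>

struct sketch_extent {
	unsigned long long	start;
	unsigned int		len;
};

struct sketch_efi_format {
	unsigned int		nextents;
	struct sketch_extent	extents[1];	/* really nextents entries */
};

static size_t
sketch_efi_format_size(unsigned int nextents)
{
	return sizeof(struct sketch_efi_format) +
	       (nextents - 1) * sizeof(struct sketch_extent);
}

/* Copy a logged format image into a freshly allocated in-core copy. */
static struct sketch_efi_format *
sketch_efi_copy(const struct sketch_efi_format *fmt)
{
	size_t size = sketch_efi_format_size(fmt->nextents);
	struct sketch_efi_format *copy = malloc(size);

	if (copy)
		memcpy(copy, fmt, size);
	return copy;
}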
2724
2725
2726 /*
2727  * This routine is called when an efd format structure is found in
2728  * a committed transaction in the log.  Its purpose is to cancel
2729  * the corresponding efi if it was still in the log.  To do this
2730  * it searches the AIL for the efi with an id equal to that in the
2731  * efd format structure.  If we find it, we remove the efi from the
2732  * AIL and free it.
2733  */
2734 STATIC void
2735 xlog_recover_do_efd_trans(
2736         xlog_t                  *log,
2737         xlog_recover_item_t     *item,
2738         int                     pass)
2739 {
2740         xfs_mount_t             *mp;
2741         xfs_efd_log_format_t    *efd_formatp;
2742         xfs_efi_log_item_t      *efip = NULL;
2743         xfs_log_item_t          *lip;
2744         int                     gen;
2745         int                     nexts;
2746         __uint64_t              efi_id;
2747         SPLDECL(s);
2748
2749         if (pass == XLOG_RECOVER_PASS1) {
2750                 return;
2751         }
2752
2753         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2754         ASSERT(item->ri_buf[0].i_len ==
2755                (sizeof(xfs_efd_log_format_t) +
2756                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t))));
2757         efi_id = efd_formatp->efd_efi_id;
2758
2759         /*
2760          * Search for the efi with the id in the efd format structure
2761          * in the AIL.
2762          */
2763         mp = log->l_mp;
2764         AIL_LOCK(mp,s);
2765         lip = xfs_trans_first_ail(mp, &gen);
2766         while (lip != NULL) {
2767                 if (lip->li_type == XFS_LI_EFI) {
2768                         efip = (xfs_efi_log_item_t *)lip;
2769                         if (efip->efi_format.efi_id == efi_id) {
2770                                 /*
2771                                  * xfs_trans_delete_ail() drops the
2772                                  * AIL lock.
2773                                  */
2774                                 xfs_trans_delete_ail(mp, lip, s);
2775                                 break;
2776                         }
2777                 }
2778                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
2779         }
2780         if (lip == NULL) {
2781                 AIL_UNLOCK(mp, s);
2782         }
2783
2784         /*
2785          * If we found it, then free it up.  If it wasn't there, it
2786          * must have been overwritten in the log.  Oh well.
2787          */
2788         if (lip != NULL) {
2789                 nexts = efip->efi_format.efi_nextents;
2790                 if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
2791                         kmem_free(lip, sizeof(xfs_efi_log_item_t) +
2792                                   ((nexts - 1) * sizeof(xfs_extent_t)));
2793                 } else {
2794                         kmem_zone_free(xfs_efi_zone, efip);
2795                 }
2796         }
2797 }
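
/*
 * The EFD handler above is essentially "search a list for the matching
 * intent and unlink it".  A sketch of that search over a plain singly
 * linked list (the real AIL is a locked, LSN-ordered list and the walk
 * uses the first/next iterators shown above):
 */
#include <stddef.h>

struct sketch_item {
	int			type;
	unsigned long long	id;
	struct sketch_item	*next;
};

static struct sketch_item *
sketch_cancel_intent(struct sketch_item **head, int type, unsigned long long id)
{
	struct sketch_item **pp, *ip;

	for (pp = head; (ip = *pp) != NULL; pp = &ip->next) {
		if (ip->type == type && ip->id == id) {
			*pp = ip->next;		/* unlink the matching intent */
			return ip;		/* caller frees it */
		}
	}
	return NULL;	/* already overwritten in the log; nothing to do */
}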
2798
2799 /*
2800  * Perform the transaction
2801  *
2802  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2803  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2804  */
2805 STATIC int
2806 xlog_recover_do_trans(
2807         xlog_t                  *log,
2808         xlog_recover_t          *trans,
2809         int                     pass)
2810 {
2811         int                     error = 0;
2812         xlog_recover_item_t     *item, *first_item;
2813
2814         if ((error = xlog_recover_reorder_trans(log, trans)))
2815                 return error;
2816         first_item = item = trans->r_itemq;
2817         do {
2818                 /*
2819                  * We don't need to worry about the block number being
2820                  * truncated in > 1 TB buffers because, in user-land,
2821                  * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2822                  * the block numbers will get through the user-mode buffer
2823                  * cache properly.  The only bad case is o32 kernels,
2824                  * where xfs_daddr_t is 32 bits, but mount will warn us
2825                  * off a > 1 TB filesystem before we get here.
2826                  */
2827                 if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
2828                     (ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
2829                     (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
2830                         if  ((error = xlog_recover_do_buffer_trans(log, item,
2831                                                                  pass)))
2832                                 break;
2833                 } else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
2834                            (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
2835                            (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
2836                         if ((error = xlog_recover_do_inode_trans(log, item,
2837                                                                 pass)))
2838                                 break;
2839                 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2840                         xlog_recover_do_efi_trans(log, item, trans->r_lsn,
2841                                                   pass);
2842                 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2843                         xlog_recover_do_efd_trans(log, item, pass);
2844                 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
2845                         if ((error = xlog_recover_do_dquot_trans(log, item,
2846                                                                    pass)))
2847                                 break;
2848                 } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
2849                         if ((error = xlog_recover_do_quotaoff_trans(log, item,
2850                                                                    pass)))
2851                                 break;
2852                 } else {
2853                         xlog_warn("XFS: xlog_recover_do_trans");
2854                         ASSERT(0);
2855                         error = XFS_ERROR(EIO);
2856                         break;
2857                 }
2858                 item = item->ri_next;
2859         } while (first_item != item);
2860
2861         return error;
2862 }
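
/*
 * The item queue above is a circular singly linked list, so it is walked
 * with a do/while that stops when the cursor returns to the first item.
 * A sketch of that traversal shape, with an illustrative per-item
 * dispatch callback standing in for the type switch:
 */
struct sketch_ritem {
	int			type;
	struct sketch_ritem	*ri_next;	/* circular: last points at first */
};

static int
sketch_walk_ring(struct sketch_ritem *first,
		 int (*dispatch)(struct sketch_ritem *))
{
	struct sketch_ritem *item = first;
	int error = 0;

	do {
		error = dispatch(item);
		if (error)
			break;
		item = item->ri_next;
	} while (item != first);

	return error;
}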
2863
2864 /*
2865  * Free up any resources allocated by the transaction
2866  *
2867  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2868  */
2869 STATIC void
2870 xlog_recover_free_trans(
2871         xlog_recover_t          *trans)
2872 {
2873         xlog_recover_item_t     *first_item, *item, *free_item;
2874         int                     i;
2875
2876         item = first_item = trans->r_itemq;
2877         do {
2878                 free_item = item;
2879                 item = item->ri_next;
2880                  /* Free the regions in the item. */
2881                 for (i = 0; i < free_item->ri_cnt; i++) {
2882                         kmem_free(free_item->ri_buf[i].i_addr,
2883                                   free_item->ri_buf[i].i_len);
2884                 }
2885                 /* Free the item itself */
2886                 kmem_free(free_item->ri_buf,
2887                           (free_item->ri_total * sizeof(xfs_log_iovec_t)));
2888                 kmem_free(free_item, sizeof(xlog_recover_item_t));
2889         } while (first_item != item);
2890         /* Free the transaction recover structure */
2891         kmem_free(trans, sizeof(xlog_recover_t));
2892 }
2893
2894 STATIC int
2895 xlog_recover_commit_trans(
2896         xlog_t                  *log,
2897         xlog_recover_t          **q,
2898         xlog_recover_t          *trans,
2899         int                     pass)
2900 {
2901         int                     error;
2902
2903         if ((error = xlog_recover_unlink_tid(q, trans)))
2904                 return error;
2905         if ((error = xlog_recover_do_trans(log, trans, pass)))
2906                 return error;
2907         xlog_recover_free_trans(trans);                 /* no error */
2908         return 0;
2909 }
2910
2911 STATIC int
2912 xlog_recover_unmount_trans(
2913         xlog_recover_t          *trans)
2914 {
2915         /* Do nothing now */
2916         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2917         return 0;
2918 }
2919
2920 /*
2921  * There are two valid states of the r_state field.  0 indicates that the
2922  * transaction structure is in a normal state.  We have either seen the
2923  * start of the transaction or the last operation we added was not a partial
2924  * operation.  If the last operation we added to the transaction was a
2925  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2926  *
2927  * NOTE: skip LRs with 0 data length.
2928  */
2929 STATIC int
2930 xlog_recover_process_data(
2931         xlog_t                  *log,
2932         xlog_recover_t          *rhash[],
2933         xlog_rec_header_t       *rhead,
2934         xfs_caddr_t             dp,
2935         int                     pass)
2936 {
2937         xfs_caddr_t             lp;
2938         int                     num_logops;
2939         xlog_op_header_t        *ohead;
2940         xlog_recover_t          *trans;
2941         xlog_tid_t              tid;
2942         int                     error;
2943         unsigned long           hash;
2944         uint                    flags;
2945
2946         lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
2947         num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
2948
2949         /* check the log format matches our own - else we can't recover */
2950         if (xlog_header_check_recover(log->l_mp, rhead))
2951                 return (XFS_ERROR(EIO));
2952
2953         while ((dp < lp) && num_logops) {
2954                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2955                 ohead = (xlog_op_header_t *)dp;
2956                 dp += sizeof(xlog_op_header_t);
2957                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2958                     ohead->oh_clientid != XFS_LOG) {
2959                         xlog_warn(
2960                 "XFS: xlog_recover_process_data: bad clientid");
2961                         ASSERT(0);
2962                         return (XFS_ERROR(EIO));
2963                 }
2964                 tid = INT_GET(ohead->oh_tid, ARCH_CONVERT);
2965                 hash = XLOG_RHASH(tid);
2966                 trans = xlog_recover_find_tid(rhash[hash], tid);
2967                 if (trans == NULL) {               /* not found; add new tid */
2968                         if (ohead->oh_flags & XLOG_START_TRANS)
2969                                 xlog_recover_new_tid(&rhash[hash], tid,
2970                                         INT_GET(rhead->h_lsn, ARCH_CONVERT));
2971                 } else {
2972                         ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp);
2973                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2974                         if (flags & XLOG_WAS_CONT_TRANS)
2975                                 flags &= ~XLOG_CONTINUE_TRANS;
2976                         switch (flags) {
2977                         case XLOG_COMMIT_TRANS:
2978                                 error = xlog_recover_commit_trans(log,
2979                                                 &rhash[hash], trans, pass);
2980                                 break;
2981                         case XLOG_UNMOUNT_TRANS:
2982                                 error = xlog_recover_unmount_trans(trans);
2983                                 break;
2984                         case XLOG_WAS_CONT_TRANS:
2985                                 error = xlog_recover_add_to_cont_trans(trans,
2986                                                 dp, INT_GET(ohead->oh_len,
2987                                                         ARCH_CONVERT));
2988                                 break;
2989                         case XLOG_START_TRANS:
2990                                 xlog_warn(
2991                         "XFS: xlog_recover_process_data: bad transaction");
2992                                 ASSERT(0);
2993                                 error = XFS_ERROR(EIO);
2994                                 break;
2995                         case 0:
2996                         case XLOG_CONTINUE_TRANS:
2997                                 error = xlog_recover_add_to_trans(trans,
2998                                                 dp, INT_GET(ohead->oh_len,
2999                                                         ARCH_CONVERT));
3000                                 break;
3001                         default:
3002                                 xlog_warn(
3003                         "XFS: xlog_recover_process_data: bad flag");
3004                                 ASSERT(0);
3005                                 error = XFS_ERROR(EIO);
3006                                 break;
3007                         }
3008                         if (error)
3009                                 return error;
3010                 }
3011                 dp += INT_GET(ohead->oh_len, ARCH_CONVERT);
3012                 num_logops--;
3013         }
3014         return 0;
3015 }
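
/*
 * The record body above is a sequence of (op header, payload) pairs, so
 * it is parsed with a length-prefixed cursor walk.  A sketch of that
 * walk with illustrative fixed-width fields (the real headers are
 * architecture-converted on access, and the handler would implement the
 * start/continue/commit state machine described above):
 */
#include <assert.h>
#include <stdint.h>

struct sketch_ophdr {
	uint32_t	oh_len;		/* payload bytes that follow */
	uint8_t		oh_flags;
};

static int
sketch_walk_ops(const char *dp, const char *lp, int num_logops,
		int (*handle)(const struct sketch_ophdr *, const char *))
{
	while (dp < lp && num_logops) {
		const struct sketch_ophdr *ohead;

		assert(dp + sizeof(*ohead) <= lp);
		ohead = (const struct sketch_ophdr *)dp;
		dp += sizeof(*ohead);
		assert(dp + ohead->oh_len <= lp);
		if (handle(ohead, dp))
			return -1;
		dp += ohead->oh_len;	/* advance past the payload */
		num_logops--;
	}
	return 0;
}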
3016
3017 /*
3018  * Process an extent free intent item that was recovered from
3019  * the log.  We need to free the extents that it describes.
3020  */
3021 STATIC void
3022 xlog_recover_process_efi(
3023         xfs_mount_t             *mp,
3024         xfs_efi_log_item_t      *efip)
3025 {
3026         xfs_efd_log_item_t      *efdp;
3027         xfs_trans_t             *tp;
3028         int                     i;
3029         xfs_extent_t            *extp;
3030         xfs_fsblock_t           startblock_fsb;
3031
3032         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3033
3034         /*
3035          * First check the validity of the extents described by the
3036          * EFI.  If any are bad, then assume that all are bad and
3037          * just toss the EFI.
3038          */
3039         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3040                 extp = &(efip->efi_format.efi_extents[i]);
3041                 startblock_fsb = XFS_BB_TO_FSB(mp,
3042                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3043                 if ((startblock_fsb == 0) ||
3044                     (extp->ext_len == 0) ||
3045                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3046                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3047                         /*
3048                          * This will pull the EFI from the AIL and
3049                          * free the memory associated with it.
3050                          */
3051                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3052                         return;
3053                 }
3054         }
3055
3056         tp = xfs_trans_alloc(mp, 0);
3057         xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3058         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3059
3060         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3061                 extp = &(efip->efi_format.efi_extents[i]);
3062                 xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3063                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3064                                          extp->ext_len);
3065         }
3066
3067         efip->efi_flags |= XFS_EFI_RECOVERED;
3068         xfs_trans_commit(tp, 0, NULL);
3069 }
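
/*
 * A sketch of the extent sanity check above: an extent is tossed if its
 * start or length is zero, or if either falls outside the filesystem or
 * AG bounds.  The two limit parameters stand in for sb_dblocks and
 * sb_agblocks.
 */
static int
sketch_extent_is_valid(unsigned long long start_fsb, unsigned int len,
		       unsigned long long fs_blocks, unsigned int ag_blocks)
{
	return start_fsb != 0 && len != 0 &&
	       start_fsb < fs_blocks && len < ag_blocks;
}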
3070
3071 /*
3072  * Verify that once we've encountered something other than an EFI
3073  * in the AIL, there are no more EFIs in the AIL.
3074  */
3075 #if defined(DEBUG)
3076 STATIC void
3077 xlog_recover_check_ail(
3078         xfs_mount_t             *mp,
3079         xfs_log_item_t          *lip,
3080         int                     gen)
3081 {
3082         int                     orig_gen = gen;
3083
3084         do {
3085                 ASSERT(lip->li_type != XFS_LI_EFI);
3086                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3087                 /*
3088                  * The check will be bogus if we restart from the
3089                  * beginning of the AIL, so ASSERT that we don't.
3090                  * We never should since we're holding the AIL lock
3091                  * the entire time.
3092                  */
3093                 ASSERT(gen == orig_gen);
3094         } while (lip != NULL);
3095 }
3096 #endif  /* DEBUG */
3097
3098 /*
3099  * When this is called, all of the EFIs which did not have
3100  * corresponding EFDs should be in the AIL.  What we do now
3101  * is free the extents associated with each one.
3102  *
3103  * Since we process the EFIs in normal transactions, they
3104  * will be removed at some point after the commit.  This prevents
3105  * us from just walking down the list processing each one.
3106  * We'll use a flag in the EFI to skip those that we've already
3107  * processed and use the AIL iteration mechanism's generation
3108  * count to try to speed this up at least a bit.
3109  *
3110  * When we start, we know that the EFIs are the only things in
3111  * the AIL.  As we process them, however, other items are added
3112  * to the AIL.  Since everything added to the AIL must come after
3113  * everything already in the AIL, we stop processing as soon as
3114  * we see something other than an EFI in the AIL.
3115  */
3116 STATIC void
3117 xlog_recover_process_efis(
3118         xlog_t                  *log)
3119 {
3120         xfs_log_item_t          *lip;
3121         xfs_efi_log_item_t      *efip;
3122         int                     gen;
3123         xfs_mount_t             *mp;
3124         SPLDECL(s);
3125
3126         mp = log->l_mp;
3127         AIL_LOCK(mp,s);
3128
3129         lip = xfs_trans_first_ail(mp, &gen);
3130         while (lip != NULL) {
3131                 /*
3132                  * We're done when we see something other than an EFI.
3133                  */
3134                 if (lip->li_type != XFS_LI_EFI) {
3135                         xlog_recover_check_ail(mp, lip, gen);
3136                         break;
3137                 }
3138
3139                 /*
3140                  * Skip EFIs that we've already processed.
3141                  */
3142                 efip = (xfs_efi_log_item_t *)lip;
3143                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3144                         lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3145                         continue;
3146                 }
3147
3148                 AIL_UNLOCK(mp, s);
3149                 xlog_recover_process_efi(mp, efip);
3150                 AIL_LOCK(mp,s);
3151                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3152         }
3153         AIL_UNLOCK(mp, s);
3154 }
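
/*
 * The loop above drops the AIL lock around the expensive per-EFI work
 * and relies on a "recovered" flag to skip items already handled.  A
 * user-space sketch of that drop-the-lock-and-mark pattern using a
 * pthread mutex; it assumes, as the comment above argues for the AIL,
 * that items are only appended behind our position while the lock is
 * dropped, never removed.
 */
#include <pthread.h>

struct sketch_efi {
	int			processed;
	struct sketch_efi	*next;
};

static void
sketch_process_all(struct sketch_efi *head, pthread_mutex_t *lock,
		   void (*process)(struct sketch_efi *))
{
	struct sketch_efi *ip;

	pthread_mutex_lock(lock);
	for (ip = head; ip != NULL; ip = ip->next) {
		if (ip->processed)
			continue;	/* handled on an earlier visit */
		pthread_mutex_unlock(lock);
		process(ip);		/* expensive work, lock dropped */
		pthread_mutex_lock(lock);
		ip->processed = 1;
	}
	pthread_mutex_unlock(lock);
}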
3155
3156 /*
3157  * This routine performs a transaction to null out a bad inode pointer
3158  * in an agi unlinked inode hash bucket.
3159  */
3160 STATIC void
3161 xlog_recover_clear_agi_bucket(
3162         xfs_mount_t     *mp,
3163         xfs_agnumber_t  agno,
3164         int             bucket)
3165 {
3166         xfs_trans_t     *tp;
3167         xfs_agi_t       *agi;
3168         xfs_buf_t       *agibp;
3169         int             offset;
3170         int             error;
3171
3172         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3173         xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
3174
3175         error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3176                                    XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3177                                    XFS_FSS_TO_BB(mp, 1), 0, &agibp);
3178         if (error) {
3179                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3180                 return;
3181         }
3182
3183         agi = XFS_BUF_TO_AGI(agibp);
3184         if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) {
3185                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3186                 return;
3187         }
3188         ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
3189
3190         INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO);
3191         offset = offsetof(xfs_agi_t, agi_unlinked) +
3192                  (sizeof(xfs_agino_t) * bucket);
3193         xfs_trans_log_buf(tp, agibp, offset,
3194                           (offset + sizeof(xfs_agino_t) - 1));
3195
3196         (void) xfs_trans_commit(tp, 0, NULL);
3197 }
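
/*
 * The logged range above is computed with offsetof() so that only the
 * one modified bucket is recorded in the transaction.  A sketch of the
 * same arithmetic over an illustrative AGI-like layout:
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_UNLINKED_BUCKETS	64

struct sketch_agi {
	uint32_t	magic;
	uint32_t	unlinked[SKETCH_UNLINKED_BUCKETS];
};

/* First and last byte of bucket b, suitable for a byte-range log call. */
static void
sketch_bucket_range(int b, size_t *first, size_t *last)
{
	*first = offsetof(struct sketch_agi, unlinked) + sizeof(uint32_t) * b;
	*last = *first + sizeof(uint32_t) - 1;
}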
3198
3199 /*
3200  * xlog_iunlink_recover
3201  *
3202  * This is called during recovery to process any inodes which
3203  * we unlinked but did not free when the system crashed.  These
3204  * inodes will be on the lists in the AGI blocks.  What we do
3205  * here is scan all the AGIs and fully truncate and free any
3206  * inodes found on the lists.  Each inode is removed from the
3207  * lists when it has been fully truncated and is freed.  The
3208  * freeing of the inode and its removal from the list must be
3209  * atomic.
3210  */
3211 void
3212 xlog_recover_process_iunlinks(
3213         xlog_t          *log)
3214 {
3215         xfs_mount_t     *mp;
3216         xfs_agnumber_t  agno;
3217         xfs_agi_t       *agi;
3218         xfs_buf_t       *agibp;
3219         xfs_buf_t       *ibp;
3220         xfs_dinode_t    *dip;
3221         xfs_inode_t     *ip;
3222         xfs_agino_t     agino;
3223         xfs_ino_t       ino;
3224         int             bucket;
3225         int             error;
3226         uint            mp_dmevmask;
3227
3228         mp = log->l_mp;
3229
3230         /*
3231          * Prevent any DMAPI event from being sent while in this function.
3232          */
3233         mp_dmevmask = mp->m_dmevmask;
3234         mp->m_dmevmask = 0;
3235
3236         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3237                 /*
3238                  * Find the agi for this ag.
3239                  */
3240                 agibp = xfs_buf_read(mp->m_ddev_targp,
3241                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3242                                 XFS_FSS_TO_BB(mp, 1), 0);
3243                 if (XFS_BUF_ISERROR(agibp)) {
3244                         xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
3245                                 log->l_mp, agibp,
3246                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
3247                 }
3248                 agi = XFS_BUF_TO_AGI(agibp);
3249                 ASSERT(XFS_AGI_MAGIC ==
3250                         INT_GET(agi->agi_magicnum, ARCH_CONVERT));
3251
3252                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3253
3254                         agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT);
3255                         while (agino != NULLAGINO) {
3256
3257                                 /*
3258                                  * Release the agi buffer so that it can
3259                                  * be acquired in the normal course of the
3260                                  * transaction to truncate and free the inode.
3261                                  */
3262                                 xfs_buf_relse(agibp);
3263
3264                                 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3265                                 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3266                                 ASSERT(error || (ip != NULL));
3267
3268                                 if (!error) {
3269                                         /*
3270                                          * Get the on disk inode to find the
3271                                          * next inode in the bucket.
3272                                          */
3273                                         error = xfs_itobp(mp, NULL, ip, &dip,
3274                                                         &ibp, 0);
3275                                         ASSERT(error || (dip != NULL));
3276                                 }
3277
3278                                 if (!error) {
3279                                         ASSERT(ip->i_d.di_nlink == 0);
3280
3281                                         /* setup for the next pass */
3282                                         agino = INT_GET(dip->di_next_unlinked,
3283                                                         ARCH_CONVERT);
3284                                         xfs_buf_relse(ibp);
3285                                         /*
3286                                          * Prevent any DMAPI event from
3287                                          * being sent when the
3288                                          * reference on the inode is
3289                                          * dropped.
3290                                          */
3291                                         ip->i_d.di_dmevmask = 0;
3292
3293                                         /*
3294                                          * If this is a new inode, handle
3295                                          * it specially.  Otherwise,
3296                                          * just drop our reference to the
3297                                          * inode.  If there are no
3298                                          * other references, this will
3299                                          * send the inode to
3300                                          * xfs_inactive() which will
3301                                          * truncate the file and free
3302                                          * the inode.
3303                                          */
3304                                         if (ip->i_d.di_mode == 0)
3305                                                 xfs_iput_new(ip, 0);
3306                                         else
3307                                                 VN_RELE(XFS_ITOV(ip));
3308                                 } else {
3309                                         /*
3310                                          * We can't read in the inode
3311                                          * this bucket points to, or
3312                                          * this inode is messed up.  Just
3313                                          * ditch this bucket of inodes.  We
3314                                          * will lose some inodes and space,
3315                                          * but at least we won't hang.  Call
3316                                          * xlog_recover_clear_agi_bucket()
3317                                          * to perform a transaction to clear
3318                                          * the inode pointer in the bucket.
3319                                          */
3320                                         xlog_recover_clear_agi_bucket(mp, agno,
3321                                                         bucket);
3322
3323                                         agino = NULLAGINO;
3324                                 }
3325
3326                                 /*
3327                                  * Reacquire the agibuffer and continue around
3328                                  * the loop.
3329                                  */
3330                                 agibp = xfs_buf_read(mp->m_ddev_targp,
3331                                                 XFS_AG_DADDR(mp, agno,
3332                                                         XFS_AGI_DADDR(mp)),
3333                                                 XFS_FSS_TO_BB(mp, 1), 0);
3334                                 if (XFS_BUF_ISERROR(agibp)) {
3335                                         xfs_ioerror_alert(
3336                                 "xlog_recover_process_iunlinks(#2)",
3337                                                 log->l_mp, agibp,
3338                                                 XFS_AG_DADDR(mp, agno,
3339                                                         XFS_AGI_DADDR(mp)));
3340                                 }
3341                                 agi = XFS_BUF_TO_AGI(agibp);
3342                                 ASSERT(XFS_AGI_MAGIC == INT_GET(
3343                                         agi->agi_magicnum, ARCH_CONVERT));
3344                         }
3345                 }
3346
3347                 /*
3348                  * Release the buffer for the current agi so we can
3349                  * go on to the next one.
3350                  */
3351                 xfs_buf_relse(agibp);
3352         }
3353
3354         mp->m_dmevmask = mp_dmevmask;
3355 }
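
/*
 * Each AGI bucket above heads an on-disk singly linked list threaded
 * through di_next_unlinked.  A sketch of the per-bucket walk, where the
 * read_next callback stands in for the iget/itobp step (returning
 * nonzero on failure) and clear_bucket models the "ditch the bucket"
 * error path:
 */
#define SKETCH_NULLAGINO	(-1LL)

static void
sketch_drain_bucket(long long head,
		    int (*read_next)(long long ino, long long *next),
		    void (*clear_bucket)(void))
{
	long long agino = head;

	while (agino != SKETCH_NULLAGINO) {
		long long next;

		if (read_next(agino, &next)) {
			clear_bucket();	/* unreadable inode: give up on it */
			break;
		}
		agino = next;		/* follow di_next_unlinked */
	}
}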
3356
3357
3358 #ifdef DEBUG
3359 STATIC void
3360 xlog_pack_data_checksum(
3361         xlog_t          *log,
3362         xlog_in_core_t  *iclog,
3363         int             size)
3364 {
3365         int             i;
3366         uint            *up;
3367         uint            chksum = 0;
3368
3369         up = (uint *)iclog->ic_datap;
3370         /* divide length by 4 to get # words */
3371         for (i = 0; i < (size >> 2); i++) {
3372                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3373                 up++;
3374         }
3375         INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
3376 }
3377 #else
3378 #define xlog_pack_data_checksum(log, iclog, size)
3379 #endif
3380
3381 /*
3382  * Stamp cycle number in every block
3383  */
3384 void
3385 xlog_pack_data(
3386         xlog_t                  *log,
3387         xlog_in_core_t          *iclog,
3388         int                     roundoff)
3389 {
3390         int                     i, j, k;
3391         int                     size = iclog->ic_offset + roundoff;
3392         uint                    cycle_lsn;
3393         xfs_caddr_t             dp;
3394         xlog_in_core_2_t        *xhdr;
3395
3396         xlog_pack_data_checksum(log, iclog, size);
3397
3398         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3399
3400         dp = iclog->ic_datap;
3401         for (i = 0; i < BTOBB(size) &&
3402                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3403                 iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
3404                 *(uint *)dp = cycle_lsn;
3405                 dp += BBSIZE;
3406         }
3407
3408         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3409                 xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
3410                 for ( ; i < BTOBB(size); i++) {
3411                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3412                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3413                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
3414                         *(uint *)dp = cycle_lsn;
3415                         dp += BBSIZE;
3416                 }
3417
3418                 for (i = 1; i < log->l_iclog_heads; i++) {
3419                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3420                 }
3421         }
3422 }
3423
3424 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3425 STATIC void
3426 xlog_unpack_data_checksum(
3427         xlog_rec_header_t       *rhead,
3428         xfs_caddr_t             dp,
3429         xlog_t                  *log)
3430 {
3431         uint                    *up = (uint *)dp;
3432         uint                    chksum = 0;
3433         int                     i;
3434
3435         /* divide length by 4 to get # words */
3436         for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
3437                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3438                 up++;
3439         }
3440         if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
3441             if (rhead->h_chksum ||
3442                 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3443                     cmn_err(CE_DEBUG,
3444                         "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)",
3445                             INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
3446                     cmn_err(CE_DEBUG,
3447 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3448                     if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3449                             cmn_err(CE_DEBUG,
3450                                 "XFS: LogR this is a LogV2 filesystem");
3451                     }
3452                     log->l_flags |= XLOG_CHKSUM_MISMATCH;
3453             }
3454         }
3455 }
3456 #else
3457 #define xlog_unpack_data_checksum(rhead, dp, log)
3458 #endif
3459
3460 STATIC void
3461 xlog_unpack_data(
3462         xlog_rec_header_t       *rhead,
3463         xfs_caddr_t             dp,
3464         xlog_t                  *log)
3465 {
3466         int                     i, j, k;
3467         xlog_in_core_2_t        *xhdr;
3468
3469         for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
3470                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3471                 *(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
3472                 dp += BBSIZE;
3473         }
3474
3475         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3476                 xhdr = (xlog_in_core_2_t *)rhead;
3477                 for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
3478                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3479                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3480                         *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3481                         dp += BBSIZE;
3482                 }
3483         }
3484
3485         xlog_unpack_data_checksum(rhead, dp, log);
3486 }
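
/*
 * A round-trip sketch of the cycle stamping that xlog_pack_data() and
 * xlog_unpack_data() perform: the first 32-bit word of every 512-byte
 * basic block is saved into a side array and replaced with the cycle
 * number, and unpacking restores the saved words.  Illustrative sizes;
 * the v2 multi-header case is omitted, and the caller must size the
 * saved[] array to nbblks entries.
 */
#include <stdint.h>

#define SKETCH_BBSIZE	512

static void
sketch_pack(char *data, int nbblks, uint32_t cycle, uint32_t *saved)
{
	int i;

	for (i = 0; i < nbblks; i++) {
		uint32_t *word = (uint32_t *)(data + i * SKETCH_BBSIZE);

		saved[i] = *word;	/* remember the real first word */
		*word = cycle;		/* stamp the cycle number */
	}
}

static void
sketch_unpack(char *data, int nbblks, const uint32_t *saved)
{
	int i;

	for (i = 0; i < nbblks; i++)
		*(uint32_t *)(data + i * SKETCH_BBSIZE) = saved[i];
}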
3487
3488 STATIC int
3489 xlog_valid_rec_header(
3490         xlog_t                  *log,
3491         xlog_rec_header_t       *rhead,
3492         xfs_daddr_t             blkno)
3493 {
3494         int                     hlen;
3495
3496         if (unlikely(
3497             (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
3498                         XLOG_HEADER_MAGIC_NUM))) {
3499                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3500                                 XFS_ERRLEVEL_LOW, log->l_mp);
3501                 return XFS_ERROR(EFSCORRUPTED);
3502         }
3503         if (unlikely(
3504             (!rhead->h_version ||
3505             (INT_GET(rhead->h_version, ARCH_CONVERT) &
3506                         (~XLOG_VERSION_OKBITS)) != 0))) {
3507                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3508                         __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
3509                 return XFS_ERROR(EIO);
3510         }
3511
3512         /* LR body must have data or it wouldn't have been written */
3513         hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
3514         if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3515                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3516                                 XFS_ERRLEVEL_LOW, log->l_mp);
3517                 return XFS_ERROR(EFSCORRUPTED);
3518         }
3519         if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3520                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3521                                 XFS_ERRLEVEL_LOW, log->l_mp);
3522                 return XFS_ERROR(EFSCORRUPTED);
3523         }
3524         return 0;
3525 }
3526
3527 /*
3528  * Read the log from tail to head and process the log records found.
3529  * Handle the two cases where the tail and head are in the same cycle
3530  * and where the active portion of the log wraps around the end of
3531  * the physical log separately.  The pass parameter is passed through
3532  * to the routines called to process the data and is not looked at
3533  * here.
3534  */
3535 STATIC int
3536 xlog_do_recovery_pass(
3537         xlog_t                  *log,
3538         xfs_daddr_t             head_blk,
3539         xfs_daddr_t             tail_blk,
3540         int                     pass)
3541 {
3542         xlog_rec_header_t       *rhead;
3543         xfs_daddr_t             blk_no;
3544         xfs_caddr_t             bufaddr, offset;
3545         xfs_buf_t               *hbp, *dbp;
3546         int                     error = 0, h_size;
3547         int                     bblks, split_bblks;
3548         int                     hblks, split_hblks, wrapped_hblks;
3549         xlog_recover_t          *rhash[XLOG_RHASH_SIZE];
3550
3551         ASSERT(head_blk != tail_blk);
3552
3553         /*
3554          * Read the header of the tail block and get the iclog buffer size from
3555          * h_size.  Use this to tell how many sectors make up the log header.
3556          */
3557         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3558                 /*
3559                  * When using variable length iclogs, read first sector of
3560                  * iclog header and extract the header size from it.  Get a
3561                  * new hbp that is the correct size.
3562                  */
3563                 hbp = xlog_get_bp(log, 1);
3564                 if (!hbp)
3565                         return ENOMEM;
3566                 if ((error = xlog_bread(log, tail_blk, 1, hbp)))
3567                         goto bread_err1;
3568                 offset = xlog_align(log, tail_blk, 1, hbp);
3569                 rhead = (xlog_rec_header_t *)offset;
3570                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3571                 if (error)
3572                         goto bread_err1;
3573                 h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
3574                 if ((INT_GET(rhead->h_version, ARCH_CONVERT)
3575                                 & XLOG_VERSION_2) &&
3576                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3577                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3578                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3579                                 hblks++;
3580                         xlog_put_bp(hbp);
3581                         hbp = xlog_get_bp(log, hblks);
3582                 } else {
3583                         hblks = 1;
3584                 }
3585         } else {
3586                 ASSERT(log->l_sectbb_log == 0);
3587                 hblks = 1;
3588                 hbp = xlog_get_bp(log, 1);
3589                 h_size = XLOG_BIG_RECORD_BSIZE;
3590         }
3591
3592         if (!hbp)
3593                 return ENOMEM;
3594         dbp = xlog_get_bp(log, BTOBB(h_size));
3595         if (!dbp) {
3596                 xlog_put_bp(hbp);
3597                 return ENOMEM;
3598         }
3599
3600         memset(rhash, 0, sizeof(rhash));
3601         if (tail_blk <= head_blk) {
3602                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3603                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3604                                 goto bread_err2;
3605                         offset = xlog_align(log, blk_no, hblks, hbp);
3606                         rhead = (xlog_rec_header_t *)offset;
3607                         error = xlog_valid_rec_header(log, rhead, blk_no);
3608                         if (error)
3609                                 goto bread_err2;
3610
3611                         /* blocks in data section */
3612                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3613                         error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3614                         if (error)
3615                                 goto bread_err2;
3616                         offset = xlog_align(log, blk_no + hblks, bblks, dbp);
3617                         xlog_unpack_data(rhead, offset, log);
3618                         if ((error = xlog_recover_process_data(log,
3619                                                 rhash, rhead, offset, pass)))
3620                                 goto bread_err2;
3621                         blk_no += bblks + hblks;
3622                 }
3623         } else {
3624                 /*
3625                  * Perform recovery around the end of the physical log.
3626                  * When the head is not on the same cycle number as the tail,
3627                  * we can't do a sequential recovery as above.
3628                  */
3629                 blk_no = tail_blk;
3630                 while (blk_no < log->l_logBBsize) {
3631                         /*
3632                          * Check for header wrapping around physical end-of-log
3633                          */
3634                         offset = NULL;
3635                         split_hblks = 0;
3636                         wrapped_hblks = 0;
3637                         if (blk_no + hblks <= log->l_logBBsize) {
3638                                 /* Read header in one read */
3639                                 error = xlog_bread(log, blk_no, hblks, hbp);
3640                                 if (error)
3641                                         goto bread_err2;
3642                                 offset = xlog_align(log, blk_no, hblks, hbp);
3643                         } else {
3644                                 /* This LR is split across physical log end */
3645                                 if (blk_no != log->l_logBBsize) {
3646                                         /* some data before physical log end */
3647                                         ASSERT(blk_no <= INT_MAX);
3648                                         split_hblks = log->l_logBBsize - (int)blk_no;
3649                                         ASSERT(split_hblks > 0);
3650                                         if ((error = xlog_bread(log, blk_no,
3651                                                         split_hblks, hbp)))
3652                                                 goto bread_err2;
3653                                         offset = xlog_align(log, blk_no,
3654                                                         split_hblks, hbp);
3655                                 }
3656                                 /*
3657                                  * Note: this black magic still works with
3658                                  * large sector sizes (non-512) only because:
3659                                  * - we increased the buffer size originally
3660                                  *   by 1 sector giving us enough extra space
3661                                  *   for the second read;
3662                                  * - the log start is guaranteed to be sector
3663                                  *   aligned;
3664                                  * - we read the log end (LR header start)
3665                                  *   _first_, then the log start (LR header end)
3666                                  *   - order is important.
3667                                  */
3668                                 bufaddr = XFS_BUF_PTR(hbp);
3669                                 XFS_BUF_SET_PTR(hbp,
3670                                                 bufaddr + BBTOB(split_hblks),
3671                                                 BBTOB(hblks - split_hblks));
3672                                 wrapped_hblks = hblks - split_hblks;
3673                                 error = xlog_bread(log, 0, wrapped_hblks, hbp);
3674                                 if (error)
3675                                         goto bread_err2;
3676                                 XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
3677                                 if (!offset)
3678                                         offset = xlog_align(log, 0,
3679                                                         wrapped_hblks, hbp);
3680                         }
3681                         rhead = (xlog_rec_header_t *)offset;
3682                         error = xlog_valid_rec_header(log, rhead,
3683                                                 split_hblks ? blk_no : 0);
3684                         if (error)
3685                                 goto bread_err2;
3686
3687                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3688                         blk_no += hblks;
3689
3690                         /* Read in data for log record */
3691                         if (blk_no + bblks <= log->l_logBBsize) {
3692                                 error = xlog_bread(log, blk_no, bblks, dbp);
3693                                 if (error)
3694                                         goto bread_err2;
3695                                 offset = xlog_align(log, blk_no, bblks, dbp);
3696                         } else {
3697                                 /* This log record is split across the
3698                                  * physical end of log */
3699                                 offset = NULL;
3700                                 split_bblks = 0;
3701                                 if (blk_no != log->l_logBBsize) {
3702                                         /* some data is before the physical
3703                                          * end of log */
3704                                         ASSERT(!wrapped_hblks);
3705                                         ASSERT(blk_no <= INT_MAX);
3706                                         split_bblks =
3707                                                 log->l_logBBsize - (int)blk_no;
3708                                         ASSERT(split_bblks > 0);
3709                                         if ((error = xlog_bread(log, blk_no,
3710                                                         split_bblks, dbp)))
3711                                                 goto bread_err2;
3712                                         offset = xlog_align(log, blk_no,
3713                                                         split_bblks, dbp);
3714                                 }
3715                                 /*
3716                                  * Note: this black magic still works with
3717                                  * large sector sizes (non-512) only because:
3718                                  * - we increased the buffer size originally
3719                                  *   by 1 sector giving us enough extra space
3720                                  *   for the second read;
3721                                  * - the log start is guaranteed to be sector
3722                                  *   aligned;
3723                                  * - we read the log end (LR header start)
3724                                  *   _first_, then the log start (LR header end)
3725                                  *   - order is important.
3726                                  */
3727                                 bufaddr = XFS_BUF_PTR(dbp);
3728                                 XFS_BUF_SET_PTR(dbp,
3729                                                 bufaddr + BBTOB(split_bblks),
3730                                                 BBTOB(bblks - split_bblks));
3731                                 if ((error = xlog_bread(log, wrapped_hblks,
3732                                                 bblks - split_bblks, dbp)))
3733                                         goto bread_err2;
3734                                 XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
3735                                 if (!offset)
3736                                         offset = xlog_align(log, wrapped_hblks,
3737                                                 bblks - split_bblks, dbp);
3738                         }
3739                         xlog_unpack_data(rhead, offset, log);
3740                         if ((error = xlog_recover_process_data(log, rhash,
3741                                                         rhead, offset, pass)))
3742                                 goto bread_err2;
3743                         blk_no += bblks;
3744                 }
3745
3746                 ASSERT(blk_no >= log->l_logBBsize);
3747                 blk_no -= log->l_logBBsize;
3748
3749                 /* read first part of physical log */
3750                 while (blk_no < head_blk) {
3751                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3752                                 goto bread_err2;
3753                         offset = xlog_align(log, blk_no, hblks, hbp);
3754                         rhead = (xlog_rec_header_t *)offset;
3755                         error = xlog_valid_rec_header(log, rhead, blk_no);
3756                         if (error)
3757                                 goto bread_err2;
3758                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3759                         if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3760                                 goto bread_err2;
3761                         offset = xlog_align(log, blk_no+hblks, bblks, dbp);
3762                         xlog_unpack_data(rhead, offset, log);
3763                         if ((error = xlog_recover_process_data(log, rhash,
3764                                                         rhead, offset, pass)))
3765                                 goto bread_err2;
3766                         blk_no += bblks + hblks;
3767                 }
3768         }
3769
3770  bread_err2:
3771         xlog_put_bp(dbp);
3772  bread_err1:
3773         xlog_put_bp(hbp);
3774         return error;
3775 }
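
/*
 * The "black magic" comments above describe reading a log record that
 * wraps past the physical end of the log: the tail piece is read first
 * into the front of the buffer, then the wrapped piece from block 0 is
 * read in after it.  A sketch of that split read over an in-memory
 * circular buffer, in 512-byte block units:
 */
#include <string.h>

static void
sketch_circular_read(const char *log, int logsize, int blkno, int nblocks,
		     char *dst)
{
	int split = 0;

	if (blkno + nblocks > logsize) {
		/* first piece: from blkno up to the physical end of log */
		split = logsize - blkno;
		memcpy(dst, log + blkno * 512, split * 512);
		blkno = 0;
		nblocks -= split;
	}
	/* second (or only) piece */
	memcpy(dst + split * 512, log + blkno * 512, nblocks * 512);
}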
3776
3777 /*
3778  * Do the recovery of the log.  We actually do this in two phases.
3779  * The two passes are necessary in order to implement the cancellation
3780  * of records written into the log.  The first pass
3781  * determines those things which have been cancelled, and the
3782  * second pass replays log items normally except for those which
3783  * have been cancelled.  The handling of the replay and cancellations
3784  * takes place in the log item type specific routines.
3785  *
3786  * The table of items which have cancel records in the log is allocated
3787  * and freed at this level, since only here do we know when all of
3788  * the log recovery has been completed.
3789  */
3790 STATIC int
3791 xlog_do_log_recovery(
3792         xlog_t          *log,
3793         xfs_daddr_t     head_blk,
3794         xfs_daddr_t     tail_blk)
3795 {
3796         int             error;
3797
3798         ASSERT(head_blk != tail_blk);
3799
3800         /*
3801          * First do a pass to find all of the cancelled buf log items.
3802          * Store them in the buf_cancel_table for use in the second pass.
3803          */
3804         log->l_buf_cancel_table =
3805                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3806                                                  sizeof(xfs_buf_cancel_t*),
3807                                                  KM_SLEEP);
3808         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3809                                       XLOG_RECOVER_PASS1);
3810         if (error != 0) {
3811                 kmem_free(log->l_buf_cancel_table,
3812                           XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3813                 log->l_buf_cancel_table = NULL;
3814                 return error;
3815         }
3816         /*
3817          * Then do a second pass to actually recover the items in the log.
3818          * When it is complete free the table of buf cancel items.
3819          */
3820         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3821                                       XLOG_RECOVER_PASS2);
3822 #ifdef DEBUG
3823         {
3824                 int     i;
3825
3826                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3827                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3828         }
3829 #endif  /* DEBUG */
3830
3831         kmem_free(log->l_buf_cancel_table,
3832                   XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3833         log->l_buf_cancel_table = NULL;
3834
3835         return error;
3836 }
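
/*
 * A sketch of the two-pass shape above: pass 1 only collects cancelled
 * buffer ids into a table, pass 2 replays everything not covered by the
 * table, and the table lives exactly as long as the recovery.  The
 * callback and table type are illustrative.
 */
#include <stdlib.h>

struct sketch_cancel_table {
	int	entries;	/* stand-in for a hash of cancelled buffer ids */
};

static int
sketch_recover(int (*run_pass)(int pass, struct sketch_cancel_table *))
{
	struct sketch_cancel_table *tbl = calloc(1, sizeof(*tbl));
	int error;

	if (!tbl)
		return -1;
	error = run_pass(1, tbl);		/* find cancel records */
	if (!error)
		error = run_pass(2, tbl);	/* replay, honouring the table */
	free(tbl);
	return error;
}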
3837
3838 /*
3839  * Do the actual recovery
3840  */
3841 STATIC int
3842 xlog_do_recover(
3843         xlog_t          *log,
3844         xfs_daddr_t     head_blk,
3845         xfs_daddr_t     tail_blk)
3846 {
3847         int             error;
3848         xfs_buf_t       *bp;
3849         xfs_sb_t        *sbp;
3850
3851         /*
3852          * First replay the images in the log.
3853          */
3854         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3855         if (error) {
3856                 return error;
3857         }
3858
        XFS_bflush(log->l_mp->m_ddev_targp);

        /*
         * If I/O errors happened during recovery, bail out.
         */
        if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
                return EIO;
        }

        /*
         * We now update the tail_lsn since much of the recovery has
         * completed and there may be space available to use.  If there
         * were no extent frees or iunlinks, we can free up the entire
         * log and set the tail_lsn to be the last_sync_lsn.  This was
         * set in xlog_find_tail to be the lsn of the last known good
         * LR on disk.  If there are extent frees or iunlinks, they will
         * have some entries in the AIL; so we look at the AIL to
         * determine how to set the tail_lsn.
         */
        xlog_assign_tail_lsn(log->l_mp);

        /*
         * Now that we've finished replaying all buffer and inode
         * updates, re-read in the superblock.
         */
        bp = xfs_getsb(log->l_mp, 0);
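        /*
         * xfs_getsb() can hand back the cached, pre-recovery copy of
         * the superblock buffer, so clear the done flag and mark the
         * buffer for read to force xfsbdstrat() to really go to disk.
         */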
        XFS_BUF_UNDONE(bp);
        XFS_BUF_READ(bp);
        xfsbdstrat(log->l_mp, bp);
        if ((error = xfs_iowait(bp))) {
                xfs_ioerror_alert("xlog_do_recover",
                                  log->l_mp, bp, XFS_BUF_ADDR(bp));
                ASSERT(0);
                xfs_buf_relse(bp);
                return error;
        }

        /* Convert superblock from on-disk format */
        sbp = &log->l_mp->m_sb;
        xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, XFS_SB_ALL_BITS);
        ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
        ASSERT(XFS_SB_GOOD_VERSION(sbp));
        xfs_buf_relse(bp);

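        /*
         * On DEBUG kernels, cross-check the AG header counters against
         * the freshly re-read superblock; the call compiles away
         * otherwise.
         */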
        xlog_recover_check_summary(log);

        /* Normal transactions can now occur */
        log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
        return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
        xlog_t          *log,
        int             readonly)
{
        xfs_daddr_t     head_blk, tail_blk;
        int             error;

        /* find the tail of the log */
        if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
                return error;

        if (tail_blk != head_blk) {
                /*
                 * There used to be a comment here disallowing recovery
                 * on read-only mounts (with a note that the mount code
                 * checks for ENOSPC and turns it into an intelligent
                 * error message), but that is no longer true.  Now,
                 * unless you specify NORECOVERY (in which case this
                 * function would never be called), we just go ahead and
                 * recover.  We do this all under the vfs layer, so we
                 * can get away with it unless the device itself is
                 * read-only, in which case we fail.
                 */
                if ((error = xfs_dev_is_read_only(log->l_mp,
                                                "recovery required"))) {
                        return error;
                }

                cmn_err(CE_NOTE,
                        "Starting XFS recovery on filesystem: %s (dev: %s)",
                        log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));

                error = xlog_do_recover(log, head_blk, tail_blk);
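                /*
                 * Flag the log so that xlog_recover_finish() knows the
                 * second phase of recovery (EFI processing and the
                 * unlinked inode lists) still has to run.
                 */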
                log->l_flags |= XLOG_RECOVERY_NEEDED;
        }
        return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on-disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
        xlog_t          *log,
        int             mfsi_flags)
{
        /*
         * Now we're ready to do the transactions needed for the
         * rest of recovery.  Start with completing all the extent
         * free intent records and then process the unlinked inode
         * lists.  At this point, we essentially run in normal mode
         * except that we're still performing recovery actions
         * rather than accepting new requests.
         */
        if (log->l_flags & XLOG_RECOVERY_NEEDED) {
                xlog_recover_process_efis(log);
                /*
                 * Sync the log to get all the EFIs out of the AIL.
                 * This isn't absolutely necessary, but it helps in
                 * case the unlink transactions would have problems
                 * pushing the EFIs out of the way.
                 */
                xfs_log_force(log->l_mp, (xfs_lsn_t)0,
                              (XFS_LOG_FORCE | XFS_LOG_SYNC));

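                /*
                 * Unless the caller asked us to skip it
                 * (XFS_MFSI_NOUNLINK), free any inodes still hanging
                 * off the on-disk unlinked lists.
                 */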
                if ((mfsi_flags & XFS_MFSI_NOUNLINK) == 0) {
                        xlog_recover_process_iunlinks(log);
                }

                xlog_recover_check_summary(log);

                cmn_err(CE_NOTE,
                        "Ending XFS recovery on filesystem: %s (dev: %s)",
                        log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
                log->l_flags &= ~XLOG_RECOVERY_NEEDED;
        } else {
                cmn_err(CE_DEBUG,
                        "!Ending clean XFS mount for filesystem: %s",
                        log->l_mp->m_fsname);
        }
        return 0;
}


#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
        xlog_t          *log)
{
        xfs_mount_t     *mp;
        xfs_agf_t       *agfp;
        xfs_agi_t       *agip;
        xfs_buf_t       *agfbp;
        xfs_buf_t       *agibp;
        xfs_daddr_t     agfdaddr;
        xfs_daddr_t     agidaddr;
        xfs_buf_t       *sbbp;
#ifdef XFS_LOUD_RECOVERY
        xfs_sb_t        *sbp;
#endif
        xfs_agnumber_t  agno;
        __uint64_t      freeblks;
        __uint64_t      itotal;
        __uint64_t      ifree;

        mp = log->l_mp;

        freeblks = 0LL;
        itotal = 0LL;
        ifree = 0LL;
        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
                agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agfbp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agf)",
                                                mp, agfbp, agfdaddr);
                }
                agfp = XFS_BUF_TO_AGF(agfbp);
                ASSERT(XFS_AGF_MAGIC ==
                        INT_GET(agfp->agf_magicnum, ARCH_CONVERT));
                ASSERT(XFS_AGF_GOOD_VERSION(
                        INT_GET(agfp->agf_versionnum, ARCH_CONVERT)));
                ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno);

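                /*
                 * Free space in this AG is the free-extent block count
                 * plus the blocks held on the AGFL free list.
                 */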
                freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) +
                            INT_GET(agfp->agf_flcount, ARCH_CONVERT);
                xfs_buf_relse(agfbp);

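                /*
                 * Now do the same for the AGI: total up the allocated
                 * and free inode counts.
                 */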
                agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
                agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agibp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agi)",
                                          mp, agibp, agidaddr);
                }
                agip = XFS_BUF_TO_AGI(agibp);
                ASSERT(XFS_AGI_MAGIC ==
                        INT_GET(agip->agi_magicnum, ARCH_CONVERT));
                ASSERT(XFS_AGI_GOOD_VERSION(
                        INT_GET(agip->agi_versionnum, ARCH_CONVERT)));
                ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno);

                itotal += INT_GET(agip->agi_count, ARCH_CONVERT);
                ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT);
                xfs_buf_relse(agibp);
        }

        sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
        sbp = &mp->m_sb;
        xfs_xlatesb(XFS_BUF_TO_SBP(sbbp), sbp, 1, XFS_SB_ALL_BITS);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
                sbp->sb_icount, itotal);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
                sbp->sb_ifree, ifree);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
                sbp->sb_fdblocks, freeblks);
#if 0
        /*
         * This is turned off until I account for the allocation
         * btree blocks which live in free space.
         */
        ASSERT(sbp->sb_icount == itotal);
        ASSERT(sbp->sb_ifree == ifree);
        ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif
        xfs_buf_relse(sbbp);
}
#endif /* DEBUG */