X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=fs%2Fntfs%2Fattrib.c;h=1ff7f90a18b0a79b2c46351f692f40f7824e6917;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=6c7fb2a2b0c2f517eeb5c88a1bceb4a602e7a8e2;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 6c7fb2a2b..1ff7f90a1 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -1,5 +1,5 @@ /** - * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. + * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. * * Copyright (c) 2001-2004 Anton Altaparmakov * Copyright (c) 2002 Richard Russon @@ -21,933 +21,34 @@ */ #include -#include "ntfs.h" -#include "dir.h" - -/* Temporary helper functions -- might become macros */ - -/** - * ntfs_rl_mm - run_list memmove - * - * It is up to the caller to serialize access to the run list @base. - */ -static inline void ntfs_rl_mm(run_list_element *base, int dst, int src, - int size) -{ - if (likely((dst != src) && (size > 0))) - memmove(base + dst, base + src, size * sizeof (*base)); -} - -/** - * ntfs_rl_mc - run_list memory copy - * - * It is up to the caller to serialize access to the run lists @dstbase and - * @srcbase. - */ -static inline void ntfs_rl_mc(run_list_element *dstbase, int dst, - run_list_element *srcbase, int src, int size) -{ - if (likely(size > 0)) - memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase)); -} - -/** - * ntfs_rl_realloc - Reallocate memory for run_lists - * @rl: original run list - * @old_size: number of run list elements in the original run list @rl - * @new_size: number of run list elements we need space for - * - * As the run_lists grow, more memory will be required. To prevent the - * kernel having to allocate and reallocate large numbers of small bits of - * memory, this function returns and entire page of memory. - * - * It is up to the caller to serialize access to the run list @rl. - * - * N.B. If the new allocation doesn't require a different number of pages in - * memory, the function will return the original pointer. - * - * On success, return a pointer to the newly allocated, or recycled, memory. - * On error, return -errno. The following error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - */ -static inline run_list_element *ntfs_rl_realloc(run_list_element *rl, - int old_size, int new_size) -{ - run_list_element *new_rl; - - old_size = PAGE_ALIGN(old_size * sizeof(*rl)); - new_size = PAGE_ALIGN(new_size * sizeof(*rl)); - if (old_size == new_size) - return rl; - - new_rl = ntfs_malloc_nofs(new_size); - if (unlikely(!new_rl)) - return ERR_PTR(-ENOMEM); - if (likely(rl != NULL)) { - if (unlikely(old_size > new_size)) - old_size = new_size; - memcpy(new_rl, rl, old_size); - ntfs_free(rl); - } - return new_rl; -} - -/** - * ntfs_are_rl_mergeable - test if two run lists can be joined together - * @dst: original run list - * @src: new run list to test for mergeability with @dst - * - * Test if two run lists can be joined together. For this, their VCNs and LCNs - * must be adjacent. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * Return: TRUE Success, the run lists can be merged. - * FALSE Failure, the run lists cannot be merged. - */ -static inline BOOL ntfs_are_rl_mergeable(run_list_element *dst, - run_list_element *src) -{ - BUG_ON(!dst); - BUG_ON(!src); - - if ((dst->lcn < 0) || (src->lcn < 0)) /* Are we merging holes? 
*/ - return FALSE; - if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */ - return FALSE; - if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */ - return FALSE; - - return TRUE; -} - -/** - * __ntfs_rl_merge - merge two run lists without testing if they can be merged - * @dst: original, destination run list - * @src: new run list to merge with @dst - * - * Merge the two run lists, writing into the destination run list @dst. The - * caller must make sure the run lists can be merged or this will corrupt the - * destination run list. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - */ -static inline void __ntfs_rl_merge(run_list_element *dst, run_list_element *src) -{ - dst->length += src->length; -} - -/** - * ntfs_rl_merge - test if two run lists can be joined together and merge them - * @dst: original, destination run list - * @src: new run list to merge with @dst - * - * Test if two run lists can be joined together. For this, their VCNs and LCNs - * must be adjacent. If they can be merged, perform the merge, writing into - * the destination run list @dst. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * Return: TRUE Success, the run lists have been merged. - * FALSE Failure, the run lists cannot be merged and have not been - * modified. - */ -static inline BOOL ntfs_rl_merge(run_list_element *dst, run_list_element *src) -{ - BOOL merge = ntfs_are_rl_mergeable(dst, src); - - if (merge) - __ntfs_rl_merge(dst, src); - return merge; -} - -/** - * ntfs_rl_append - append a run list after a given element - * @dst: original run list to be worked on - * @dsize: number of elements in @dst (including end marker) - * @src: run list to be inserted into @dst - * @ssize: number of elements in @src (excluding end marker) - * @loc: append the new run list @src after this element in @dst - * - * Append the run list @src after element @loc in @dst. Merge the right end of - * the new run list, if necessary. Adjust the size of the hole before the - * appended run list. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * On success, return a pointer to the new, combined, run list. Note, both - * run lists @dst and @src are deallocated before returning so you cannot use - * the pointers for anything any more. (Strictly speaking the returned run list - * may be the same as @dst but this is irrelevant.) - * - * On error, return -errno. Both run lists are left unmodified. The following - * error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - */ -static inline run_list_element *ntfs_rl_append(run_list_element *dst, - int dsize, run_list_element *src, int ssize, int loc) -{ - BOOL right; - int magic; - - BUG_ON(!dst); - BUG_ON(!src); - - /* First, check if the right hand end needs merging. */ - right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); - - /* Space required: @dst size + @src size, less one if we merged. */ - dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); - if (IS_ERR(dst)) - return dst; - /* - * We are guaranteed to succeed from here so can start modifying the - * original run lists. - */ - - /* First, merge the right hand end, if necessary. */ - if (right) - __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); - - magic = loc + ssize; - - /* Move the tail of @dst out of the way, then copy in @src. 
*/ - ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right); - ntfs_rl_mc(dst, loc + 1, src, 0, ssize); - - /* Adjust the size of the preceding hole. */ - dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; - - /* We may have changed the length of the file, so fix the end marker */ - if (dst[magic + 1].lcn == LCN_ENOENT) - dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length; - - return dst; -} - -/** - * ntfs_rl_insert - insert a run list into another - * @dst: original run list to be worked on - * @dsize: number of elements in @dst (including end marker) - * @src: new run list to be inserted - * @ssize: number of elements in @src (excluding end marker) - * @loc: insert the new run list @src before this element in @dst - * - * Insert the run list @src before element @loc in the run list @dst. Merge the - * left end of the new run list, if necessary. Adjust the size of the hole - * after the inserted run list. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * On success, return a pointer to the new, combined, run list. Note, both - * run lists @dst and @src are deallocated before returning so you cannot use - * the pointers for anything any more. (Strictly speaking the returned run list - * may be the same as @dst but this is irrelevant.) - * - * On error, return -errno. Both run lists are left unmodified. The following - * error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - */ -static inline run_list_element *ntfs_rl_insert(run_list_element *dst, - int dsize, run_list_element *src, int ssize, int loc) -{ - BOOL left = FALSE; - BOOL disc = FALSE; /* Discontinuity */ - BOOL hole = FALSE; /* Following a hole */ - int magic; - - BUG_ON(!dst); - BUG_ON(!src); - - /* disc => Discontinuity between the end of @dst and the start of @src. - * This means we might need to insert a hole. - * hole => @dst ends with a hole or an unmapped region which we can - * extend to match the discontinuity. */ - if (loc == 0) - disc = (src[0].vcn > 0); - else { - s64 merged_length; - - left = ntfs_are_rl_mergeable(dst + loc - 1, src); - - merged_length = dst[loc - 1].length; - if (left) - merged_length += src->length; - - disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); - if (disc) - hole = (dst[loc - 1].lcn == LCN_HOLE); - } - - /* Space required: @dst size + @src size, less one if we merged, plus - * one if there was a discontinuity, less one for a trailing hole. */ - dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole); - if (IS_ERR(dst)) - return dst; - /* - * We are guaranteed to succeed from here so can start modifying the - * original run list. - */ - - if (left) - __ntfs_rl_merge(dst + loc - 1, src); - - magic = loc + ssize - left + disc - hole; - - /* Move the tail of @dst out of the way, then copy in @src. */ - ntfs_rl_mm(dst, magic, loc, dsize - loc); - ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left); - - /* Adjust the VCN of the last run ... */ - if (dst[magic].lcn <= LCN_HOLE) - dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; - /* ... and the length. */ - if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED) - dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn; - - /* Writing beyond the end of the file and there's a discontinuity. 
*/ - if (disc) { - if (hole) - dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn; - else { - if (loc > 0) { - dst[loc].vcn = dst[loc - 1].vcn + - dst[loc - 1].length; - dst[loc].length = dst[loc + 1].vcn - - dst[loc].vcn; - } else { - dst[loc].vcn = 0; - dst[loc].length = dst[loc + 1].vcn; - } - dst[loc].lcn = LCN_RL_NOT_MAPPED; - } - - magic += hole; - - if (dst[magic].lcn == LCN_ENOENT) - dst[magic].vcn = dst[magic - 1].vcn + - dst[magic - 1].length; - } - return dst; -} - -/** - * ntfs_rl_replace - overwrite a run_list element with another run list - * @dst: original run list to be worked on - * @dsize: number of elements in @dst (including end marker) - * @src: new run list to be inserted - * @ssize: number of elements in @src (excluding end marker) - * @loc: index in run list @dst to overwrite with @src - * - * Replace the run list element @dst at @loc with @src. Merge the left and - * right ends of the inserted run list, if necessary. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * On success, return a pointer to the new, combined, run list. Note, both - * run lists @dst and @src are deallocated before returning so you cannot use - * the pointers for anything any more. (Strictly speaking the returned run list - * may be the same as @dst but this is irrelevant.) - * - * On error, return -errno. Both run lists are left unmodified. The following - * error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - */ -static inline run_list_element *ntfs_rl_replace(run_list_element *dst, - int dsize, run_list_element *src, int ssize, int loc) -{ - BOOL left = FALSE; - BOOL right; - int magic; - - BUG_ON(!dst); - BUG_ON(!src); - - /* First, merge the left and right ends, if necessary. */ - right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); - if (loc > 0) - left = ntfs_are_rl_mergeable(dst + loc - 1, src); - - /* Allocate some space. We'll need less if the left, right, or both - * ends were merged. */ - dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); - if (IS_ERR(dst)) - return dst; - /* - * We are guaranteed to succeed from here so can start modifying the - * original run lists. - */ - if (right) - __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); - if (left) - __ntfs_rl_merge(dst + loc - 1, src); - - /* FIXME: What does this mean? (AIA) */ - magic = loc + ssize - left; - - /* Move the tail of @dst out of the way, then copy in @src. */ - ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1); - ntfs_rl_mc(dst, loc, src, left, ssize - left); - - /* We may have changed the length of the file, so fix the end marker */ - if (dst[magic].lcn == LCN_ENOENT) - dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; - return dst; -} - -/** - * ntfs_rl_split - insert a run list into the centre of a hole - * @dst: original run list to be worked on - * @dsize: number of elements in @dst (including end marker) - * @src: new run list to be inserted - * @ssize: number of elements in @src (excluding end marker) - * @loc: index in run list @dst at which to split and insert @src - * - * Split the run list @dst at @loc into two and insert @new in between the two - * fragments. No merging of run lists is necessary. Adjust the size of the - * holes either side. - * - * It is up to the caller to serialize access to the run lists @dst and @src. - * - * On success, return a pointer to the new, combined, run list. 
Note, both - * run lists @dst and @src are deallocated before returning so you cannot use - * the pointers for anything any more. (Strictly speaking the returned run list - * may be the same as @dst but this is irrelevant.) - * - * On error, return -errno. Both run lists are left unmodified. The following - * error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - */ -static inline run_list_element *ntfs_rl_split(run_list_element *dst, int dsize, - run_list_element *src, int ssize, int loc) -{ - BUG_ON(!dst); - BUG_ON(!src); - - /* Space required: @dst size + @src size + one new hole. */ - dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1); - if (IS_ERR(dst)) - return dst; - /* - * We are guaranteed to succeed from here so can start modifying the - * original run lists. - */ - - /* Move the tail of @dst out of the way, then copy in @src. */ - ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc); - ntfs_rl_mc(dst, loc + 1, src, 0, ssize); - - /* Adjust the size of the holes either size of @src. */ - dst[loc].length = dst[loc+1].vcn - dst[loc].vcn; - dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length; - dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn; - - return dst; -} - -/** - * ntfs_merge_run_lists - merge two run_lists into one - * @drl: original run list to be worked on - * @srl: new run list to be merged into @drl - * - * First we sanity check the two run lists @srl and @drl to make sure that they - * are sensible and can be merged. The run list @srl must be either after the - * run list @drl or completely within a hole (or unmapped region) in @drl. - * - * It is up to the caller to serialize access to the run lists @drl and @srl. - * - * Merging of run lists is necessary in two cases: - * 1. When attribute lists are used and a further extent is being mapped. - * 2. When new clusters are allocated to fill a hole or extend a file. - * - * There are four possible ways @srl can be merged. It can: - * - be inserted at the beginning of a hole, - * - split the hole in two and be inserted between the two fragments, - * - be appended at the end of a hole, or it can - * - replace the whole hole. - * It can also be appended to the end of the run list, which is just a variant - * of the insert case. - * - * On success, return a pointer to the new, combined, run list. Note, both - * run lists @drl and @srl are deallocated before returning so you cannot use - * the pointers for anything any more. (Strictly speaking the returned run list - * may be the same as @dst but this is irrelevant.) - * - * On error, return -errno. Both run lists are left unmodified. The following - * error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EINVAL - Invalid parameters were passed in. - * -ERANGE - The run lists overlap and cannot be merged. - */ -run_list_element *ntfs_merge_run_lists(run_list_element *drl, - run_list_element *srl) -{ - int di, si; /* Current index into @[ds]rl. */ - int sstart; /* First index with lcn > LCN_RL_NOT_MAPPED. */ - int dins; /* Index into @drl at which to insert @srl. */ - int dend, send; /* Last index into @[ds]rl. */ - int dfinal, sfinal; /* The last index into @[ds]rl with - lcn >= LCN_HOLE. */ - int marker = 0; - VCN marker_vcn = 0; - -#ifdef DEBUG - ntfs_debug("dst:"); - ntfs_debug_dump_runlist(drl); - ntfs_debug("src:"); - ntfs_debug_dump_runlist(srl); -#endif - - /* Check for silly calling... 
*/ - if (unlikely(!srl)) - return drl; - if (unlikely(IS_ERR(srl) || IS_ERR(drl))) - return ERR_PTR(-EINVAL); - - /* Check for the case where the first mapping is being done now. */ - if (unlikely(!drl)) { - drl = srl; - /* Complete the source run list if necessary. */ - if (unlikely(drl[0].vcn)) { - /* Scan to the end of the source run list. */ - for (dend = 0; likely(drl[dend].length); dend++) - ; - drl = ntfs_rl_realloc(drl, dend, dend + 1); - if (IS_ERR(drl)) - return drl; - /* Insert start element at the front of the run list. */ - ntfs_rl_mm(drl, 1, 0, dend); - drl[0].vcn = 0; - drl[0].lcn = LCN_RL_NOT_MAPPED; - drl[0].length = drl[1].vcn; - } - goto finished; - } - - si = di = 0; - - /* Skip any unmapped start element(s) in the source run_list. */ - while (srl[si].length && srl[si].lcn < (LCN)LCN_HOLE) - si++; - - /* Can't have an entirely unmapped source run list. */ - BUG_ON(!srl[si].length); - - /* Record the starting points. */ - sstart = si; - - /* - * Skip forward in @drl until we reach the position where @srl needs to - * be inserted. If we reach the end of @drl, @srl just needs to be - * appended to @drl. - */ - for (; drl[di].length; di++) { - if (drl[di].vcn + drl[di].length > srl[sstart].vcn) - break; - } - dins = di; - - /* Sanity check for illegal overlaps. */ - if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) && - (srl[si].lcn >= 0)) { - ntfs_error(NULL, "Run lists overlap. Cannot merge!"); - return ERR_PTR(-ERANGE); - } - - /* Scan to the end of both run lists in order to know their sizes. */ - for (send = si; srl[send].length; send++) - ; - for (dend = di; drl[dend].length; dend++) - ; - - if (srl[send].lcn == (LCN)LCN_ENOENT) - marker_vcn = srl[marker = send].vcn; - - /* Scan to the last element with lcn >= LCN_HOLE. */ - for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--) - ; - for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--) - ; - - { - BOOL start; - BOOL finish; - int ds = dend + 1; /* Number of elements in drl & srl */ - int ss = sfinal - sstart + 1; - - start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) || /* End of file */ - (drl[dins].vcn == srl[sstart].vcn)); /* Start of hole */ - finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) && /* End of file */ - ((drl[dins].vcn + drl[dins].length) <= /* End of hole */ - (srl[send - 1].vcn + srl[send - 1].length))); - - /* Or we'll lose an end marker */ - if (start && finish && (drl[dins].length == 0)) - ss++; - if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn)) - finish = FALSE; -#if 0 - ntfs_debug("dfinal = %i, dend = %i", dfinal, dend); - ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send); - ntfs_debug("start = %i, finish = %i", start, finish); - ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins); -#endif - if (start) { - if (finish) - drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins); - else - drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins); - } else { - if (finish) - drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins); - else - drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins); - } - if (IS_ERR(drl)) { - ntfs_error(NULL, "Merge failed."); - return drl; - } - ntfs_free(srl); - if (marker) { - ntfs_debug("Triggering marker code."); - for (ds = dend; drl[ds].length; ds++) - ; - /* We only need to care if @srl ended after @drl. 
*/ - if (drl[ds].vcn <= marker_vcn) { - int slots = 0; - - if (drl[ds].vcn == marker_vcn) { - ntfs_debug("Old marker = 0x%llx, replacing " - "with LCN_ENOENT.", - (unsigned long long) - drl[ds].lcn); - drl[ds].lcn = (LCN)LCN_ENOENT; - goto finished; - } - /* - * We need to create an unmapped run list element in - * @drl or extend an existing one before adding the - * ENOENT terminator. - */ - if (drl[ds].lcn == (LCN)LCN_ENOENT) { - ds--; - slots = 1; - } - if (drl[ds].lcn != (LCN)LCN_RL_NOT_MAPPED) { - /* Add an unmapped run list element. */ - if (!slots) { - /* FIXME/TODO: We need to have the - * extra memory already! (AIA) */ - drl = ntfs_rl_realloc(drl, ds, ds + 2); - if (!drl) - goto critical_error; - slots = 2; - } - ds++; - /* Need to set vcn if it isn't set already. */ - if (slots != 1) - drl[ds].vcn = drl[ds - 1].vcn + - drl[ds - 1].length; - drl[ds].lcn = (LCN)LCN_RL_NOT_MAPPED; - /* We now used up a slot. */ - slots--; - } - drl[ds].length = marker_vcn - drl[ds].vcn; - /* Finally add the ENOENT terminator. */ - ds++; - if (!slots) { - /* FIXME/TODO: We need to have the extra - * memory already! (AIA) */ - drl = ntfs_rl_realloc(drl, ds, ds + 1); - if (!drl) - goto critical_error; - } - drl[ds].vcn = marker_vcn; - drl[ds].lcn = (LCN)LCN_ENOENT; - drl[ds].length = (s64)0; - } - } - } - -finished: - /* The merge was completed successfully. */ - ntfs_debug("Merged run list:"); - ntfs_debug_dump_runlist(drl); - return drl; - -critical_error: - /* Critical error! We cannot afford to fail here. */ - ntfs_error(NULL, "Critical error! Not enough memory."); - panic("NTFS: Cannot continue."); -} - -/** - * decompress_mapping_pairs - convert mapping pairs array to run list - * @vol: ntfs volume on which the attribute resides - * @attr: attribute record whose mapping pairs array to decompress - * @old_rl: optional run list in which to insert @attr's run list - * - * It is up to the caller to serialize access to the run list @old_rl. - * - * Decompress the attribute @attr's mapping pairs array into a run list. On - * success, return the decompressed run list. - * - * If @old_rl is not NULL, decompressed run list is inserted into the - * appropriate place in @old_rl and the resultant, combined run list is - * returned. The original @old_rl is deallocated. - * - * On error, return -errno. @old_rl is left unmodified in that case. - * - * The following error codes are defined: - * -ENOMEM - Not enough memory to allocate run list array. - * -EIO - Corrupt run list. - * -EINVAL - Invalid parameters were passed in. - * -ERANGE - The two run lists overlap. - * - * FIXME: For now we take the conceptionally simplest approach of creating the - * new run list disregarding the already existing one and then splicing the - * two into one, if that is possible (we check for overlap and discard the new - * run list if overlap present before returning ERR_PTR(-ERANGE)). - */ -run_list_element *decompress_mapping_pairs(const ntfs_volume *vol, - const ATTR_RECORD *attr, run_list_element *old_rl) -{ - VCN vcn; /* Current vcn. */ - LCN lcn; /* Current lcn. */ - s64 deltaxcn; /* Change in [vl]cn. */ - run_list_element *rl; /* The output run list. */ - u8 *buf; /* Current position in mapping pairs array. */ - u8 *attr_end; /* End of attribute. */ - int rlsize; /* Size of run list buffer. */ - u16 rlpos; /* Current run list position in units of - run_list_elements. */ - u8 b; /* Current byte offset in buf. */ - -#ifdef DEBUG - /* Make sure attr exists and is non-resident. 
*/ - if (!attr || !attr->non_resident || sle64_to_cpu( - attr->data.non_resident.lowest_vcn) < (VCN)0) { - ntfs_error(vol->sb, "Invalid arguments."); - return ERR_PTR(-EINVAL); - } -#endif - /* Start at vcn = lowest_vcn and lcn 0. */ - vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn); - lcn = 0; - /* Get start of the mapping pairs array. */ - buf = (u8*)attr + le16_to_cpu( - attr->data.non_resident.mapping_pairs_offset); - attr_end = (u8*)attr + le32_to_cpu(attr->length); - if (unlikely(buf < (u8*)attr || buf > attr_end)) { - ntfs_error(vol->sb, "Corrupt attribute."); - return ERR_PTR(-EIO); - } - /* Current position in run list array. */ - rlpos = 0; - /* Allocate first page and set current run list size to one page. */ - rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE); - if (unlikely(!rl)) - return ERR_PTR(-ENOMEM); - /* Insert unmapped starting element if necessary. */ - if (vcn) { - rl->vcn = (VCN)0; - rl->lcn = (LCN)LCN_RL_NOT_MAPPED; - rl->length = vcn; - rlpos++; - } - while (buf < attr_end && *buf) { - /* - * Allocate more memory if needed, including space for the - * not-mapped and terminator elements. ntfs_malloc_nofs() - * operates on whole pages only. - */ - if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) { - run_list_element *rl2; - - rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE); - if (unlikely(!rl2)) { - ntfs_free(rl); - return ERR_PTR(-ENOMEM); - } - memcpy(rl2, rl, rlsize); - ntfs_free(rl); - rl = rl2; - rlsize += PAGE_SIZE; - } - /* Enter the current vcn into the current run_list element. */ - rl[rlpos].vcn = vcn; - /* - * Get the change in vcn, i.e. the run length in clusters. - * Doing it this way ensures that we signextend negative values. - * A negative run length doesn't make any sense, but hey, I - * didn't make up the NTFS specs and Windows NT4 treats the run - * length as a signed value so that's how it is... - */ - b = *buf & 0xf; - if (b) { - if (unlikely(buf + b > attr_end)) - goto io_error; - for (deltaxcn = (s8)buf[b--]; b; b--) - deltaxcn = (deltaxcn << 8) + buf[b]; - } else { /* The length entry is compulsory. */ - ntfs_error(vol->sb, "Missing length entry in mapping " - "pairs array."); - deltaxcn = (s64)-1; - } - /* - * Assume a negative length to indicate data corruption and - * hence clean-up and return NULL. - */ - if (unlikely(deltaxcn < 0)) { - ntfs_error(vol->sb, "Invalid length in mapping pairs " - "array."); - goto err_out; - } - /* - * Enter the current run length into the current run list - * element. - */ - rl[rlpos].length = deltaxcn; - /* Increment the current vcn by the current run length. */ - vcn += deltaxcn; - /* - * There might be no lcn change at all, as is the case for - * sparse clusters on NTFS 3.0+, in which case we set the lcn - * to LCN_HOLE. - */ - if (!(*buf & 0xf0)) - rl[rlpos].lcn = (LCN)LCN_HOLE; - else { - /* Get the lcn change which really can be negative. */ - u8 b2 = *buf & 0xf; - b = b2 + ((*buf >> 4) & 0xf); - if (buf + b > attr_end) - goto io_error; - for (deltaxcn = (s8)buf[b--]; b > b2; b--) - deltaxcn = (deltaxcn << 8) + buf[b]; - /* Change the current lcn to its new value. */ - lcn += deltaxcn; -#ifdef DEBUG - /* - * On NTFS 1.2-, apparently can have lcn == -1 to - * indicate a hole. But we haven't verified ourselves - * whether it is really the lcn or the deltaxcn that is - * -1. So if either is found give us a message so we - * can investigate it further! 
- */ - if (vol->major_ver < 3) { - if (unlikely(deltaxcn == (LCN)-1)) - ntfs_error(vol->sb, "lcn delta == -1"); - if (unlikely(lcn == (LCN)-1)) - ntfs_error(vol->sb, "lcn == -1"); - } -#endif - /* Check lcn is not below -1. */ - if (unlikely(lcn < (LCN)-1)) { - ntfs_error(vol->sb, "Invalid LCN < -1 in " - "mapping pairs array."); - goto err_out; - } - /* Enter the current lcn into the run_list element. */ - rl[rlpos].lcn = lcn; - } - /* Get to the next run_list element. */ - rlpos++; - /* Increment the buffer position to the next mapping pair. */ - buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1; - } - if (unlikely(buf >= attr_end)) - goto io_error; - /* - * If there is a highest_vcn specified, it must be equal to the final - * vcn in the run list - 1, or something has gone badly wrong. - */ - deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn); - if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) { -mpa_err: - ntfs_error(vol->sb, "Corrupt mapping pairs array in " - "non-resident attribute."); - goto err_out; - } - /* Setup not mapped run list element if this is the base extent. */ - if (!attr->data.non_resident.lowest_vcn) { - VCN max_cluster; - - max_cluster = (sle64_to_cpu( - attr->data.non_resident.allocated_size) + - vol->cluster_size - 1) >> - vol->cluster_size_bits; - /* - * If there is a difference between the highest_vcn and the - * highest cluster, the run list is either corrupt or, more - * likely, there are more extents following this one. - */ - if (deltaxcn < --max_cluster) { - ntfs_debug("More extents to follow; deltaxcn = 0x%llx, " - "max_cluster = 0x%llx", - (unsigned long long)deltaxcn, - (unsigned long long)max_cluster); - rl[rlpos].vcn = vcn; - vcn += rl[rlpos].length = max_cluster - deltaxcn; - rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED; - rlpos++; - } else if (unlikely(deltaxcn > max_cluster)) { - ntfs_error(vol->sb, "Corrupt attribute. deltaxcn = " - "0x%llx, max_cluster = 0x%llx", - (unsigned long long)deltaxcn, - (unsigned long long)max_cluster); - goto mpa_err; - } - rl[rlpos].lcn = (LCN)LCN_ENOENT; - } else /* Not the base extent. There may be more extents to follow. */ - rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED; - - /* Setup terminating run_list element. */ - rl[rlpos].vcn = vcn; - rl[rlpos].length = (s64)0; - /* If no existing run list was specified, we are done. */ - if (!old_rl) { - ntfs_debug("Mapping pairs array successfully decompressed:"); - ntfs_debug_dump_runlist(rl); - return rl; - } - /* Now combine the new and old run lists checking for overlaps. */ - old_rl = ntfs_merge_run_lists(old_rl, rl); - if (likely(!IS_ERR(old_rl))) - return old_rl; - ntfs_free(rl); - ntfs_error(vol->sb, "Failed to merge run lists."); - return old_rl; -io_error: - ntfs_error(vol->sb, "Corrupt attribute."); -err_out: - ntfs_free(rl); - return ERR_PTR(-EIO); -} +#include "attrib.h" +#include "debug.h" +#include "layout.h" +#include "mft.h" +#include "ntfs.h" +#include "types.h" /** - * map_run_list - map (a part of) a run list of an ntfs inode - * @ni: ntfs inode for which to map (part of) a run list - * @vcn: map run list part containing this vcn + * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode + * @ni: ntfs inode for which to map (part of) a runlist + * @vcn: map runlist part containing this vcn * - * Map the part of a run list containing the @vcn of an the ntfs inode @ni. + * Map the part of a runlist containing the @vcn of the ntfs inode @ni. * * Return 0 on success and -errno on error. 
+ * + * Locking: - The runlist must be unlocked on entry and is unlocked on return. + * - This function takes the lock for writing and modifies the runlist. */ -int map_run_list(ntfs_inode *ni, VCN vcn) +int ntfs_map_runlist(ntfs_inode *ni, VCN vcn) { ntfs_inode *base_ni; - attr_search_context *ctx; + ntfs_attr_search_ctx *ctx; MFT_RECORD *mrec; int err = 0; - ntfs_debug("Mapping run list part containing vcn 0x%llx.", + ntfs_debug("Mapping runlist part containing vcn 0x%llx.", (unsigned long long)vcn); if (!NInoAttr(ni)) @@ -958,98 +59,137 @@ int map_run_list(ntfs_inode *ni, VCN vcn) mrec = map_mft_record(base_ni); if (IS_ERR(mrec)) return PTR_ERR(mrec); - ctx = get_attr_search_ctx(base_ni, mrec); - if (!ctx) { + ctx = ntfs_attr_get_search_ctx(base_ni, mrec); + if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } - if (!lookup_attr(ni->type, ni->name, ni->name_len, IGNORE_CASE, vcn, - NULL, 0, ctx)) { - put_attr_search_ctx(ctx); - err = -ENOENT; - goto err_out; - } + err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, + CASE_SENSITIVE, vcn, NULL, 0, ctx); + if (unlikely(err)) + goto put_err_out; - down_write(&ni->run_list.lock); + down_write(&ni->runlist.lock); /* Make sure someone else didn't do the work while we were sleeping. */ - if (likely(vcn_to_lcn(ni->run_list.rl, vcn) <= LCN_RL_NOT_MAPPED)) { - run_list_element *rl; + if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <= + LCN_RL_NOT_MAPPED)) { + runlist_element *rl; - rl = decompress_mapping_pairs(ni->vol, ctx->attr, - ni->run_list.rl); - if (unlikely(IS_ERR(rl))) + rl = ntfs_mapping_pairs_decompress(ni->vol, ctx->attr, + ni->runlist.rl); + if (IS_ERR(rl)) err = PTR_ERR(rl); else - ni->run_list.rl = rl; + ni->runlist.rl = rl; } - up_write(&ni->run_list.lock); + up_write(&ni->runlist.lock); - put_attr_search_ctx(ctx); +put_err_out: + ntfs_attr_put_search_ctx(ctx); err_out: unmap_mft_record(base_ni); return err; } /** - * vcn_to_lcn - convert a vcn into a lcn given a run list - * @rl: run list to use for conversion - * @vcn: vcn to convert - * - * Convert the virtual cluster number @vcn of an attribute into a logical - * cluster number (lcn) of a device using the run list @rl to map vcns to their - * corresponding lcns. - * - * It is up to the caller to serialize access to the run list @rl. - * - * Since lcns must be >= 0, we use negative return values with special meaning: - * - * Return value Meaning / Description - * ================================================== - * -1 = LCN_HOLE Hole / not allocated on disk. - * -2 = LCN_RL_NOT_MAPPED This is part of the run list which has not been - * inserted into the run list yet. - * -3 = LCN_ENOENT There is no such vcn in the attribute. - * -4 = LCN_EINVAL Input parameter error (if debug enabled). + * ntfs_find_vcn - find a vcn in the runlist described by an ntfs inode + * @ni: ntfs inode describing the runlist to search + * @vcn: vcn to find + * @need_write: if false, lock for reading and if true, lock for writing + * + * Find the virtual cluster number @vcn in the runlist described by the ntfs + * inode @ni and return the address of the runlist element containing the @vcn. + * The runlist is left locked and the caller has to unlock it. If @need_write + * is true, the runlist is locked for writing and if @need_write is false, the + * runlist is locked for reading. In the error case, the runlist is not left + * locked. + * + * Note you need to distinguish between the lcn of the returned runlist element + * being >= 0 and LCN_HOLE. 
In the later case you have to return zeroes on + * read and allocate clusters on write. + * + * Return the runlist element containing the @vcn on success and + * ERR_PTR(-errno) on error. You need to test the return value with IS_ERR() + * to decide if the return is success or failure and PTR_ERR() to get to the + * error code if IS_ERR() is true. + * + * The possible error return codes are: + * -ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds. + * -ENOMEM - Not enough memory to map runlist. + * -EIO - Critical error (runlist/file is corrupt, i/o error, etc). + * + * Locking: - The runlist must be unlocked on entry. + * - On failing return, the runlist is unlocked. + * - On successful return, the runlist is locked. If @need_write us + * true, it is locked for writing. Otherwise is is locked for + * reading. */ -LCN vcn_to_lcn(const run_list_element *rl, const VCN vcn) +runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn, + const BOOL need_write) { - int i; - -#ifdef DEBUG - if (vcn < (VCN)0) - return (LCN)LCN_EINVAL; -#endif - /* - * If rl is NULL, assume that we have found an unmapped run list. The - * caller can then attempt to map it and fail appropriately if - * necessary. - */ - if (unlikely(!rl)) - return (LCN)LCN_RL_NOT_MAPPED; - - /* Catch out of lower bounds vcn. */ - if (unlikely(vcn < rl[0].vcn)) - return (LCN)LCN_ENOENT; - - for (i = 0; likely(rl[i].length); i++) { - if (unlikely(vcn < rl[i+1].vcn)) { - if (likely(rl[i].lcn >= (LCN)0)) - return rl[i].lcn + (vcn - rl[i].vcn); - return rl[i].lcn; + runlist_element *rl; + int err = 0; + BOOL is_retry = FALSE; + + ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, lock for %sing.", + ni->mft_no, (unsigned long long)vcn, + !need_write ? "read" : "writ"); + BUG_ON(!ni); + BUG_ON(!NInoNonResident(ni)); + BUG_ON(vcn < 0); +lock_retry_remap: + if (!need_write) + down_read(&ni->runlist.lock); + else + down_write(&ni->runlist.lock); + rl = ni->runlist.rl; + if (likely(rl && vcn >= rl[0].vcn)) { + while (likely(rl->length)) { + if (likely(vcn < rl[1].vcn)) { + if (likely(rl->lcn >= LCN_HOLE)) { + ntfs_debug("Done."); + return rl; + } + break; + } + rl++; + } + if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) { + if (likely(rl->lcn == LCN_ENOENT)) + err = -ENOENT; + else + err = -EIO; } } - /* - * The terminator element is setup to the correct value, i.e. one of - * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT. - */ - if (likely(rl[i].lcn < (LCN)0)) - return rl[i].lcn; - /* Just in case... We could replace this with BUG() some day. */ - return (LCN)LCN_ENOENT; + if (!need_write) + up_read(&ni->runlist.lock); + else + up_write(&ni->runlist.lock); + if (!err && !is_retry) { + /* + * The @vcn is in an unmapped region, map the runlist and + * retry. + */ + err = ntfs_map_runlist(ni, vcn); + if (likely(!err)) { + is_retry = TRUE; + goto lock_retry_remap; + } + /* + * -EINVAL and -ENOENT coming from a failed mapping attempt are + * equivalent to i/o errors for us as they should not happen in + * our code paths. + */ + if (err == -EINVAL || err == -ENOENT) + err = -EIO; + } else if (!err) + err = -EIO; + ntfs_error(ni->vol->sb, "Failed with error code %i.", err); + return ERR_PTR(err); } /** - * find_attr - find (next) attribute in mft record + * ntfs_attr_find - find (next) attribute in mft record * @type: attribute type to find * @name: attribute name to find (optional, i.e. 
NULL means don't care) * @name_len: attribute name length (only needed if @name present) @@ -1058,62 +198,62 @@ LCN vcn_to_lcn(const run_list_element *rl, const VCN vcn) * @val_len: attribute value length * @ctx: search context with mft record and attribute to search from * - * You shouldn't need to call this function directly. Use lookup_attr() instead. + * You should not need to call this function directly. Use ntfs_attr_lookup() + * instead. + * + * ntfs_attr_find() takes a search context @ctx as parameter and searches the + * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an + * attribute of @type, optionally @name and @val. + * + * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will + * point to the found attribute. * - * find_attr() takes a search context @ctx as parameter and searches the mft - * record specified by @ctx->mrec, beginning at @ctx->attr, for an attribute of - * @type, optionally @name and @val. If found, find_attr() returns TRUE and - * @ctx->attr will point to the found attribute. If not found, find_attr() - * returns FALSE and @ctx->attr is undefined (i.e. do not rely on it not - * changing). + * If the attribute is not found, ntfs_attr_find() returns -ENOENT and + * @ctx->attr will point to the attribute before which the attribute being + * searched for would need to be inserted if such an action were to be desired. * - * If @ctx->is_first is TRUE, the search begins with @ctx->attr itself. If it + * On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is + * undefined and in particular do not rely on it not changing. + * + * If @ctx->is_first is TRUE, the search begins with @ctx->attr itself. If it * is FALSE, the search begins after @ctx->attr. * * If @ic is IGNORE_CASE, the @name comparisson is not case sensitive and * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record - * @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at - * the upcase table. If @ic is CASE_SENSITIVE, the comparison is case - * sensitive. When @name is present, @name_len is the @name length in Unicode + * @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at + * the upcase table. If @ic is CASE_SENSITIVE, the comparison is case + * sensitive. When @name is present, @name_len is the @name length in Unicode * characters. * * If @name is not present (NULL), we assume that the unnamed attribute is * being searched for. * - * Finally, the resident attribute value @val is looked for, if present. If @val - * is not present (NULL), @val_len is ignored. + * Finally, the resident attribute value @val is looked for, if present. If + * @val is not present (NULL), @val_len is ignored. * - * find_attr() only searches the specified mft record and it ignores the + * ntfs_attr_find() only searches the specified mft record and it ignores the * presence of an attribute list attribute (unless it is the one being searched - * for, obviously). If you need to take attribute lists into consideration, use - * lookup_attr() instead (see below). This also means that you cannot use - * find_attr() to search for extent records of non-resident attributes, as - * extents with lowest_vcn != 0 are usually described by the attribute list - * attribute only. - Note that it is possible that the first extent is only in - * the attribute list while the last extent is in the base mft record, so don't - * rely on being able to find the first extent in the base mft record. + * for, obviously). 
If you need to take attribute lists into consideration, + * use ntfs_attr_lookup() instead (see below). This also means that you cannot + * use ntfs_attr_find() to search for extent records of non-resident + * attributes, as extents with lowest_vcn != 0 are usually described by the + * attribute list attribute only. - Note that it is possible that the first + * extent is only in the attribute list while the last extent is in the base + * mft record, so do not rely on being able to find the first extent in the + * base mft record. * * Warning: Never use @val when looking for attribute types which can be * non-resident as this most likely will result in a crash! */ -BOOL find_attr(const ATTR_TYPES type, const ntfschar *name, const u32 name_len, - const IGNORE_CASE_BOOL ic, const u8 *val, const u32 val_len, - attr_search_context *ctx) +static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name, + const u32 name_len, const IGNORE_CASE_BOOL ic, + const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx) { ATTR_RECORD *a; - ntfs_volume *vol; - ntfschar *upcase; - u32 upcase_len; + ntfs_volume *vol = ctx->ntfs_ino->vol; + ntfschar *upcase = vol->upcase; + u32 upcase_len = vol->upcase_len; - if (ic == IGNORE_CASE) { - vol = ctx->ntfs_ino->vol; - upcase = vol->upcase; - upcase_len = vol->upcase_len; - } else { - vol = NULL; - upcase = NULL; - upcase_len = 0; - } /* * Iterate over attributes in mft record starting at @ctx->attr, or the * attribute following that, if @ctx->is_first is TRUE. @@ -1129,21 +269,21 @@ BOOL find_attr(const ATTR_TYPES type, const ntfschar *name, const u32 name_len, le32_to_cpu(ctx->mrec->bytes_allocated)) break; ctx->attr = a; - /* We catch $END with this more general check, too... */ - if (le32_to_cpu(a->type) > le32_to_cpu(type)) - return FALSE; + if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) || + a->type == AT_END)) + return -ENOENT; if (unlikely(!a->length)) break; if (a->type != type) continue; /* - * If @name is present, compare the two names. If @name is + * If @name is present, compare the two names. If @name is * missing, assume we want an unnamed attribute. */ if (!name) { /* The search failed if the found attribute is named. */ if (a->name_length) - return FALSE; + return -ENOENT; } else if (!ntfs_are_names_equal(name, name_len, (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)), a->name_length, ic, upcase, upcase_len)) { @@ -1151,7 +291,7 @@ BOOL find_attr(const ATTR_TYPES type, const ntfschar *name, const u32 name_len, rc = ntfs_collate_names(name, name_len, (ntfschar*)((u8*)a + - le16_to_cpu(a->name_offset)), + le16_to_cpu(a->name_offset)), a->name_length, 1, IGNORE_CASE, upcase, upcase_len); /* @@ -1159,67 +299,66 @@ BOOL find_attr(const ATTR_TYPES type, const ntfschar *name, const u32 name_len, * matching attribute. */ if (rc == -1) - return FALSE; + return -ENOENT; /* If the strings are not equal, continue search. */ if (rc) continue; rc = ntfs_collate_names(name, name_len, (ntfschar*)((u8*)a + - le16_to_cpu(a->name_offset)), + le16_to_cpu(a->name_offset)), a->name_length, 1, CASE_SENSITIVE, upcase, upcase_len); if (rc == -1) - return FALSE; + return -ENOENT; if (rc) continue; } /* * The names match or @name not present and attribute is - * unnamed. If no @val specified, we have found the attribute + * unnamed. If no @val specified, we have found the attribute * and are done. */ if (!val) - return TRUE; + return 0; /* @val is present; compare values. 
*/ else { - u32 vl; register int rc; - vl = le32_to_cpu(a->data.resident.value_length); - if (vl > val_len) - vl = val_len; - rc = memcmp(val, (u8*)a + le16_to_cpu( - a->data.resident.value_offset), vl); + a->data.resident.value_offset), + min_t(u32, val_len, le32_to_cpu( + a->data.resident.value_length))); /* * If @val collates before the current attribute's * value, there is no matching attribute. */ if (!rc) { register u32 avl; + avl = le32_to_cpu( a->data.resident.value_length); if (val_len == avl) - return TRUE; + return 0; if (val_len < avl) - return FALSE; + return -ENOENT; } else if (rc < 0) - return FALSE; + return -ENOENT; } } - ntfs_error(NULL, "Inode is corrupt. Run chkdsk."); - return FALSE; + ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk."); + NVolSetErrors(vol); + return -EIO; } /** * load_attribute_list - load an attribute list into memory * @vol: ntfs volume from which to read - * @run_list: run list of the attribute list + * @runlist: runlist of the attribute list * @al_start: destination buffer * @size: size of the destination buffer in bytes * @initialized_size: initialized size of the attribute list * - * Walk the run list @run_list and load all clusters from it copying them into + * Walk the runlist @runlist and load all clusters from it copying them into * the linear buffer @al. The maximum number of bytes copied to @al is @size * bytes. Note, @size does not need to be a multiple of the cluster size. If * @initialized_size is less than @size, the region in @al between @@ -1227,13 +366,13 @@ BOOL find_attr(const ATTR_TYPES type, const ntfschar *name, const u32 name_len, * * Return 0 on success or -errno on error. */ -int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al_start, +int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start, const s64 size, const s64 initialized_size) { LCN lcn; u8 *al = al_start; u8 *al_end = al + initialized_size; - run_list_element *rl; + runlist_element *rl; struct buffer_head *bh; struct super_block *sb; unsigned long block_size; @@ -1242,7 +381,7 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al_start, unsigned char block_size_bits; ntfs_debug("Entering."); - if (!vol || !run_list || !al || size <= 0 || initialized_size < 0 || + if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 || initialized_size > size) return -EINVAL; if (!initialized_size) { @@ -1252,18 +391,18 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al_start, sb = vol->sb; block_size = sb->s_blocksize; block_size_bits = sb->s_blocksize_bits; - down_read(&run_list->lock); - rl = run_list->rl; - /* Read all clusters specified by the run list one run at a time. */ + down_read(&runlist->lock); + rl = runlist->rl; + /* Read all clusters specified by the runlist one run at a time. */ while (rl->length) { - lcn = vcn_to_lcn(rl, rl->vcn); + lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn); ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.", (unsigned long long)rl->vcn, (unsigned long long)lcn); /* The attribute list cannot be sparse. */ if (lcn < 0) { - ntfs_error(sb, "vcn_to_lcn() failed. Cannot read " - "attribute list."); + ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. 
Cannot " + "read attribute list."); goto err_out; } block = lcn << vol->cluster_size_bits >> block_size_bits; @@ -1292,7 +431,7 @@ initialize: memset(al_start + initialized_size, 0, size - initialized_size); } done: - up_read(&run_list->lock); + up_read(&runlist->lock); return err; do_final: if (al < al_end) { @@ -1320,7 +459,7 @@ err_out: } /** - * find_external_attr - find an attribute in the attribute list of an ntfs inode + * ntfs_external_attr_find - find an attribute in the attribute list of an inode * @type: attribute type to find * @name: attribute name to find (optional, i.e. NULL means don't care) * @name_len: attribute name length (only needed if @name present) @@ -1330,34 +469,49 @@ err_out: * @val_len: attribute value length * @ctx: search context with mft record and attribute to search from * - * You shouldn't need to call this function directly. Use lookup_attr() instead. + * You should not need to call this function directly. Use ntfs_attr_lookup() + * instead. * * Find an attribute by searching the attribute list for the corresponding - * attribute list entry. Having found the entry, map the mft record for read - * if the attribute is in a different mft record/inode, find_attr the attribute + * attribute list entry. Having found the entry, map the mft record if the + * attribute is in a different mft record/inode, ntfs_attr_find() the attribute * in there and return it. * * On first search @ctx->ntfs_ino must be the base mft record and @ctx must - * have been obtained from a call to get_attr_search_ctx(). On subsequent calls - * @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is then the - * base inode). + * have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent + * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is + * then the base inode). * * After finishing with the attribute/mft record you need to call - * release_attr_search_ctx() to cleanup the search context (unmapping any + * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any * mapped inodes, etc). * - * Return TRUE if the search was successful and FALSE if not. When TRUE, - * @ctx->attr is the found attribute and it is in mft record @ctx->mrec. When - * FALSE, @ctx->attr is the attribute which collates just after the attribute - * being searched for in the base ntfs inode, i.e. if one wants to add the - * attribute to the mft record this is the correct place to insert it into - * and if there is not enough space, the attribute should be placed in an - * extent mft record. + * If the attribute is found, ntfs_external_attr_find() returns 0 and + * @ctx->attr will point to the found attribute. @ctx->mrec will point to the + * mft record in which @ctx->attr is located and @ctx->al_entry will point to + * the attribute list entry for the attribute. + * + * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and + * @ctx->attr will point to the attribute in the base mft record before which + * the attribute being searched for would need to be inserted if such an action + * were to be desired. @ctx->mrec will point to the mft record in which + * @ctx->attr is located and @ctx->al_entry will point to the attribute list + * entry of the attribute before which the attribute being searched for would + * need to be inserted if such an action were to be desired. 
+ * + * Thus to insert the not found attribute, one wants to add the attribute to + * @ctx->mrec (the base mft record) and if there is not enough space, the + * attribute should be placed in a newly allocated extent mft record. The + * attribute list entry for the inserted attribute should be inserted in the + * attribute list attribute at @ctx->al_entry. + * + * On actual error, ntfs_external_attr_find() returns -EIO. In this case + * @ctx->attr is undefined and in particular do not rely on it not changing. */ -static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, - const u32 name_len, const IGNORE_CASE_BOOL ic, - const VCN lowest_vcn, const u8 *val, const u32 val_len, - attr_search_context *ctx) +static int ntfs_external_attr_find(const ATTR_TYPE type, + const ntfschar *name, const u32 name_len, + const IGNORE_CASE_BOOL ic, const VCN lowest_vcn, + const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx) { ntfs_inode *base_ni, *ni; ntfs_volume *vol; @@ -1366,6 +520,8 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, ATTR_RECORD *a; ntfschar *al_name; u32 al_name_len; + int err = 0; + static const char *es = " Unmount and run chkdsk."; ni = ctx->ntfs_ino; base_ni = ctx->base_ntfs_ino; @@ -1377,6 +533,8 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, } if (ni == base_ni) ctx->base_attr = ctx->attr; + if (type == AT_END) + goto not_found; vol = base_ni->vol; al_start = base_ni->attr_list; al_end = al_start + base_ni->attr_list_size; @@ -1413,7 +571,7 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, if (type != al_entry->type) continue; /* - * If @name is present, compare the two names. If @name is + * If @name is present, compare the two names. If @name is * missing, assume we want an unnamed attribute. */ al_name_len = al_entry->name_length; @@ -1439,10 +597,11 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, continue; /* * FIXME: Reverse engineering showed 0, IGNORE_CASE but - * that is inconsistent with find_attr(). The subsequent - * rc checks were also different. Perhaps I made a - * mistake in one of the two. Need to recheck which is - * correct or at least see what is going on... (AIA) + * that is inconsistent with ntfs_attr_find(). The + * subsequent rc checks were also different. Perhaps I + * made a mistake in one of the two. Need to recheck + * which is correct or at least see what is going on... + * (AIA) */ rc = ntfs_collate_names(name, name_len, al_name, al_name_len, 1, CASE_SENSITIVE, @@ -1454,8 +613,8 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, } /* * The names match or @name not present and attribute is - * unnamed. Now check @lowest_vcn. Continue search if the - * next attribute list entry still fits @lowest_vcn. Otherwise + * unnamed. Now check @lowest_vcn. Continue search if the + * next attribute list entry still fits @lowest_vcn. Otherwise * we have reached the right one or the search has failed. 
*/ if (lowest_vcn && (u8*)next_al_entry >= al_start && @@ -1463,7 +622,7 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, (u8*)next_al_entry + le16_to_cpu( next_al_entry->length) <= al_end && sle64_to_cpu(next_al_entry->lowest_vcn) <= - sle64_to_cpu(lowest_vcn) && + lowest_vcn && next_al_entry->type == al_entry->type && next_al_entry->name_length == al_name_len && ntfs_are_names_equal((ntfschar*)((u8*) @@ -1476,7 +635,10 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, if (MREF_LE(al_entry->mft_reference) == ni->mft_no) { if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) { ntfs_error(vol->sb, "Found stale mft " - "reference in attribute list!"); + "reference in attribute list " + "of base inode 0x%lx.%s", + base_ni->mft_no, es); + err = -EIO; break; } } else { /* Mft references do not match. */ @@ -1491,15 +653,24 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, } else { /* We want an extent record. */ ctx->mrec = map_extent_mft_record(base_ni, - al_entry->mft_reference, &ni); - ctx->ntfs_ino = ni; + le64_to_cpu( + al_entry->mft_reference), &ni); if (IS_ERR(ctx->mrec)) { - ntfs_error(vol->sb, "Failed to map mft " - "record, error code " - "%ld.", - -PTR_ERR(ctx->mrec)); + ntfs_error(vol->sb, "Failed to map " + "extent mft record " + "0x%lx of base inode " + "0x%lx.%s", + MREF_LE(al_entry-> + mft_reference), + base_ni->mft_no, es); + err = PTR_ERR(ctx->mrec); + if (err == -ENOENT) + err = -EIO; + /* Cause @ctx to be sanitized below. */ + ni = NULL; break; } + ctx->ntfs_ino = ni; } ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec + le16_to_cpu(ctx->mrec->attrs_offset)); @@ -1510,14 +681,14 @@ static BOOL find_external_attr(const ATTR_TYPES type, const ntfschar *name, * current al_entry. */ /* - * We could call into find_attr() to find the right attribute - * in this mft record but this would be less efficient and not - * quite accurate as find_attr() ignores the attribute instance - * numbers for example which become important when one plays - * with attribute lists. Also, because a proper match has been - * found in the attribute list entry above, the comparison can - * now be optimized. So it is worth re-implementing a - * simplified find_attr() here. + * We could call into ntfs_attr_find() to find the right + * attribute in this mft record but this would be less + * efficient and not quite accurate as ntfs_attr_find() ignores + * the attribute instance numbers for example which become + * important when one plays with attribute lists. Also, + * because a proper match has been found in the attribute list + * entry above, the comparison can now be optimized. So it is + * worth re-implementing a simplified ntfs_attr_find() here. */ a = ctx->attr; /* @@ -1534,18 +705,18 @@ do_next_attr_loop: break; if (al_entry->instance != a->instance) goto do_next_attr; + /* + * If the type and/or the name are mismatched between the + * attribute list entry and the attribute record, there is + * corruption so we break and return error EIO. 
+ */ if (al_entry->type != a->type) - continue; - if (name) { - if (a->name_length != al_name_len) - continue; - if (!ntfs_are_names_equal((ntfschar*)((u8*)a + - le16_to_cpu(a->name_offset)), - a->name_length, al_name, al_name_len, - CASE_SENSITIVE, vol->upcase, - vol->upcase_len)) - continue; - } + break; + if (!ntfs_are_names_equal((ntfschar*)((u8*)a + + le16_to_cpu(a->name_offset)), a->name_length, + al_name, al_name_len, CASE_SENSITIVE, + vol->upcase, vol->upcase_len)) + break; ctx->attr = a; /* * If no @val specified or @val specified and it matches, we @@ -1557,46 +728,79 @@ do_next_attr_loop: le16_to_cpu(a->data.resident.value_offset), val, val_len))) { ntfs_debug("Done, found."); - return TRUE; + return 0; } do_next_attr: /* Proceed to the next attribute in the current mft record. */ a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length)); goto do_next_attr_loop; } - ntfs_error(base_ni->vol->sb, "Inode contains corrupt attribute list " - "attribute."); + if (!err) { + ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt " + "attribute list attribute.%s", base_ni->mft_no, + es); + err = -EIO; + } if (ni != base_ni) { - unmap_extent_mft_record(ni); + if (ni) + unmap_extent_mft_record(ni); ctx->ntfs_ino = base_ni; ctx->mrec = ctx->base_mrec; ctx->attr = ctx->base_attr; } + if (err != -ENOMEM) + NVolSetErrors(vol); + return err; +not_found: + /* + * If we were looking for AT_END, we reset the search context @ctx and + * use ntfs_attr_find() to seek to the end of the base mft record. + */ + if (type == AT_END) { + ntfs_attr_reinit_search_ctx(ctx); + return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len, + ctx); + } /* - * FIXME: We absolutely have to return ERROR status instead of just - * false or we will blow up or even worse cause corruption when we add - * write support and we reach this code path! + * The attribute was not found. Before we return, we want to ensure + * @ctx->mrec and @ctx->attr indicate the position at which the + * attribute should be inserted in the base mft record. Since we also + * want to preserve @ctx->al_entry we cannot reinitialize the search + * context using ntfs_attr_reinit_search_ctx() as this would set + * @ctx->al_entry to NULL. Thus we do the necessary bits manually (see + * ntfs_attr_init_search_ctx() below). Note, we _only_ preserve + * @ctx->al_entry as the remaining fields (base_*) are identical to + * their non base_ counterparts and we cannot set @ctx->base_attr + * correctly yet as we do not know what @ctx->attr will be set to by + * the call to ntfs_attr_find() below. */ - printk(KERN_CRIT "NTFS: FIXME: Hit unfinished error code path!!!\n"); - return FALSE; -not_found: + if (ni != base_ni) + unmap_extent_mft_record(ni); + ctx->mrec = ctx->base_mrec; + ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec + + le16_to_cpu(ctx->mrec->attrs_offset)); + ctx->is_first = TRUE; + ctx->ntfs_ino = base_ni; + ctx->base_ntfs_ino = NULL; + ctx->base_mrec = NULL; + ctx->base_attr = NULL; /* - * Seek to the end of the base mft record, i.e. when we return false, - * ctx->mrec and ctx->attr indicate where the attribute should be - * inserted into the attribute record. - * And of course ctx->al_entry points to the end of the attribute - * list inside NTFS_I(ctx->base_vfs_ino)->attr_list. - * - * FIXME: Do we really want to do this here? Think about it... 
(AIA) + * In case there are multiple matches in the base mft record, need to + * keep enumerating until we get an attribute not found response (or + * another error), otherwise we would keep returning the same attribute + * over and over again and all programs using us for enumeration would + * lock up in a tight loop. */ - reinit_attr_search_ctx(ctx); - find_attr(type, name, name_len, ic, val, val_len, ctx); + do { + err = ntfs_attr_find(type, name, name_len, ic, val, val_len, + ctx); + } while (!err); ntfs_debug("Done, not found."); - return FALSE; + return err; } /** - * lookup_attr - find an attribute in an ntfs inode + * ntfs_attr_lookup - find an attribute in an ntfs inode * @type: attribute type to find * @name: attribute name to find (optional, i.e. NULL means don't care) * @name_len: attribute name length (only needed if @name present) @@ -1606,27 +810,38 @@ not_found: * @val_len: attribute value length * @ctx: search context with mft record and attribute to search from * - * Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must + * Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must * be the base mft record and @ctx must have been obtained from a call to - * get_attr_search_ctx(). + * ntfs_attr_get_search_ctx(). * * This function transparently handles attribute lists and @ctx is used to * continue searches where they were left off at. * * After finishing with the attribute/mft record you need to call - * release_attr_search_ctx() to cleanup the search context (unmapping any + * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any * mapped inodes, etc). * - * Return TRUE if the search was successful and FALSE if not. When TRUE, - * @ctx->attr is the found attribute and it is in mft record @ctx->mrec. When - * FALSE, @ctx->attr is the attribute which collates just after the attribute - * being searched for, i.e. if one wants to add the attribute to the mft - * record this is the correct place to insert it into. + * Return 0 if the search was successful and -errno if not. + * + * When 0, @ctx->attr is the found attribute and it is in mft record + * @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is + * the attribute list entry of the found attribute. + * + * When -ENOENT, @ctx->attr is the attribute which collates just after the + * attribute being searched for, i.e. if one wants to add the attribute to the + * mft record this is the correct place to insert it into. If an attribute + * list attribute is present, @ctx->al_entry is the attribute list entry which + * collates just after the attribute list entry of the attribute being searched + * for, i.e. if one wants to add the attribute to the mft record this is the + * correct place to insert its attribute list entry into. + * + * When -errno != -ENOENT, an error occured during the lookup. @ctx->attr is + * then undefined and in particular you should not rely on it not changing. */ -BOOL lookup_attr(const ATTR_TYPES type, const ntfschar *name, +int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name, const u32 name_len, const IGNORE_CASE_BOOL ic, const VCN lowest_vcn, const u8 *val, const u32 val_len, - attr_search_context *ctx) + ntfs_attr_search_ctx *ctx) { ntfs_inode *base_ni; @@ -1637,21 +852,22 @@ BOOL lookup_attr(const ATTR_TYPES type, const ntfschar *name, base_ni = ctx->ntfs_ino; /* Sanity check, just for debugging really. 
*/ BUG_ON(!base_ni); - if (!NInoAttrList(base_ni)) - return find_attr(type, name, name_len, ic, val, val_len, ctx); - return find_external_attr(type, name, name_len, ic, lowest_vcn, val, - val_len, ctx); + if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST) + return ntfs_attr_find(type, name, name_len, ic, val, val_len, + ctx); + return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn, + val, val_len, ctx); } /** - * init_attr_search_ctx - initialize an attribute search context + * ntfs_attr_init_search_ctx - initialize an attribute search context * @ctx: attribute search context to initialize * @ni: ntfs inode with which to initialize the search context * @mrec: mft record with which to initialize the search context * * Initialize the attribute search context @ctx with @ni and @mrec. */ -static inline void init_attr_search_ctx(attr_search_context *ctx, +static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx, ntfs_inode *ni, MFT_RECORD *mrec) { ctx->mrec = mrec; @@ -1666,7 +882,7 @@ static inline void init_attr_search_ctx(attr_search_context *ctx, } /** - * reinit_attr_search_ctx - reinitialize an attribute search context + * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context * @ctx: attribute search context to reinitialize * * Reinitialize the attribute search context @ctx, unmapping an associated @@ -1675,7 +891,7 @@ static inline void init_attr_search_ctx(attr_search_context *ctx, * This is used when a search for a new attribute is being started to reset * the search context to the beginning. */ -void reinit_attr_search_ctx(attr_search_context *ctx) +void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx) { if (likely(!ctx->base_ntfs_ino)) { /* No attribute list. */ @@ -1683,40 +899,45 @@ void reinit_attr_search_ctx(attr_search_context *ctx) /* Sanity checks are performed elsewhere. */ ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec + le16_to_cpu(ctx->mrec->attrs_offset)); + /* + * This needs resetting due to ntfs_external_attr_find() which + * can leave it set despite having zeroed ctx->base_ntfs_ino. + */ + ctx->al_entry = NULL; return; } /* Attribute list. */ if (ctx->ntfs_ino != ctx->base_ntfs_ino) unmap_extent_mft_record(ctx->ntfs_ino); - init_attr_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec); + ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec); return; } /** - * get_attr_search_ctx - allocate and initialize a new attribute search context + * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context * @ni: ntfs inode with which to initialize the search context * @mrec: mft record with which to initialize the search context * * Allocate a new attribute search context, initialize it with @ni and @mrec, * and return it. Return NULL if allocation failed. */ -attr_search_context *get_attr_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec) +ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec) { - attr_search_context *ctx; + ntfs_attr_search_ctx *ctx; ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS); if (ctx) - init_attr_search_ctx(ctx, ni, mrec); + ntfs_attr_init_search_ctx(ctx, ni, mrec); return ctx; } /** - * put_attr_search_ctx - release an attribute search context + * ntfs_attr_put_search_ctx - release an attribute search context * @ctx: attribute search context to free * * Release the attribute search context @ctx, unmapping an associated extent * mft record if present. 
*/ -void put_attr_search_ctx(attr_search_context *ctx) +void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx) { if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino) unmap_extent_mft_record(ctx->ntfs_ino); @@ -1724,3 +945,314 @@ void put_attr_search_ctx(attr_search_context *ctx) return; } +/** + * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file + * @vol: ntfs volume to which the attribute belongs + * @type: attribute type which to find + * + * Search for the attribute definition record corresponding to the attribute + * @type in the $AttrDef system file. + * + * Return the attribute type definition record if found and NULL if not found. + */ +static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol, + const ATTR_TYPE type) +{ + ATTR_DEF *ad; + + BUG_ON(!vol->attrdef); + BUG_ON(!type); + for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef < + vol->attrdef_size && ad->type; ++ad) { + /* We have not found it yet, carry on searching. */ + if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type))) + continue; + /* We found the attribute; return it. */ + if (likely(ad->type == type)) + return ad; + /* We have gone too far already. No point in continuing. */ + break; + } + /* Attribute not found. */ + ntfs_debug("Attribute type 0x%x not found in $AttrDef.", + le32_to_cpu(type)); + return NULL; +} + +/** + * ntfs_attr_size_bounds_check - check a size of an attribute type for validity + * @vol: ntfs volume to which the attribute belongs + * @type: attribute type which to check + * @size: size which to check + * + * Check whether the @size in bytes is valid for an attribute of @type on the + * ntfs volume @vol. This information is obtained from $AttrDef system file. + * + * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not + * listed in $AttrDef. + */ +int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type, + const s64 size) +{ + ATTR_DEF *ad; + + BUG_ON(size < 0); + /* + * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not + * listed in $AttrDef. + */ + if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024)) + return -ERANGE; + /* Get the $AttrDef entry for the attribute @type. */ + ad = ntfs_attr_find_in_attrdef(vol, type); + if (unlikely(!ad)) + return -ENOENT; + /* Do the bounds check. */ + if (((sle64_to_cpu(ad->min_size) > 0) && + size < sle64_to_cpu(ad->min_size)) || + ((sle64_to_cpu(ad->max_size) > 0) && size > + sle64_to_cpu(ad->max_size))) + return -ERANGE; + return 0; +} + +/** + * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident + * @vol: ntfs volume to which the attribute belongs + * @type: attribute type which to check + * + * Check whether the attribute of @type on the ntfs volume @vol is allowed to + * be non-resident. This information is obtained from $AttrDef system file. + * + * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, or + * -ENOENT if the attribute is not listed in $AttrDef. + */ +int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type) +{ + ATTR_DEF *ad; + + /* + * $DATA is always allowed to be non-resident even if $AttrDef does not + * specify this in the flags of the $DATA attribute definition record. + */ + if (type == AT_DATA) + return 0; + /* Find the attribute definition record in $AttrDef. */ + ad = ntfs_attr_find_in_attrdef(vol, type); + if (unlikely(!ad)) + return -ENOENT; + /* Check the flags and return the result. 
*/ + if (ad->flags & CAN_BE_NON_RESIDENT) + return 0; + return -EPERM; +} + +/** + * ntfs_attr_can_be_resident - check if an attribute can be resident + * @vol: ntfs volume to which the attribute belongs + * @type: attribute type which to check + * + * Check whether the attribute of @type on the ntfs volume @vol is allowed to + * be resident. This information is derived from our ntfs knowledge and may + * not be completely accurate, especially when user defined attributes are + * present. Basically we allow everything to be resident except for index + * allocation and $EA attributes. + * + * Return 0 if the attribute is allowed to be non-resident and -EPERM if not. + * + * Warning: In the system file $MFT the attribute $Bitmap must be non-resident + * otherwise windows will not boot (blue screen of death)! We cannot + * check for this here as we do not know which inode's $Bitmap is + * being asked about so the caller needs to special case this. + */ +int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type) +{ + if (type != AT_INDEX_ALLOCATION && type != AT_EA) + return 0; + return -EPERM; +} + +/** + * ntfs_attr_record_resize - resize an attribute record + * @m: mft record containing attribute record + * @a: attribute record to resize + * @new_size: new size in bytes to which to resize the attribute record @a + * + * Resize the attribute record @a, i.e. the resident part of the attribute, in + * the mft record @m to @new_size bytes. + * + * Return 0 on success and -errno on error. The following error codes are + * defined: + * -ENOSPC - Not enough space in the mft record @m to perform the resize. + * + * Note: On error, no modifications have been performed whatsoever. + * + * Warning: If you make a record smaller without having copied all the data you + * are interested in the data may be overwritten. + */ +int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size) +{ + ntfs_debug("Entering for new_size %u.", new_size); + /* Align to 8 bytes if it is not already done. */ + if (new_size & 7) + new_size = (new_size + 7) & ~7; + /* If the actual attribute length has changed, move things around. */ + if (new_size != le32_to_cpu(a->length)) { + u32 new_muse = le32_to_cpu(m->bytes_in_use) - + le32_to_cpu(a->length) + new_size; + /* Not enough space in this mft record. */ + if (new_muse > le32_to_cpu(m->bytes_allocated)) + return -ENOSPC; + /* Move attributes following @a to their new location. */ + memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length), + le32_to_cpu(m->bytes_in_use) - ((u8*)a - + (u8*)m) - le32_to_cpu(a->length)); + /* Adjust @m to reflect the change in used space. */ + m->bytes_in_use = cpu_to_le32(new_muse); + /* Adjust @a to reflect the new size. */ + if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length)) + a->length = cpu_to_le32(new_size); + } + return 0; +} + +/** + * ntfs_attr_set - fill (a part of) an attribute with a byte + * @ni: ntfs inode describing the attribute to fill + * @ofs: offset inside the attribute at which to start to fill + * @cnt: number of bytes to fill + * @val: the unsigned 8-bit value with which to fill the attribute + * + * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at + * byte offset @ofs inside the attribute with the constant byte @val. + * + * This function is effectively like memset() applied to an ntfs attribute. + * + * Return 0 on success and -errno on error. 
An error code of -ESPIPE means + * that @ofs + @cnt were outside the end of the attribute and no write was + * performed. + */ +int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) +{ + ntfs_volume *vol = ni->vol; + struct address_space *mapping; + struct page *page; + u8 *kaddr; + pgoff_t idx, end; + unsigned int start_ofs, end_ofs, size; + + ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.", + (long long)ofs, (long long)cnt, val); + BUG_ON(ofs < 0); + BUG_ON(cnt < 0); + if (!cnt) + goto done; + mapping = VFS_I(ni)->i_mapping; + /* Work out the starting index and page offset. */ + idx = ofs >> PAGE_CACHE_SHIFT; + start_ofs = ofs & ~PAGE_CACHE_MASK; + /* Work out the ending index and page offset. */ + end = ofs + cnt; + end_ofs = end & ~PAGE_CACHE_MASK; + /* If the end is outside the inode size return -ESPIPE. */ + if (unlikely(end > VFS_I(ni)->i_size)) { + ntfs_error(vol->sb, "Request exceeds end of attribute."); + return -ESPIPE; + } + end >>= PAGE_CACHE_SHIFT; + /* If there is a first partial page, need to do it the slow way. */ + if (start_ofs) { + page = read_cache_page(mapping, idx, + (filler_t*)mapping->a_ops->readpage, NULL); + if (IS_ERR(page)) { + ntfs_error(vol->sb, "Failed to read first partial " + "page (sync error, index 0x%lx).", idx); + return PTR_ERR(page); + } + wait_on_page_locked(page); + if (unlikely(!PageUptodate(page))) { + ntfs_error(vol->sb, "Failed to read first partial page " + "(async error, index 0x%lx).", idx); + page_cache_release(page); + return PTR_ERR(page); + } + /* + * If the last page is the same as the first page, need to + * limit the write to the end offset. + */ + size = PAGE_CACHE_SIZE; + if (idx == end) + size = end_ofs; + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + start_ofs, val, size - start_ofs); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); + set_page_dirty(page); + page_cache_release(page); + if (idx == end) + goto done; + idx++; + } + /* Do the whole pages the fast way. */ + for (; idx < end; idx++) { + /* Find or create the current page. (The page is locked.) */ + page = grab_cache_page(mapping, idx); + if (unlikely(!page)) { + ntfs_error(vol->sb, "Insufficient memory to grab " + "page (index 0x%lx).", idx); + return -ENOMEM; + } + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr, val, PAGE_CACHE_SIZE); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); + /* + * If the page has buffers, mark them uptodate since buffer + * state and not page state is definitive in 2.6 kernels. + */ + if (page_has_buffers(page)) { + struct buffer_head *bh, *head; + + bh = head = page_buffers(page); + do { + set_buffer_uptodate(bh); + } while ((bh = bh->b_this_page) != head); + } + /* Now that buffers are uptodate, set the page uptodate, too. */ + SetPageUptodate(page); + /* + * Set the page and all its buffers dirty and mark the inode + * dirty, too. The VM will write the page later on. + */ + set_page_dirty(page); + /* Finally unlock and release the page. */ + unlock_page(page); + page_cache_release(page); + } + /* If there is a last partial page, need to do it the slow way. 
*/ + if (end_ofs) { + page = read_cache_page(mapping, idx, + (filler_t*)mapping->a_ops->readpage, NULL); + if (IS_ERR(page)) { + ntfs_error(vol->sb, "Failed to read last partial page " + "(sync error, index 0x%lx).", idx); + return PTR_ERR(page); + } + wait_on_page_locked(page); + if (unlikely(!PageUptodate(page))) { + ntfs_error(vol->sb, "Failed to read last partial page " + "(async error, index 0x%lx).", idx); + page_cache_release(page); + return PTR_ERR(page); + } + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr, val, end_ofs); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); + set_page_dirty(page); + page_cache_release(page); + } +done: + ntfs_debug("Done."); + return 0; +}
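
The kernel-doc above for ntfs_attr_lookup() describes a 0 / -ENOENT / -errno convention where, on -ENOENT, @ctx->attr points at the position where the attribute would have to be inserted. The following is a minimal sketch of how a caller might drive the renamed API end to end; the helper name example_check_data_resident() is hypothetical, while map_mft_record(), unmap_mft_record() and the search context functions are the ones introduced or referenced in this diff.

/*
 * Hypothetical helper: look up the unnamed $DATA attribute of @ni and
 * report whether it is resident.  Error handling follows the
 * 0 / -ENOENT / -errno convention of ntfs_attr_lookup().
 */
static int example_check_data_resident(ntfs_inode *ni)
{
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	int err;

	m = map_mft_record(ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ctx = ntfs_attr_get_search_ctx(ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto unmap_out;
	}
	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
			ctx);
	if (!err)
		ntfs_debug("$DATA is %s.", ctx->attr->non_resident ?
				"non-resident" : "resident");
	else if (err == -ENOENT)
		/* @ctx->attr now indicates the insertion position. */
		ntfs_debug("$DATA not found.");
	ntfs_attr_put_search_ctx(ctx);
unmap_out:
	unmap_mft_record(ni);
	return err;
}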
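
Because ntfs_external_attr_find() can leave @ctx->al_entry set after a not-found result, the diff adds a reset of that field to ntfs_attr_reinit_search_ctx(). The sketch below shows the intended usage pattern: one search context reused for two unrelated lookups on the same base inode, with a reinit in between. The function name example_two_lookups() is hypothetical; the attribute type constants are the standard ones from layout.h.

/*
 * Hypothetical sketch: reuse one search context for two different
 * lookups on the same base mft record by resetting it in between.
 */
static int example_two_lookups(ntfs_inode *ni, MFT_RECORD *m)
{
	ntfs_attr_search_ctx *ctx;
	int err;

	ctx = ntfs_attr_get_search_ctx(ni, m);
	if (!ctx)
		return -ENOMEM;
	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (!err) {
		/* ... use ctx->attr for $STANDARD_INFORMATION ... */
		ntfs_attr_reinit_search_ctx(ctx);
		err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE,
				0, NULL, 0, ctx);
		/* ... on success, use ctx->attr for $FILE_NAME ... */
	}
	ntfs_attr_put_search_ctx(ctx);
	return err;
}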
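
ntfs_attr_size_bounds_check() and ntfs_attr_can_be_non_resident() added above both consult $AttrDef and return -ENOENT when the attribute type has no definition record. How a caller should treat -ENOENT is a policy decision; the sketch below assumes, purely for illustration, that a missing $AttrDef entry means the size is unbounded. The helper name example_validate_non_resident() is hypothetical.

/*
 * Hypothetical sketch: validate a proposed @new_size for an attribute of
 * @type before attempting to make it non-resident.
 */
static int example_validate_non_resident(ntfs_volume *vol,
		const ATTR_TYPE type, const s64 new_size)
{
	int err;

	/* Reject sizes outside the $AttrDef bounds for this type. */
	err = ntfs_attr_size_bounds_check(vol, type, new_size);
	if (err) {
		if (err != -ENOENT)
			return err;
		/* Assumption: no $AttrDef entry means no size bound. */
	}
	/* Reject types which are not allowed to be non-resident. */
	return ntfs_attr_can_be_non_resident(vol, type);
}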
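
ntfs_attr_record_resize() only moves the attribute records that follow @a and updates the mft record accounting; it does not touch the value length of a resident attribute. A caller growing a resident value therefore resizes the record first and then updates the value length itself, as in the sketch below. The helper name example_grow_resident_value() is hypothetical and the resident layout fields are the ones already used elsewhere in this file.

/*
 * Hypothetical sketch: grow the value of the resident attribute @a in
 * mft record @m to @new_len bytes.
 */
static int example_grow_resident_value(MFT_RECORD *m, ATTR_RECORD *a,
		u32 new_len)
{
	int err;

	/* Resize the whole record; -ENOSPC if @m cannot hold it. */
	err = ntfs_attr_record_resize(m, a, le16_to_cpu(
			a->data.resident.value_offset) + new_len);
	if (err)
		return err;
	/* The record now has room; record the new value length. */
	a->data.resident.value_length = cpu_to_le32(new_len);
	return 0;
}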
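
Finally, ntfs_attr_set() behaves like memset() over the page cache backing an attribute and refuses, with -ESPIPE, to write past the end of the attribute. A minimal caller-side sketch, assuming a hypothetical helper name example_zero_region():

/*
 * Hypothetical sketch: zero @len bytes of the attribute described by
 * @ni starting at byte offset @pos.
 */
static int example_zero_region(ntfs_inode *ni, const s64 pos, const s64 len)
{
	int err;

	err = ntfs_attr_set(ni, pos, len, 0);
	if (err == -ESPIPE)
		/* The range extends beyond i_size; nothing was written. */
		ntfs_error(ni->vol->sb, "Range 0x%llx-0x%llx is outside the "
				"attribute.", (long long)pos,
				(long long)(pos + len));
	return err;
}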