2 * lcnalloc.c - Cluster (de)allocation code. Part of the Linux-NTFS project.
4 * Copyright (c) 2004 Anton Altaparmakov
6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program/include file is distributed in the hope that it will be
12 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program (in the main directory of the Linux-NTFS
18 * distribution in the file COPYING); if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/pagemap.h>
36 * ntfs_cluster_free_from_rl_nolock - free clusters from runlist
37 * @vol: mounted ntfs volume on which to free the clusters
38 * @rl: runlist describing the clusters to free
40 * Free all the clusters described by the runlist @rl on the volume @vol. In
41 * the case of an error being returned, at least some of the clusters were not
44 * Return 0 on success and -errno on error.
46 * Locking: - The volume lcn bitmap must be locked for writing on entry and is
47 * left locked on return.
49 static int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
50 const runlist_element *rl)
52 struct inode *lcnbmp_vi = vol->lcnbmp_ino;
55 ntfs_debug("Entering.");
56 for (; rl->length; rl++) {
61 err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length);
62 if (unlikely(err && (!ret || ret == ENOMEM) && ret != err))
70 * ntfs_cluster_alloc - allocate clusters on an ntfs volume
71 * @vol: mounted ntfs volume on which to allocate the clusters
72 * @start_vcn: vcn to use for the first allocated cluster
73 * @count: number of clusters to allocate
74 * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
75 * @zone: zone from which to allocate the clusters
77 * Allocate @count clusters preferably starting at cluster @start_lcn or at the
78 * current allocator position if @start_lcn is -1, on the mounted ntfs volume
79 * @vol. @zone is either DATA_ZONE for allocation of normal clusters or
80 * MFT_ZONE for allocation of clusters for the master file table, i.e. the
81 * $MFT/$DATA attribute.
83 * @start_vcn specifies the vcn of the first allocated cluster. This makes
84 * merging the resulting runlist with the old runlist easier.
86 * You need to check the return value with IS_ERR(). If this is false, the
87 * function was successful and the return value is a runlist describing the
88 * allocated cluster(s). If IS_ERR() is true, the function failed and
89 * PTR_ERR() gives you the error code.
91 * Notes on the allocation algorithm
92 * =================================
94 * There are two data zones. First is the area between the end of the mft zone
95 * and the end of the volume, and second is the area between the start of the
96 * volume and the start of the mft zone. On unmodified/standard NTFS 1.x
97 * volumes, the second data zone does not exist due to the mft zone being
98 * expanded to cover the start of the volume in order to reserve space for the
99 * mft bitmap attribute.
101 * This is not the prettiest function but the complexity stems from the need of
102 * implementing the mft vs data zoned approach and from the fact that we have
103 * access to the lcn bitmap in portions of up to 8192 bytes at a time, so we
104 * need to cope with crossing over boundaries of two buffers. Further, the
105 * fact that the allocator allows for caller supplied hints as to the location
106 * of where allocation should begin and the fact that the allocator keeps track
107 * of where in the data zones the next natural allocation should occur,
108 * contribute to the complexity of the function. But it should all be
109 * worthwhile, because this allocator should: 1) be a full implementation of
110 * the MFT zone approach used by Windows NT, 2) cause reduction in
111 * fragmentation, and 3) be speedy in allocations (the code is not optimized
112 * for speed, but the algorithm is, so further speed improvements are probably
115 * FIXME: We should be monitoring cluster allocation and increment the MFT zone
116 * size dynamically but this is something for the future. We will just cause
117 * heavier fragmentation by not doing it and I am not even sure Windows would
118 * grow the MFT zone dynamically, so it might even be correct not to do this.
119 * The overhead in doing dynamic MFT zone expansion would be very large and
120 * unlikely worth the effort. (AIA)
122 * TODO: I have added in double the required zone position pointer wrap around
123 * logic which can be optimized to having only one of the two logic sets.
124 * However, having the double logic will work fine, but if we have only one of
125 * the sets and we get it wrong somewhere, then we get into trouble, so
126 * removing the duplicate logic requires _very_ careful consideration of _all_
127 * possible code paths. So at least for now, I am leaving the double logic -
128 * better safe than sorry... (AIA)
130 * Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked
131 *   on return.
132 * - This function takes the volume lcn bitmap lock for writing and
133 * modifies the bitmap contents.
/*
 * NOTE(review): this copy of the function is a lossy extraction -- the leading
 * number on each line is the original source line number, and the gaps in that
 * numbering show that braces, declarations, goto labels and other statements
 * are missing from this view.  The code is therefore left byte-for-byte
 * untouched below; only review comments have been added.
 */
135 runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
136 const s64 count, const LCN start_lcn,
137 const NTFS_CLUSTER_ALLOCATION_ZONES zone)
/* Local state: current search window, bitmap cursors, and the runlist built up. */
139 LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
140 LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
142 struct inode *lcnbmp_vi;
143 runlist_element *rl = NULL;
144 struct address_space *mapping;
145 struct page *page = NULL;
147 int err = 0, rlpos, rlsize, buf_size;
148 u8 pass, done_zones, search_zone, need_writeback = 0, bit;
150 ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn "
151 "0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn,
152 (unsigned long long)count,
153 (unsigned long long)start_lcn,
154 zone == MFT_ZONE ? "MFT" : "DATA");
156 lcnbmp_vi = vol->lcnbmp_ino;
/* Sanity check the caller-supplied arguments. */
158 BUG_ON(start_vcn < 0);
160 BUG_ON(start_lcn < -1);
161 BUG_ON(zone < FIRST_ZONE);
162 BUG_ON(zone > LAST_ZONE);
164 /* Return empty runlist if @count == 0 */
165 // FIXME: Do we want to just return NULL instead? (AIA)
167 rl = ntfs_malloc_nofs(PAGE_SIZE)
169 return ERR_PTR(-ENOMEM);
170 rl[0].vcn = start_vcn;
171 rl[0].lcn = LCN_RL_NOT_MAPPED;
175 /* Take the lcnbmp lock for writing. */
176 down_write(&vol->lcnbmp_lock);
178 * If no specific @start_lcn was requested, use the current data zone
179 * position, otherwise use the requested @start_lcn but make sure it
180 * lies outside the mft zone. Also set done_zones to 0 (no zones done)
181 * and pass depending on whether we are starting inside a zone (1) or
182 * at the beginning of a zone (2). If requesting from the MFT_ZONE,
183 * we either start at the current position within the mft zone or at
184 * the specified position. If the latter is out of bounds then we start
185 * at the beginning of the MFT_ZONE.
190 * zone_start and zone_end are the current search range. search_zone
191 * is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of
192 * volume) and 4 for data zone 2 (start of volume till start of mft
195 zone_start = start_lcn;
196 if (zone_start < 0) {
197 if (zone == DATA_ZONE)
198 zone_start = vol->data1_zone_pos;
200 zone_start = vol->mft_zone_pos;
203 * Zone starts at beginning of volume which means a
204 * single pass is sufficient.
208 } else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
209 zone_start < vol->mft_zone_end) {
210 zone_start = vol->mft_zone_end;
212 * Starting at beginning of data1_zone which means a single
213 * pass in this zone is sufficient.
216 } else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
217 zone_start >= vol->mft_zone_end)) {
218 zone_start = vol->mft_lcn;
219 if (!vol->mft_zone_end)
222 * Starting at beginning of volume which means a single pass
227 if (zone == MFT_ZONE) {
228 zone_end = vol->mft_zone_end;
230 } else /* if (zone == DATA_ZONE) */ {
231 /* Skip searching the mft zone. */
233 if (zone_start >= vol->mft_zone_end) {
234 zone_end = vol->nr_clusters;
237 zone_end = vol->mft_zone_start;
242 * bmp_pos is the current bit position inside the bitmap. We use
243 * bmp_initial_pos to determine whether or not to do a zone switch.
245 bmp_pos = bmp_initial_pos = zone_start;
247 /* Loop until all clusters are allocated, i.e. clusters == 0. */
250 mapping = lcnbmp_vi->i_mapping;
252 ntfs_debug("Start of outer while loop: done_zones 0x%x, "
253 "search_zone %i, pass %i, zone_start 0x%llx, "
254 "zone_end 0x%llx, bmp_initial_pos 0x%llx, "
255 "bmp_pos 0x%llx, rlpos %i, rlsize %i.",
256 done_zones, search_zone, pass,
257 (unsigned long long)zone_start,
258 (unsigned long long)zone_end,
259 (unsigned long long)bmp_initial_pos,
260 (unsigned long long)bmp_pos, rlpos, rlsize);
261 /* Loop until we run out of free clusters. */
262 last_read_pos = bmp_pos >> 3;
263 ntfs_debug("last_read_pos 0x%llx.",
264 (unsigned long long)last_read_pos);
/*
 * NOTE(review): i_size is read directly rather than via i_size_read();
 * presumably safe because $Bitmap's size is fixed after mount -- confirm.
 */
265 if (last_read_pos > lcnbmp_vi->i_size) {
266 ntfs_debug("End of attribute reached. "
267 "Skipping to zone_pass_done.")
271 if (need_writeback) {
272 ntfs_debug("Marking page dirty.");
273 flush_dcache_page(page);
274 set_page_dirty(page);
277 ntfs_unmap_page(page);
279 page = ntfs_map_page(mapping, last_read_pos >>
283 ntfs_error(vol->sb, "Failed to map page.");
/* buf/buf_size describe the usable window into the mapped bitmap page. */
286 buf_size = last_read_pos & ~PAGE_CACHE_MASK;
287 buf = page_address(page) + buf_size;
288 buf_size = PAGE_CACHE_SIZE - buf_size;
289 if (unlikely(last_read_pos + buf_size > lcnbmp_vi->i_size))
290 buf_size = lcnbmp_vi->i_size - last_read_pos;
294 ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, "
295 "bmp_pos 0x%llx, need_writeback %i.", buf_size,
296 (unsigned long long)lcn,
297 (unsigned long long)bmp_pos, need_writeback);
298 while (lcn < buf_size && lcn + bmp_pos < zone_end) {
299 byte = buf + (lcn >> 3);
300 ntfs_debug("In inner while loop: buf_size %i, "
301 "lcn 0x%llx, bmp_pos 0x%llx, "
302 "need_writeback %i, byte ofs 0x%x, "
303 "*byte 0x%x.", buf_size,
304 (unsigned long long)lcn,
305 (unsigned long long)bmp_pos,
307 (unsigned int)(lcn >> 3),
308 (unsigned int)*byte);
309 /* Skip full bytes. */
311 lcn = (lcn + 8) & ~7;
312 ntfs_debug("Continuing while loop 1.");
315 bit = 1 << (lcn & 7);
316 ntfs_debug("bit %i.", bit);
317 /* If the bit is already set, go onto the next one. */
320 ntfs_debug("Continuing while loop 2.");
324 * Allocate more memory if needed, including space for
325 * the terminator element.
326 * ntfs_malloc_nofs() operates on whole pages only.
328 if ((rlpos + 2) * sizeof(*rl) > rlsize) {
329 runlist_element *rl2;
331 ntfs_debug("Reallocating memory.");
333 ntfs_debug("First free bit is at LCN "
337 rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
338 if (unlikely(!rl2)) {
340 ntfs_error(vol->sb, "Failed to "
344 memcpy(rl2, rl, rlsize);
348 ntfs_debug("Reallocated memory, rlsize 0x%x.",
351 /* Allocate the bitmap bit. */
353 /* We need to write this bitmap page to disk. */
355 ntfs_debug("*byte 0x%x, need_writeback is set.",
356 (unsigned int)*byte);
358 * Coalesce with previous run if adjacent LCNs.
359 * Otherwise, append a new run.
361 ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), "
362 "prev_lcn 0x%llx, lcn 0x%llx, "
363 "bmp_pos 0x%llx, prev_run_len 0x%llx, "
365 (unsigned long long)(lcn + bmp_pos),
366 1ULL, (unsigned long long)prev_lcn,
367 (unsigned long long)lcn,
368 (unsigned long long)bmp_pos,
369 (unsigned long long)prev_run_len,
371 if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) {
372 ntfs_debug("Coalescing to run (lcn 0x%llx, "
377 rl[rlpos - 1].length);
378 rl[rlpos - 1].length = ++prev_run_len;
379 ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), "
380 "prev_run_len 0x%llx.",
384 rl[rlpos - 1].length,
389 ntfs_debug("Adding new run, (previous "
395 rl[rlpos - 1].length);
396 rl[rlpos].vcn = rl[rlpos - 1].vcn +
399 ntfs_debug("Adding new run, is first "
401 rl[rlpos].vcn = start_vcn;
403 rl[rlpos].lcn = prev_lcn = lcn + bmp_pos;
404 rl[rlpos].length = prev_run_len = 1;
411 * Update the current zone position. Positions
412 * of already scanned zones have been updated
413 * during the respective zone switches.
415 tc = lcn + bmp_pos + 1;
416 ntfs_debug("Done. Updating current zone "
417 "position, tc 0x%llx, "
419 (unsigned long long)tc,
421 switch (search_zone) {
423 ntfs_debug("Before checks, "
428 if (tc >= vol->mft_zone_end) {
431 if (!vol->mft_zone_end)
432 vol->mft_zone_pos = 0;
433 } else if ((bmp_initial_pos >=
435 tc > vol->mft_zone_pos)
436 && tc >= vol->mft_lcn)
437 vol->mft_zone_pos = tc;
438 ntfs_debug("After checks, "
445 ntfs_debug("Before checks, "
446 "vol->data1_zone_pos "
449 vol->data1_zone_pos);
450 if (tc >= vol->nr_clusters)
451 vol->data1_zone_pos =
453 else if ((bmp_initial_pos >=
454 vol->data1_zone_pos ||
455 tc > vol->data1_zone_pos)
456 && tc >= vol->mft_zone_end)
457 vol->data1_zone_pos = tc;
458 ntfs_debug("After checks, "
459 "vol->data1_zone_pos "
462 vol->data1_zone_pos);
465 ntfs_debug("Before checks, "
466 "vol->data2_zone_pos "
469 vol->data2_zone_pos);
470 if (tc >= vol->mft_zone_start)
471 vol->data2_zone_pos = 0;
472 else if (bmp_initial_pos >=
473 vol->data2_zone_pos ||
474 tc > vol->data2_zone_pos)
475 vol->data2_zone_pos = tc;
476 ntfs_debug("After checks, "
477 "vol->data2_zone_pos "
480 vol->data2_zone_pos);
485 ntfs_debug("Finished. Going to out.");
491 ntfs_debug("After inner while loop: buf_size 0x%x, lcn "
492 "0x%llx, bmp_pos 0x%llx, need_writeback %i.",
493 buf_size, (unsigned long long)lcn,
494 (unsigned long long)bmp_pos, need_writeback);
495 if (bmp_pos < zone_end) {
496 ntfs_debug("Continuing outer while loop, "
497 "bmp_pos 0x%llx, zone_end 0x%llx.",
498 (unsigned long long)bmp_pos,
499 (unsigned long long)zone_end);
502 zone_pass_done: /* Finished with the current zone pass. */
503 ntfs_debug("At zone_pass_done, pass %i.", pass);
506 * Now do pass 2, scanning the first part of the zone
507 * we omitted in pass 1.
510 zone_end = zone_start;
511 switch (search_zone) {
512 case 1: /* mft_zone */
513 zone_start = vol->mft_zone_start;
515 case 2: /* data1_zone */
516 zone_start = vol->mft_zone_end;
518 case 4: /* data2_zone */
525 if (zone_end < zone_start)
526 zone_end = zone_start;
527 bmp_pos = zone_start;
528 ntfs_debug("Continuing outer while loop, pass 2, "
529 "zone_start 0x%llx, zone_end 0x%llx, "
531 (unsigned long long)zone_start,
532 (unsigned long long)zone_end,
533 (unsigned long long)bmp_pos);
537 ntfs_debug("At done_zones_check, search_zone %i, done_zones "
538 "before 0x%x, done_zones after 0x%x.",
539 search_zone, done_zones,
540 done_zones | search_zone);
541 done_zones |= search_zone;
542 if (done_zones < 7) {
543 ntfs_debug("Switching zone.");
544 /* Now switch to the next zone we haven't done yet. */
546 switch (search_zone) {
548 ntfs_debug("Switching from mft zone to data1 "
550 /* Update mft zone position. */
554 ntfs_debug("Before checks, "
559 tc = rl[rlpos - 1].lcn +
560 rl[rlpos - 1].length;
561 if (tc >= vol->mft_zone_end) {
564 if (!vol->mft_zone_end)
565 vol->mft_zone_pos = 0;
566 } else if ((bmp_initial_pos >=
568 tc > vol->mft_zone_pos)
569 && tc >= vol->mft_lcn)
570 vol->mft_zone_pos = tc;
571 ntfs_debug("After checks, "
577 /* Switch from mft zone to data1 zone. */
578 switch_to_data1_zone: search_zone = 2;
579 zone_start = bmp_initial_pos =
581 zone_end = vol->nr_clusters;
582 if (zone_start == vol->mft_zone_end)
584 if (zone_start >= zone_end) {
585 vol->data1_zone_pos = zone_start =
591 ntfs_debug("Switching from data1 zone to "
593 /* Update data1 zone position. */
597 ntfs_debug("Before checks, "
598 "vol->data1_zone_pos "
601 vol->data1_zone_pos);
602 tc = rl[rlpos - 1].lcn +
603 rl[rlpos - 1].length;
604 if (tc >= vol->nr_clusters)
605 vol->data1_zone_pos =
607 else if ((bmp_initial_pos >=
608 vol->data1_zone_pos ||
609 tc > vol->data1_zone_pos)
610 && tc >= vol->mft_zone_end)
611 vol->data1_zone_pos = tc;
612 ntfs_debug("After checks, "
613 "vol->data1_zone_pos "
616 vol->data1_zone_pos);
618 /* Switch from data1 zone to data2 zone. */
620 zone_start = bmp_initial_pos =
622 zone_end = vol->mft_zone_start;
625 if (zone_start >= zone_end) {
626 vol->data2_zone_pos = zone_start =
632 ntfs_debug("Switching from data2 zone to "
634 /* Update data2 zone position. */
638 ntfs_debug("Before checks, "
639 "vol->data2_zone_pos "
642 vol->data2_zone_pos);
643 tc = rl[rlpos - 1].lcn +
644 rl[rlpos - 1].length;
645 if (tc >= vol->mft_zone_start)
646 vol->data2_zone_pos = 0;
647 else if (bmp_initial_pos >=
648 vol->data2_zone_pos ||
649 tc > vol->data2_zone_pos)
650 vol->data2_zone_pos = tc;
651 ntfs_debug("After checks, "
652 "vol->data2_zone_pos "
655 vol->data2_zone_pos);
657 /* Switch from data2 zone to data1 zone. */
658 goto switch_to_data1_zone;
662 ntfs_debug("After zone switch, search_zone %i, "
663 "pass %i, bmp_initial_pos 0x%llx, "
664 "zone_start 0x%llx, zone_end 0x%llx.",
666 (unsigned long long)bmp_initial_pos,
667 (unsigned long long)zone_start,
668 (unsigned long long)zone_end);
669 bmp_pos = zone_start;
670 if (zone_start == zone_end) {
671 ntfs_debug("Empty zone, going to "
672 "done_zones_check.");
673 /* Empty zone. Don't bother searching it. */
674 goto done_zones_check;
676 ntfs_debug("Continuing outer while loop.");
678 } /* done_zones == 7 */
679 ntfs_debug("All zones are finished.");
681 * All zones are finished! If DATA_ZONE, shrink mft zone. If
682 * MFT_ZONE, we have really run out of space.
684 mft_zone_size = vol->mft_zone_end - vol->mft_zone_start;
685 ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end "
686 "0x%llx, mft_zone_size 0x%llx.",
687 (unsigned long long)vol->mft_zone_start,
688 (unsigned long long)vol->mft_zone_end,
689 (unsigned long long)mft_zone_size);
690 if (zone == MFT_ZONE || mft_zone_size <= 0) {
691 ntfs_debug("No free clusters left, going to out.");
692 /* Really no more space left on device. */
695 } /* zone == DATA_ZONE && mft_zone_size > 0 */
696 ntfs_debug("Shrinking mft zone.");
697 zone_end = vol->mft_zone_end;
699 if (mft_zone_size > 0)
700 vol->mft_zone_end = vol->mft_zone_start + mft_zone_size;
701 else /* mft zone and data2 zone no longer exist. */
702 vol->data2_zone_pos = vol->mft_zone_start =
703 vol->mft_zone_end = 0;
704 if (vol->mft_zone_pos >= vol->mft_zone_end) {
705 vol->mft_zone_pos = vol->mft_lcn;
706 if (!vol->mft_zone_end)
707 vol->mft_zone_pos = 0;
709 bmp_pos = zone_start = bmp_initial_pos =
710 vol->data1_zone_pos = vol->mft_zone_end;
714 ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, "
715 "vol->mft_zone_start 0x%llx, "
716 "vol->mft_zone_end 0x%llx, "
717 "vol->mft_zone_pos 0x%llx, search_zone 2, "
718 "pass 2, dones_zones 0x%x, zone_start 0x%llx, "
719 "zone_end 0x%llx, vol->data1_zone_pos 0x%llx, "
720 "continuing outer while loop.",
721 (unsigned long long)mft_zone_size,
722 (unsigned long long)vol->mft_zone_start,
723 (unsigned long long)vol->mft_zone_end,
724 (unsigned long long)vol->mft_zone_pos,
725 done_zones, (unsigned long long)zone_start,
726 (unsigned long long)zone_end,
727 (unsigned long long)vol->data1_zone_pos);
729 ntfs_debug("After outer while loop.");
731 ntfs_debug("At out.");
732 /* Add runlist terminator element. */
734 rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;
735 rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
736 rl[rlpos].length = 0;
738 if (likely(page && !IS_ERR(page))) {
739 if (need_writeback) {
740 ntfs_debug("Marking page dirty.");
741 flush_dcache_page(page);
742 set_page_dirty(page);
745 ntfs_unmap_page(page);
748 up_write(&vol->lcnbmp_lock);
752 ntfs_error(vol->sb, "Failed to allocate clusters, aborting "
758 ntfs_debug("Not enough space to complete allocation, "
759 "err ENOSPC, first free lcn 0x%llx, "
760 "could allocate up to 0x%llx "
762 (unsigned long long)rl[0].lcn,
763 (unsigned long long)count - clusters);
764 /* Deallocate all allocated clusters. */
765 ntfs_debug("Attempting rollback...");
766 err2 = ntfs_cluster_free_from_rl_nolock(vol, rl);
768 ntfs_error(vol->sb, "Failed to rollback (error %i). "
769 "Leaving inconsistent metadata! "
770 "Unmount and run chkdsk.", err2);
773 /* Free the runlist. */
/*
 * NOTE(review): err is compared with positive ENOSPC here, so this function
 * evidently stores positive errno values in err locally -- verify against the
 * elided assignments before changing any sign conventions.
 */
775 } else if (err == ENOSPC)
776 ntfs_debug("No space left at all, err = ENOSPC, "
777 "first free lcn = 0x%llx.",
778 (unsigned long long)vol->data1_zone_pos);
779 up_write(&vol->lcnbmp_lock);
784 * __ntfs_cluster_free - free clusters on an ntfs volume
785 * @vi: vfs inode whose runlist describes the clusters to free
786 * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters
787 * @count: number of clusters to free or -1 for all clusters
788 * @is_rollback: if TRUE this is a rollback operation
790 * Free @count clusters starting at the cluster @start_vcn in the runlist
791 * described by the vfs inode @vi.
793 * If @count is -1, all clusters from @start_vcn to the end of the runlist are
794 * deallocated. Thus, to completely free all clusters in a runlist, use
795 * @start_vcn = 0 and @count = -1.
797 * @is_rollback should always be FALSE, it is for internal use to rollback
798 * errors. You probably want to use ntfs_cluster_free() instead.
800 * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
801 * has to deal with it later.
803 * Return the number of deallocated clusters (not counting sparse ones) on
804 * success and -errno on error.
806 * Locking: - The runlist described by @vi must be unlocked on entry and is
807 * unlocked on return.
808 * - This function takes the runlist lock of @vi for reading and
809 * sometimes for writing and sometimes modifies the runlist.
810 * - The volume lcn bitmap must be unlocked on entry and is unlocked
811 *   on return.
812 * - This function takes the volume lcn bitmap lock for writing and
813 * modifies the bitmap contents.
/*
 * NOTE(review): this copy of the function is a lossy extraction -- the leading
 * number on each line is the original source line number, and the gaps in that
 * numbering show that braces, declarations, error labels, returns and other
 * statements are missing from this view.  The code is therefore left
 * byte-for-byte untouched below; only review comments have been added.
 */
815 s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
816 const BOOL is_rollback)
818 s64 delta, to_free, total_freed, real_freed;
821 struct inode *lcnbmp_vi;
826 ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count "
827 "0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn,
828 (unsigned long long)count,
829 is_rollback ? " (rollback)" : "");
832 lcnbmp_vi = vol->lcnbmp_ino;
834 BUG_ON(start_vcn < 0);
837 * Lock the lcn bitmap for writing but only if not rolling back. We
838 * must hold the lock all the way including through rollback otherwise
839 * rollback is not possible because once we have cleared a bit and
840 * dropped the lock, anyone could have set the bit again, thus
841 * allocating the cluster for another use.
843 if (likely(!is_rollback))
844 down_write(&vol->lcnbmp_lock);
846 total_freed = real_freed = 0;
848 /* This returns with ni->runlist locked for reading on success. */
849 rl = ntfs_find_vcn(ni, start_vcn, FALSE);
852 ntfs_error(vol->sb, "Failed to find first runlist "
853 "element (error %li), aborting.",
/* LCN_HOLE is the least negative "valid" lcn; anything below it is an error. */
858 if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
860 ntfs_error(vol->sb, "First runlist element has "
861 "invalid lcn, aborting.");
865 /* Find the starting cluster inside the run that needs freeing. */
866 delta = start_vcn - rl->vcn;
868 /* The number of clusters in this run that need freeing. */
869 to_free = rl->length - delta;
870 if (count >= 0 && to_free > count)
873 if (likely(rl->lcn >= 0)) {
874 /* Do the actual freeing of the clusters in this run. */
/*
 * NOTE(review): the last argument selects the bit value -- 0 presumably
 * clears the bits (free), 1 re-sets them for the rollback path; confirm
 * against ntfs_bitmap_set_bits_in_run().
 */
875 err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta,
876 to_free, likely(!is_rollback) ? 0 : 1);
879 ntfs_error(vol->sb, "Failed to clear first run "
880 "(error %i), aborting.", err);
883 /* We have freed @to_free real clusters. */
884 real_freed = to_free;
886 /* Go to the next run and adjust the number of clusters left to free. */
891 /* Keep track of the total "freed" clusters, including sparse ones. */
892 total_freed = to_free;
894 * Loop over the remaining runs, using @count as a capping value, and
897 for (; rl->length && count != 0; ++rl) {
898 if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
902 * Attempt to map runlist, dropping runlist lock for
905 up_read(&ni->runlist.lock);
907 err = ntfs_map_runlist(ni, vcn);
910 ntfs_error(vol->sb, "Failed to map "
911 "runlist fragment.");
912 if (err == -EINVAL || err == -ENOENT)
917 * This returns with ni->runlist locked for reading on
920 rl = ntfs_find_vcn(ni, vcn, FALSE);
924 ntfs_error(vol->sb, "Failed to find "
925 "subsequent runlist "
929 if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
931 ntfs_error(vol->sb, "Runlist element "
940 /* The number of clusters in this run that need freeing. */
941 to_free = rl->length;
942 if (count >= 0 && to_free > count)
945 if (likely(rl->lcn >= 0)) {
946 /* Do the actual freeing of the clusters in the run. */
947 err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn,
948 to_free, likely(!is_rollback) ? 0 : 1);
951 ntfs_error(vol->sb, "Failed to clear "
955 /* We have freed @to_free real clusters. */
956 real_freed += to_free;
958 /* Adjust the number of clusters left to free. */
962 /* Update the total done clusters. */
963 total_freed += to_free;
/* Success path: drop the locks; the return of real_freed is elided from view. */
965 up_read(&ni->runlist.lock);
966 if (likely(!is_rollback))
967 up_write(&vol->lcnbmp_lock);
971 /* We are done. Return the number of actually freed clusters. */
/* Error paths below: unlock, optionally roll back, and propagate err. */
975 up_read(&ni->runlist.lock);
979 /* If no real clusters were freed, no need to rollback. */
981 up_write(&vol->lcnbmp_lock);
985 * Attempt to rollback and if that succeeds just return the error code.
986 * If rollback fails, set the volume errors flag, emit an error
987 * message, and return the error code.
989 delta = __ntfs_cluster_free(vi, start_vcn, total_freed, TRUE);
991 ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
992 "inconsistent metadata! Unmount and run "
993 "chkdsk.", (int)delta);
996 up_write(&vol->lcnbmp_lock);
997 ntfs_error(vol->sb, "Aborting (error %i).", err);
1001 #endif /* NTFS_RW */