fs/ntfs3: Change new sparse cluster processing
Remove ntfs_sparse_cluster.
Zero clusters in attr_allocate_clusters.
Fixes xfstest generic/263

Signed-off-by: Konstantin Komarov <[email protected]>
aalexandrovich committed Nov 14, 2022
1 parent 2f56a3f commit c380b52
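
The heart of the change is in attr_allocate_clusters() below: when a caller passes the new ALLOCATE_ZERO flag, every freshly allocated run is zeroed on disk with blkdev_issue_zeroout(), which is what allows the separate ntfs_sparse_cluster() helper to be removed. The cluster-to-sector arithmetic is easy to miss, so here is a minimal standalone sketch of it; zero_run() and its parameters are illustrative, not part of the patch:

	#include <linux/blkdev.h>

	/*
	 * Sketch: zero 'flen' clusters starting at cluster 'lcn'.
	 * SECTOR_SHIFT is 9 (512-byte sectors). With 4K clusters,
	 * cluster_bits == 12, so shift == 3 and each cluster covers
	 * 1 << 3 == 8 sectors.
	 */
	static int zero_run(struct super_block *sb, u8 cluster_bits,
			    sector_t lcn, sector_t flen)
	{
		u8 shift = cluster_bits - SECTOR_SHIFT;

		return blkdev_issue_zeroout(sb->s_bdev,
					    lcn << shift,  /* first sector */
					    flen << shift, /* sector count */
					    GFP_NOFS, 0);
	}
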
Showing 6 changed files with 166 additions and 181 deletions.
fs/ntfs3/attrib.c: 176 changes (122 additions, 54 deletions)
@@ -149,7 +149,7 @@ static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
-			   CLST *new_lcn)
+			   CLST *new_lcn, CLST *new_len)
 {
 	int err;
 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
@@ -169,20 +169,36 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 		if (err)
 			goto out;
 
-		if (new_lcn && vcn == vcn0)
-			*new_lcn = lcn;
+		if (vcn == vcn0) {
+			/* Return the first fragment. */
+			if (new_lcn)
+				*new_lcn = lcn;
+			if (new_len)
+				*new_len = flen;
+		}
 
 		/* Add new fragment into run storage. */
-		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
+		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 			/* Undo last 'ntfs_look_for_free_space' */
 			mark_as_free_ex(sbi, lcn, len, false);
 			err = -ENOMEM;
 			goto out;
 		}
 
+		if (opt & ALLOCATE_ZERO) {
+			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
+
+			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
+						   (sector_t)lcn << shift,
+						   (sector_t)flen << shift,
+						   GFP_NOFS, 0);
+			if (err)
+				goto out;
+		}
+
 		vcn += flen;
 
-		if (flen >= len || opt == ALLOCATE_MFT ||
+		if (flen >= len || (opt & ALLOCATE_MFT) ||
 		    (fr && run->count - cnt >= fr)) {
 			*alen = vcn - vcn0;
 			return 0;
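
Note the switch from opt == ALLOCATE_MFT to opt & ALLOCATE_MFT: ALLOCATE_OPT is now treated as a bit mask, so ALLOCATE_ZERO can be combined with the other options. The enum itself is defined in fs/ntfs3/ntfs_fs.h, which is not in the loaded portion of this diff; a plausible shape, shown here only as an assumption:

	/* Assumed definition; see fs/ntfs3/ntfs_fs.h in the full commit. */
	enum ALLOCATE_OPT {
		ALLOCATE_DEF  = 0, /* Allocate all clusters. */
		ALLOCATE_MFT  = 1, /* Allocation for the $MFT. */
		ALLOCATE_ZERO = 2, /* Zeroout new allocated clusters. */
	};
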
@@ -257,7 +273,8 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 		const char *data = resident_data(attr);
 
 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
-					     ALLOCATE_DEF, &alen, 0, NULL);
+					     ALLOCATE_DEF, &alen, 0, NULL,
+					     NULL);
 		if (err)
 			goto out1;

@@ -552,13 +569,13 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			/* ~3 bytes per fragment. */
 			err = attr_allocate_clusters(
 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
-				is_mft ? ALLOCATE_MFT : 0, &alen,
+				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 				is_mft ? 0
 					: (sbi->record_size -
 					   le32_to_cpu(rec->used) + 8) /
 							3 +
 						1,
-				NULL);
+				NULL, NULL);
 			if (err)
 				goto out;
 		}
@@ -855,8 +872,19 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 	return err;
 }
 
+/*
+ * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
+ *
+ * @new == NULL means just to get current mapping for 'vcn'
+ * @new != NULL means allocate real cluster if 'vcn' maps to hole
+ * @zero - zeroout new allocated clusters
+ *
+ * NOTE:
+ * - @new != NULL is called only for sparsed or compressed attributes.
+ * - new allocated clusters are zeroed via blkdev_issue_zeroout.
+ */
 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
-			CLST *len, bool *new)
+			CLST *len, bool *new, bool zero)
 {
 	int err = 0;
 	struct runs_tree *run = &ni->file.run;
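
The kernel-doc added above pins down the contract of the widened signature: @new == NULL makes the call a pure lookup, while @new != NULL backs a hole with real clusters, zeroed on disk when @zero is set. A hedged caller-side sketch (the wrapper map_or_allocate() is hypothetical; locking and error paths are trimmed):

	/* Hypothetical caller showing the two modes of the new signature. */
	static int map_or_allocate(struct ntfs_inode *ni, CLST vcn, CLST clen)
	{
		CLST lcn, len;
		bool new = false;
		int err;

		/* Lookup only: @new == NULL never allocates; on a miss
		 * the caller simply gets zero length back.
		 */
		err = attr_data_get_block(ni, vcn, clen, &lcn, &len, NULL, false);
		if (err)
			return err;

		if (len && lcn != SPARSE_LCN)
			return 0; /* Already backed by real clusters. */

		/* Hole in a sparse/compressed attribute: allocate real
		 * clusters and zero them via blkdev_issue_zeroout().
		 */
		return attr_data_get_block(ni, vcn, clen, &lcn, &len, &new, true);
	}
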
@@ -865,29 +893,27 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 	struct ATTRIB *attr = NULL, *attr_b;
 	struct ATTR_LIST_ENTRY *le, *le_b;
 	struct mft_inode *mi, *mi_b;
-	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
+	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 	unsigned fr;
 	u64 total_size;
-	u32 clst_per_frame;
-	bool ok;
 
 	if (new)
 		*new = false;
 
 	/* Try to find in cache. */
 	down_read(&ni->file.run_lock);
-	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
+	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
+		*len = 0;
 	up_read(&ni->file.run_lock);
 
-	if (ok && (*lcn != SPARSE_LCN || !new)) {
-		/* Normal way. */
-		return 0;
+	if (*len) {
+		if (*lcn != SPARSE_LCN || !new)
+			return 0; /* Fast normal way without allocation. */
+		else if (clen > *len)
+			clen = *len;
 	}
 
 	if (!clen)
 		clen = 1;
 
-	if (ok && clen > *len)
-		clen = *len;
-
+	/* No cluster in cache or we need to allocate cluster in hole. */
 	sbi = ni->mi.sbi;
 	cluster_bits = sbi->cluster_bits;

@@ -913,12 +939,6 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 		goto out;
 	}
 
-	clst_per_frame = 1u << attr_b->nres.c_unit;
-	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
-
-	if (vcn + to_alloc > asize)
-		to_alloc = asize - vcn;
-
 	svcn = le64_to_cpu(attr_b->nres.svcn);
 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

@@ -937,36 +957,68 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 	}
 
+	/* Load in cache actual information. */
 	err = attr_load_runs(attr, ni, run, NULL);
 	if (err)
 		goto out;
 
-	if (!ok) {
-		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
-		if (ok && (*lcn != SPARSE_LCN || !new)) {
-			/* Normal way. */
-			err = 0;
-			goto ok;
-		}
+	if (!*len) {
+		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
+			if (*lcn != SPARSE_LCN || !new)
+				goto ok; /* Slow normal way without allocation. */
 
-		if (!ok && !new) {
-			*len = 0;
-			err = 0;
+			if (clen > *len)
+				clen = *len;
+		} else if (!new) {
+			/* Here we may return -ENOENT.
+			 * In any case caller gets zero length. */
 			goto ok;
 		}
-
-		if (ok && clen > *len) {
-			clen = *len;
-			to_alloc = (clen + clst_per_frame - 1) &
-				   ~(clst_per_frame - 1);
-		}
 	}
 
 	if (!is_attr_ext(attr_b)) {
+		/* The code below only for sparsed or compressed attributes. */
 		err = -EINVAL;
 		goto out;
 	}
 
+	vcn0 = vcn;
+	to_alloc = clen;
+	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
+	/* Allocate frame aligned clusters.
+	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
+	 * ntfs3 uses 1 cluster per frame for new created sparsed files. */
+	if (attr_b->nres.c_unit) {
+		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
+		CLST cmask = ~(clst_per_frame - 1);
+
+		/* Get frame aligned vcn and to_alloc. */
+		vcn = vcn0 & cmask;
+		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
+		if (fr < clst_per_frame)
+			fr = clst_per_frame;
+		zero = true;
+
+		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
+		if (vcn < svcn || evcn1 <= vcn) {
+			/* Load attribute for truncated vcn. */
+			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
+					    &vcn, &mi);
+			if (!attr) {
+				err = -EINVAL;
+				goto out;
+			}
+			svcn = le64_to_cpu(attr->nres.svcn);
+			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+			err = attr_load_runs(attr, ni, run, NULL);
+			if (err)
+				goto out;
+		}
+	}
+
+	if (vcn + to_alloc > asize)
+		to_alloc = asize - vcn;
+
 	/* Get the last LCN to allocate from. */
 	hint = 0;

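The frame-alignment arithmetic above is easiest to follow with numbers. Taking c_unit == 4, i.e. the 16-clusters-per-frame layout the comment attributes to ntfs.sys, and a caller asking for clusters 21..23 (the values are made up for illustration):

	CLST clst_per_frame = 1u << 4;		/* 16 clusters per frame */
	CLST cmask = ~(clst_per_frame - 1);	/* low four bits cleared */

	/* Request: vcn0 = 21, clen = 3. */
	CLST vcn = 21 & cmask;				/* 16 */
	CLST to_alloc = ((21 + 3 + 16 - 1) & cmask) - vcn;	/* 32 - 16 = 16 */

So the whole frame, clusters 16..31, is allocated, and because zero is forced to true it is also zeroed on disk, even though the caller only asked for three clusters.
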
@@ -980,18 +1032,33 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 		hint = -1;
 	}
 
-	err = attr_allocate_clusters(
-		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
-		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
-		lcn);
+	/* Allocate and zeroout new clusters. */
+	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
+				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
+				     fr, lcn, len);
 	if (err)
 		goto out;
 	*new = true;
 
-	end = vcn + *len;
-
+	end = vcn + alen;
 	total_size = le64_to_cpu(attr_b->nres.total_size) +
-		     ((u64)*len << cluster_bits);
+		     ((u64)alen << cluster_bits);
+
+	if (vcn != vcn0) {
+		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
+			err = -EINVAL;
+			goto out;
+		}
+		if (*lcn == SPARSE_LCN) {
+			/* Internal error. Should not happened. */
+			WARN_ON(1);
+			err = -EINVAL;
+			goto out;
+		}
+		/* Check case when vcn0 + len overlaps new allocated clusters. */
+		if (vcn0 + *len > end)
+			*len = end - vcn0;
+	}
 
 repack:
 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
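
When the start was aligned down (vcn != vcn0), the block above re-resolves the caller's original vcn0 inside the freshly packed run table and clamps *len so the reported extent stays within the new allocation. Continuing the made-up numbers from before: with vcn = 16 and alen = 16 we get end = 32; if the lookup at vcn0 = 21 returns a merged run longer than 11 clusters, then vcn0 + *len > 32 and *len is trimmed to end - vcn0 = 11, i.e. clusters 21..31.
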
@@ -1516,7 +1583,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
 	struct ATTRIB *attr = NULL, *attr_b;
 	struct ATTR_LIST_ENTRY *le, *le_b;
 	struct mft_inode *mi, *mi_b;
-	CLST svcn, evcn1, next_svcn, lcn, len;
+	CLST svcn, evcn1, next_svcn, len;
 	CLST vcn, end, clst_data;
 	u64 total_size, valid_size, data_size;

@@ -1592,8 +1659,9 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
 		}
 
 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
-					     hint + 1, len - clst_data, NULL, 0,
-					     &alen, 0, &lcn);
+					     hint + 1, len - clst_data, NULL,
+					     ALLOCATE_DEF, &alen, 0, NULL,
+					     NULL);
 		if (err)
 			goto out;