// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined externally
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
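
/*
 * Example: for size == 64 MiB, size >> 24 == 4 and __ffs(4) == 2, so
 * align_shift == 17 and the size is rounded up to a multiple of 128 KiB.
 * Sizes at or below 16 MiB round up in 64 KiB clumps; sizes at or above
 * 16 GiB round up in 64 MiB clumps.
 */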

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
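/*
 * @pre_alloc - extra clusters to preallocate; dropped to zero and the
 *              allocation retried if the volume cannot satisfy them.
 * @fr        - when non-zero, bounds how many new fragments may be added
 *              to @run in a single call.
 */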
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}

/*
 * attr_make_nonresident
 *
 * If page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
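/*
 * Conversion outline: copy the resident payload aside, allocate clusters,
 * write the payload to them (or into the page cache for unnamed $DATA),
 * remove the resident attribute from the record and insert a nonresident
 * one that points at the new run.
 */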
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non-resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
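
/*
 * Example: growing a 5-byte resident attribute to 20 bytes gives
 * dsize = ALIGN(20, 8) - ALIGN(5, 8) = 24 - 8 = 16, so the record tail is
 * shifted 16 bytes right and the added bytes are zeroed.
 */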

/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
		      !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;

again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In simple case we have to:
		 *  - allocate space (vcn, lcn, len)
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
						    sbi, get_pre_allocated(
								 new_size)) -
					    new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT records to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in a consistent state.
		 * Save this point to restore to if next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In simple case we have to:
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 *  - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry (vcn==0)
			 * and it is not the first in the entries array
			 * (the list entry for the std attribute is always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return 0;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
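
/*
 * Error handling above: undo_2 restores the saved sizes on the base attribute
 * and repacks the runs; undo_1 releases the clusters allocated in this call.
 * If the restore itself fails, the inode is marked bad via _ntfs_bad_inode().
 */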

/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just to get current mapping for 'vcn'
 * @new != NULL means allocate real cluster if 'vcn' maps to hole
 * @zero - zero out newly allocated clusters
 *
 *  NOTE:
 *  - @new != NULL is called only for sparse or compressed attributes.
 *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len && (*lcn != SPARSE_LCN || !new))
		return 0; /* Fast normal way without allocation. */

	/* No cluster in cache or we need to allocate cluster in hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	/* Repeat the code above (under write lock). */
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			goto out; /* Normal way without allocation. */
		if (clen > *len)
			clen = *len;
	}

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load in cache actual information. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */

			if (clen > *len)
				clen = *len;
		} else if (!new) {
			/* Here we may return -ENOENT.
			 * In any case caller gets zero length. */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below is only for sparse or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/* Allocate frame-aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
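	/*
	 * Example: with c_unit == 4 (16 clusters per frame), a request at
	 * vcn0 == 5 for clen == 3 clusters is widened below to vcn == 0,
	 * to_alloc == 16.
	 */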
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;

		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zero out new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check case when vcn0 + len overlaps newly allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto undo1;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	/*
	 * The code below may require an additional cluster (to extend the
	 * attribute list) and/or one MFT record. It is too complex to undo
	 * operations if -ENOSPC occurs deep inside 'ni_insert_nonresident'.
	 * Return -ENOSPC in advance here if there is no free cluster and no
	 * free MFT record.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn). */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			attr = NULL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}
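
/*
 * 'step' in attr_data_get_block tracks progress: step 1 means new clusters
 * were allocated (rolled back via undo1); step 2 means attribute segments
 * were already modified, which is too complex to roll back, so errors there
 * mark the inode as bad.
 */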

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}

/*
 * attr_load_runs_range - Load runs for given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
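	/*
	 * Example: for a file of 4 GiB or more the offsets are 64-bit, so the
	 * end offset of frame 10 is read from byte 80 of the table and its
	 * start offset from byte 72 (frame 0 always starts at offset 0).
	 */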
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif

/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
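/*
 * Example: with a 16-cluster frame, a layout of 10 data clusters followed by
 * 6 sparse clusters means the frame is compressed and *clst_data == 10;
 * a full run of 16 data clusters means the frame is stored uncompressed.
 */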
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters + sparse clusters are
				 * not enough for the frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
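/*
 * The frame currently owns 'clst_data' clusters and the new compressed size
 * needs 'len': shrinking replaces the tail with sparse clusters, growing
 * allocates 'len - clst_data' more, then the runs are repacked.
 */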
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (attr_b) {
		if (new_valid > data_size)
			new_valid = data_size;

		valid_size = le64_to_cpu(attr_b->nres.valid_size);
		if (new_valid != valid_size) {
			attr_b->nres.valid_size = cpu_to_le64(new_valid);
			mi_b->dirty = true;
		}
	}

	return err;
}

/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only cluster-aligned ranges can be collapsed. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
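	/*
	 * Per segment there are three cases: it lies entirely after the hole
	 * (shift its VCNs down by 'len'), it overlaps the hole partially
	 * (deallocate and collapse the overlapped part), or it lies entirely
	 * inside the hole (deallocate it and drop the segment).
	 */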
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	i_size_write(&ni->vfs_inode, data_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
2099
2100/*
2101 * attr_punch_hole
2102 *
2103 * Not for normal files.
2104 */
2105int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2106{
2107	int err = 0;
2108	struct runs_tree *run = &ni->file.run;
2109	struct ntfs_sb_info *sbi = ni->mi.sbi;
2110	struct ATTRIB *attr = NULL, *attr_b;
2111	struct ATTR_LIST_ENTRY *le, *le_b;
2112	struct mft_inode *mi, *mi_b;
2113	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2114	u64 total_size, alloc_size;
2115	u32 mask;
2116	__le16 a_flags;
2117	struct runs_tree run2;
2118
2119	if (!bytes)
2120		return 0;
2121
2122	le_b = NULL;
2123	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2124	if (!attr_b)
2125		return -ENOENT;
2126
2127	if (!attr_b->non_res) {
2128		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2129		u32 from, to;
2130
2131		if (vbo > data_size)
2132			return 0;
2133
2134		from = vbo;
2135		to = min_t(u64, vbo + bytes, data_size);
2136		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2137		return 0;
2138	}
2139
2140	if (!is_attr_ext(attr_b))
2141		return -EOPNOTSUPP;
2142
2143	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2144	total_size = le64_to_cpu(attr_b->nres.total_size);
2145
2146	if (vbo >= alloc_size) {
2147		/* NOTE: It is allowed. */
2148		return 0;
2149	}
2150
2151	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2152
2153	bytes += vbo;
2154	if (bytes > alloc_size)
2155		bytes = alloc_size;
2156	bytes -= vbo;
2157
2158	if ((vbo & mask) || (bytes & mask)) {
2159		/* We have to zero one or more ranges. */
2160		if (frame_size == NULL) {
2161			/* Caller insists range is aligned. */
2162			return -EINVAL;
2163		}
2164		*frame_size = mask + 1;
2165		return E_NTFS_NOTALIGNED;
2166	}
2167
2168	down_write(&ni->file.run_lock);
2169	run_init(&run2);
2170	run_truncate(run, 0);
2171
2172	/*
2173	 * Enumerate all attribute segments and punch hole where necessary.
2174	 */
2175	alen = alloc_size >> sbi->cluster_bits;
2176	vcn = vbo >> sbi->cluster_bits;
2177	len = bytes >> sbi->cluster_bits;
2178	end = vcn + len;
2179	hole = 0;
2180
2181	svcn = le64_to_cpu(attr_b->nres.svcn);
2182	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2183	a_flags = attr_b->flags;
2184
2185	if (svcn <= vcn && vcn < evcn1) {
2186		attr = attr_b;
2187		le = le_b;
2188		mi = mi_b;
2189	} else if (!le_b) {
2190		err = -EINVAL;
2191		goto bad_inode;
2192	} else {
2193		le = le_b;
2194		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2195				    &mi);
2196		if (!attr) {
2197			err = -EINVAL;
2198			goto bad_inode;
2199		}
2200
2201		svcn = le64_to_cpu(attr->nres.svcn);
2202		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2203	}
2204
2205	while (svcn < end) {
2206		CLST vcn1, zero, hole2 = hole;
2207
2208		err = attr_load_runs(attr, ni, run, &svcn);
2209		if (err)
2210			goto done;
2211		vcn1 = max(vcn, svcn);
2212		zero = min(end, evcn1) - vcn1;
2213
2214		/*
2215		 * Check range [vcn1 + zero).
2216		 * Calculate how many clusters there are.
2217		 * Don't do any destructive actions.
2218		 */
2219		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2220		if (err)
2221			goto done;
2222
2223		/* Check if required range is already hole. */
2224		if (hole2 == hole)
2225			goto next_attr;
2226
2227		/* Clone the run so we can undo. */
2228		err = run_clone(run, &run2);
2229		if (err)
2230			goto done;
2231
2232		/* Make a hole range (sparse) [vcn1 + zero). */
2233		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2234			err = -ENOMEM;
2235			goto done;
2236		}
2237
2238		/* Update run in attribute segment. */
2239		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2240		if (err)
2241			goto done;
2242		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2243		if (next_svcn < evcn1) {
2244			/* Insert new attribute segment. */
2245			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2246						    next_svcn,
2247						    evcn1 - next_svcn, a_flags,
2248						    &attr, &mi, &le);
2249			if (err)
2250				goto undo_punch;
2251
2252			/* Layout of records may have changed. */
2253			attr_b = NULL;
2254		}
2255
2256		/* Real deallocate. Should not fail. */
2257		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2258
2259next_attr:
2260		/* Free all allocated memory. */
2261		run_truncate(run, 0);
2262
2263		if (evcn1 >= alen)
2264			break;
2265
2266		/* Get next attribute segment. */
2267		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2268		if (!attr) {
2269			err = -EINVAL;
2270			goto bad_inode;
2271		}
2272
2273		svcn = le64_to_cpu(attr->nres.svcn);
2274		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2275	}
2276
2277done:
2278	if (!hole)
2279		goto out;
2280
2281	if (!attr_b) {
2282		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2283				      &mi_b);
2284		if (!attr_b) {
2285			err = -EINVAL;
2286			goto bad_inode;
2287		}
2288	}
2289
2290	total_size -= (u64)hole << sbi->cluster_bits;
2291	attr_b->nres.total_size = cpu_to_le64(total_size);
2292	mi_b->dirty = true;
2293
2294	/* Update inode size. */
2295	inode_set_bytes(&ni->vfs_inode, total_size);
2296	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2297	mark_inode_dirty(&ni->vfs_inode);
2298
2299out:
2300	run_close(&run2);
2301	up_write(&ni->file.run_lock);
2302	return err;
2303
2304bad_inode:
2305	_ntfs_bad_inode(&ni->vfs_inode);
2306	goto out;
2307
2308undo_punch:
2309	/*
2310	 * Restore packed runs.
2311	 * 'mi_pack_runs' should not fail because we restore the original.
2312	 */
2313	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2314		goto bad_inode;
2315
2316	goto done;
2317}
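/*
 * Illustrative sketch, not part of the kernel source: the frame-alignment
 * contract of attr_punch_hole() above.  With an assumed cluster_size of
 * 4096 and c_unit == 4 (16 clusters per compression frame), mask is 0xffff,
 * so only 64K-aligned (vbo, bytes) pairs can be punched directly; anything
 * else yields E_NTFS_NOTALIGNED plus *frame_size, and the caller must zero
 * the unaligned head/tail itself.  Standalone userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cluster_size = 4096;	/* assumed */
	unsigned char c_unit = 4;		/* assumed: 16-cluster frames */
	unsigned long long vbo = 0x18000, bytes = 0x30000;
	unsigned long long mask =
		((unsigned long long)cluster_size << c_unit) - 1;

	if ((vbo & mask) || (bytes & mask))
		printf("E_NTFS_NOTALIGNED, frame_size=%llu\n", mask + 1);
	else
		printf("aligned: punch [%llu, %llu)\n", vbo, vbo + bytes);
	return 0;
}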
2318
2319/*
2320 * attr_insert_range - Insert range (hole) in file.
2321 * Not for normal files.
2322 */
2323int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2324{
2325	int err = 0;
2326	struct runs_tree *run = &ni->file.run;
2327	struct ntfs_sb_info *sbi = ni->mi.sbi;
2328	struct ATTRIB *attr = NULL, *attr_b;
2329	struct ATTR_LIST_ENTRY *le, *le_b;
2330	struct mft_inode *mi, *mi_b;
2331	CLST vcn, svcn, evcn1, len, next_svcn;
2332	u64 data_size, alloc_size;
2333	u32 mask;
2334	__le16 a_flags;
2335
2336	if (!bytes)
2337		return 0;
2338
2339	le_b = NULL;
2340	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2341	if (!attr_b)
2342		return -ENOENT;
2343
2344	if (!is_attr_ext(attr_b)) {
2345		/* It was checked above. See fallocate. */
2346		return -EOPNOTSUPP;
2347	}
2348
2349	if (!attr_b->non_res) {
2350		data_size = le32_to_cpu(attr_b->res.data_size);
2351		alloc_size = data_size;
2352		mask = sbi->cluster_mask; /* cluster_size - 1 */
2353	} else {
2354		data_size = le64_to_cpu(attr_b->nres.data_size);
2355		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2356		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2357	}
2358
2359	if (vbo > data_size) {
2360		/* Inserting a range beyond the end of the file is not allowed. */
2361		return -EINVAL;
2362	}
2363
2364	if ((vbo & mask) || (bytes & mask)) {
2365		/* Only frame-aligned ranges may be inserted. */
2366		return -EINVAL;
2367	}
2368
2369	/*
2370	 * valid_size <= data_size <= alloc_size
2371	 * Check alloc_size for maximum possible.
2372	 */
2373	if (bytes > sbi->maxbytes_sparse - alloc_size)
2374		return -EFBIG;
2375
2376	vcn = vbo >> sbi->cluster_bits;
2377	len = bytes >> sbi->cluster_bits;
2378
2379	down_write(&ni->file.run_lock);
2380
2381	if (!attr_b->non_res) {
2382		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2383				    data_size + bytes, NULL, false, NULL);
2384
2385		le_b = NULL;
2386		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2387				      &mi_b);
2388		if (!attr_b) {
2389			err = -EINVAL;
2390			goto bad_inode;
2391		}
2392
2393		if (err)
2394			goto out;
2395
2396		if (!attr_b->non_res) {
2397			/* Still resident. */
2398			char *data = Add2Ptr(attr_b,
2399					     le16_to_cpu(attr_b->res.data_off));
2400
2401			memmove(data + bytes, data, bytes);
2402			memset(data, 0, bytes);
2403			goto done;
2404		}
2405
2406		/* Resident file becomes nonresident. */
2407		data_size = le64_to_cpu(attr_b->nres.data_size);
2408		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2409	}
2410
2411	/*
2412	 * Enumerate all attribute segments and shift start vcn.
2413	 */
2414	a_flags = attr_b->flags;
2415	svcn = le64_to_cpu(attr_b->nres.svcn);
2416	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2417
2418	if (svcn <= vcn && vcn < evcn1) {
2419		attr = attr_b;
2420		le = le_b;
2421		mi = mi_b;
2422	} else if (!le_b) {
2423		err = -EINVAL;
2424		goto bad_inode;
2425	} else {
2426		le = le_b;
2427		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2428				    &mi);
2429		if (!attr) {
2430			err = -EINVAL;
2431			goto bad_inode;
2432		}
2433
2434		svcn = le64_to_cpu(attr->nres.svcn);
2435		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2436	}
2437
2438	run_truncate(run, 0); /* clear cached values. */
2439	err = attr_load_runs(attr, ni, run, NULL);
2440	if (err)
2441		goto out;
2442
2443	if (!run_insert_range(run, vcn, len)) {
2444		err = -ENOMEM;
2445		goto out;
2446	}
2447
2448	/* Try to pack as much as possible into the current record. */
2449	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2450	if (err)
2451		goto out;
2452
2453	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2454
2455	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2456	       attr->type == ATTR_DATA && !attr->name_len) {
2457		le64_add_cpu(&attr->nres.svcn, len);
2458		le64_add_cpu(&attr->nres.evcn, len);
2459		if (le) {
2460			le->vcn = attr->nres.svcn;
2461			ni->attr_list.dirty = true;
2462		}
2463		mi->dirty = true;
2464	}
2465
2466	if (next_svcn < evcn1 + len) {
2467		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2468					    next_svcn, evcn1 + len - next_svcn,
2469					    a_flags, NULL, NULL, NULL);
2470
2471		le_b = NULL;
2472		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2473				      &mi_b);
2474		if (!attr_b) {
2475			err = -EINVAL;
2476			goto bad_inode;
2477		}
2478
2479		if (err) {
2480			/* ni_insert_nonresident failed. Try to undo. */
2481			goto undo_insert_range;
2482		}
2483	}
2484
2485	/*
2486	 * Update primary attribute segment.
2487	 */
2488	if (vbo <= ni->i_valid)
2489		ni->i_valid += bytes;
2490
2491	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2492	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2493
2494	/* ni->i_valid may temporarily differ from valid_size. */
2495	if (ni->i_valid > data_size + bytes)
2496		attr_b->nres.valid_size = attr_b->nres.data_size;
2497	else
2498		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2499	mi_b->dirty = true;
2500
2501done:
2502	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2503	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2504	mark_inode_dirty(&ni->vfs_inode);
2505
2506out:
2507	run_truncate(run, 0); /* clear cached values. */
2508
2509	up_write(&ni->file.run_lock);
2510
2511	return err;
2512
2513bad_inode:
2514	_ntfs_bad_inode(&ni->vfs_inode);
2515	goto out;
2516
2517undo_insert_range:
2518	svcn = le64_to_cpu(attr_b->nres.svcn);
2519	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2520
2521	if (svcn <= vcn && vcn < evcn1) {
2522		attr = attr_b;
2523		le = le_b;
2524		mi = mi_b;
2525	} else if (!le_b) {
2526		goto bad_inode;
2527	} else {
2528		le = le_b;
2529		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2530				    &mi);
2531		if (!attr) {
2532			goto bad_inode;
2533		}
2534
2535		svcn = le64_to_cpu(attr->nres.svcn);
2536		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2537	}
2538
2539	if (attr_load_runs(attr, ni, run, NULL))
2540		goto bad_inode;
2541
2542	if (!run_collapse_range(run, vcn, len))
2543		goto bad_inode;
2544
2545	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2546		goto bad_inode;
2547
2548	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2549	       attr->type == ATTR_DATA && !attr->name_len) {
2550		le64_sub_cpu(&attr->nres.svcn, len);
2551		le64_sub_cpu(&attr->nres.evcn, len);
2552		if (le) {
2553			le->vcn = attr->nres.svcn;
2554			ni->attr_list.dirty = true;
2555		}
2556		mi->dirty = true;
2557	}
2558
2559	goto out;
2560}
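/*
 * Illustrative sketch, not part of the kernel source: the VCN shift that
 * attr_insert_range() applies above.  Inserting 'len' clusters at 'vcn'
 * leaves earlier fragments alone and moves every later fragment (and the
 * svcn/evcn of every later attribute segment) up by 'len', which is what
 * run_insert_range() and the le64_add_cpu() loop do.  The fragment table
 * below is invented; standalone userspace C.
 */
#include <stdio.h>

struct frag { unsigned long long vcn, lcn, len; };

int main(void)
{
	struct frag run[] = { { 0, 100, 10 }, { 10, 500, 10 } };
	unsigned long long vcn = 10, len = 4;	/* 4-cluster hole at vcn 10 */

	for (int i = 0; i < 2; i++)
		if (run[i].vcn >= vcn)
			run[i].vcn += len;	/* like svcn/evcn += len */

	printf("new hole: [%llu, %llu)\n", vcn, vcn + len);
	for (int i = 0; i < 2; i++)
		printf("frag %d: vcn=%llu lcn=%llu len=%llu\n",
		       i, run[i].vcn, run[i].lcn, run[i].len);
	return 0;
}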
v6.13.7
 144}
 145
 146/*
 147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 148 */
 149int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 150			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 151			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
 152			   CLST *new_lcn, CLST *new_len)
 153{
 154	int err;
 155	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
 156	size_t cnt = run->count;
 157
 158	for (;;) {
 159		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
 160					       opt);
 161
 162		if (err == -ENOSPC && pre) {
 163			pre = 0;
 164			if (*pre_alloc)
 165				*pre_alloc = 0;
 166			continue;
 167		}
 168
 169		if (err)
 170			goto out;
 171
 172		if (vcn == vcn0) {
 173			/* Return the first fragment. */
 174			if (new_lcn)
 175				*new_lcn = lcn;
 176			if (new_len)
 177				*new_len = flen;
 178		}
 179
 180		/* Add new fragment into run storage. */
 181		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 182			/* Undo last 'ntfs_look_for_free_space' */
 183			mark_as_free_ex(sbi, lcn, len, false);
 184			err = -ENOMEM;
 185			goto out;
 186		}
 187
 188		if (opt & ALLOCATE_ZERO) {
 189			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
 190
 191			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
 192						   (sector_t)lcn << shift,
 193						   (sector_t)flen << shift,
 194						   GFP_NOFS, 0);
 195			if (err)
 196				goto out;
 197		}
 198
 199		vcn += flen;
 200
 201		if (flen >= len || (opt & ALLOCATE_MFT) ||
 202		    (fr && run->count - cnt >= fr)) {
 203			*alen = vcn - vcn0;
 204			return 0;
 205		}
 206
 207		len -= flen;
 208	}
 209
 210out:
 211	/* Undo 'ntfs_look_for_free_space' */
 212	if (vcn - vcn0) {
 213		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
 214		run_truncate(run, vcn0);
 215	}
 216
 217	return err;
 218}
 219
 220/*
 221 * attr_make_nonresident
 222 *
 223 * If page is not NULL, it already contains resident data
 224 * and is locked (called from ni_write_frame()).
 225 */
 226int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 227			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 228			  u64 new_size, struct runs_tree *run,
 229			  struct ATTRIB **ins_attr, struct page *page)
 230{
 231	struct ntfs_sb_info *sbi;
 232	struct ATTRIB *attr_s;
 233	struct MFT_REC *rec;
 234	u32 used, asize, rsize, aoff;
 235	bool is_data;
 236	CLST len, alen;
 237	char *next;
 238	int err;
 239
 240	if (attr->non_res) {
 241		*ins_attr = attr;
 242		return 0;
 243	}
 244
 245	sbi = mi->sbi;
 246	rec = mi->mrec;
 247	attr_s = NULL;
 248	used = le32_to_cpu(rec->used);
 249	asize = le32_to_cpu(attr->size);
 250	next = Add2Ptr(attr, asize);
 251	aoff = PtrOffset(rec, attr);
 252	rsize = le32_to_cpu(attr->res.data_size);
 253	is_data = attr->type == ATTR_DATA && !attr->name_len;
 254
 255	/* len - how many clusters are required to store 'rsize' bytes */
 256	if (is_attr_compressed(attr)) {
 257		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
 258		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
 259	} else {
 260		len = bytes_to_cluster(sbi, rsize);
 261	}
 262
 263	run_init(run);
 264
 265	/* Make a copy of original attribute. */
 266	attr_s = kmemdup(attr, asize, GFP_NOFS);
 267	if (!attr_s) {
 268		err = -ENOMEM;
 269		goto out;
 270	}
 271
 272	if (!len) {
 273		/* Empty resident -> Empty nonresident. */
 274		alen = 0;
 275	} else {
 276		const char *data = resident_data(attr);
 277
 278		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 279					     ALLOCATE_DEF, &alen, 0, NULL,
 280					     NULL);
 281		if (err)
 282			goto out1;
 283
 284		if (!rsize) {
 285			/* Empty resident -> Non empty nonresident. */
 286		} else if (!is_data) {
 287			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
 288			if (err)
 289				goto out2;
 290		} else if (!page) {
 291			struct address_space *mapping = ni->vfs_inode.i_mapping;
 292			struct folio *folio;
 293
 294			folio = __filemap_get_folio(
 295				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 296				mapping_gfp_mask(mapping));
 297			if (IS_ERR(folio)) {
 298				err = PTR_ERR(folio);
 299				goto out2;
 300			}
 301			folio_fill_tail(folio, 0, data, rsize);
 302			folio_mark_uptodate(folio);
 303			folio_mark_dirty(folio);
 304			folio_unlock(folio);
 305			folio_put(folio);
 306		}
 307	}
 308
 309	/* Remove original attribute. */
 310	used -= asize;
 311	memmove(attr, Add2Ptr(attr, asize), used - aoff);
 312	rec->used = cpu_to_le32(used);
 313	mi->dirty = true;
 314	if (le)
 315		al_remove_le(ni, le);
 316
 317	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
 318				    attr_s->name_len, run, 0, alen,
 319				    attr_s->flags, &attr, NULL, NULL);
 320	if (err)
 321		goto out3;
 322
 323	kfree(attr_s);
 324	attr->nres.data_size = cpu_to_le64(rsize);
 325	attr->nres.valid_size = attr->nres.data_size;
 326
 327	*ins_attr = attr;
 328
 329	if (is_data)
 330		ni->ni_flags &= ~NI_FLAG_RESIDENT;
 331
 332	/* Resident attribute becomes nonresident. */
 333	return 0;
 334
 335out3:
 336	attr = Add2Ptr(rec, aoff);
 337	memmove(next, attr, used - aoff);
 338	memcpy(attr, attr_s, asize);
 339	rec->used = cpu_to_le32(used + asize);
 340	mi->dirty = true;
 341out2:
 342	/* Undo: do not trim new allocated clusters. */
 343	run_deallocate(sbi, run, false);
 344	run_close(run);
 345out1:
 346	kfree(attr_s);
 347out:
 348	return err;
 349}
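/*
 * Illustrative sketch, not part of the kernel source: the cluster-count
 * computation at the top of attr_make_nonresident() above.  Assuming
 * cluster_bits == 12 (4K clusters) and NTFS_LZNT_CUNIT == 4, shift is 16
 * (one 64K compression frame) and the resident payload is rounded up to
 * whole 16-cluster frames.  Standalone userspace C.
 */
#include <stdio.h>

#define NTFS_LZNT_CUNIT 4	/* log2(clusters per compression frame) */

int main(void)
{
	unsigned char cluster_bits = 12;	/* assumed */
	unsigned int rsize = 70000;		/* assumed resident size */
	unsigned char shift = cluster_bits + NTFS_LZNT_CUNIT;
	unsigned long long len =
		(((unsigned long long)rsize + (1u << shift) - 1) >> shift)
			<< NTFS_LZNT_CUNIT;

	/* Prints: 70000 bytes -> 32 clusters (2 frames) */
	printf("%u bytes -> %llu clusters (%llu frames)\n",
	       rsize, len, len >> NTFS_LZNT_CUNIT);
	return 0;
}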
 350
 351/*
 352 * attr_set_size_res - Helper for attr_set_size().
 353 */
 354static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
 355			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 356			     u64 new_size, struct runs_tree *run,
 357			     struct ATTRIB **ins_attr)
 358{
 359	struct ntfs_sb_info *sbi = mi->sbi;
 360	struct MFT_REC *rec = mi->mrec;
 361	u32 used = le32_to_cpu(rec->used);
 362	u32 asize = le32_to_cpu(attr->size);
 363	u32 aoff = PtrOffset(rec, attr);
 364	u32 rsize = le32_to_cpu(attr->res.data_size);
 365	u32 tail = used - aoff - asize;
 366	char *next = Add2Ptr(attr, asize);
 367	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
 368
 369	if (dsize < 0) {
 370		memmove(next + dsize, next, tail);
 371	} else if (dsize > 0) {
 372		if (used + dsize > sbi->max_bytes_per_attr)
 373			return attr_make_nonresident(ni, attr, le, mi, new_size,
 374						     run, ins_attr, NULL);
 375
 376		memmove(next + dsize, next, tail);
 377		memset(next, 0, dsize);
 378	}
 379
 380	if (new_size > rsize)
 381		memset(Add2Ptr(resident_data(attr), rsize), 0,
 382		       new_size - rsize);
 383
 384	rec->used = cpu_to_le32(used + dsize);
 385	attr->size = cpu_to_le32(asize + dsize);
 386	attr->res.data_size = cpu_to_le32(new_size);
 387	mi->dirty = true;
 388	*ins_attr = attr;
 389
 390	return 0;
 391}
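/*
 * Illustrative sketch, not part of the kernel source: the resize delta used
 * by attr_set_size_res() above.  Payloads stay 8-byte aligned inside the
 * MFT record, so the record grows or shrinks by the difference of the two
 * aligned sizes, and the bytes after the attribute move by that delta.
 * Values are assumed; standalone userspace C.
 */
#include <stdio.h>

#define ALIGN8(x) (((x) + 7) & ~7ull)

int main(void)
{
	unsigned long long rsize = 20, new_size = 100;	/* assumed */
	long long dsize = (long long)ALIGN8(new_size) - (long long)ALIGN8(rsize);

	/* dsize > 0: open a gap before the tail; dsize < 0: close one. */
	printf("ALIGN8(%llu)=%llu, ALIGN8(%llu)=%llu, dsize=%lld\n",
	       rsize, ALIGN8(rsize), new_size, ALIGN8(new_size), dsize);
	return 0;
}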
 392
 393/*
 394 * attr_set_size - Change the size of attribute.
 395 *
 396 * Extend:
 397 *   - Sparse/compressed: No allocated clusters.
 398 *   - Normal: Append allocated and preallocated new clusters.
 399 * Shrink:
 400 *   - No deallocate if @keep_prealloc is set.
 401 */
 402int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 403		  const __le16 *name, u8 name_len, struct runs_tree *run,
 404		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
 405		  struct ATTRIB **ret)
 406{
 407	int err = 0;
 408	struct ntfs_sb_info *sbi = ni->mi.sbi;
 409	u8 cluster_bits = sbi->cluster_bits;
 410	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
 411		      !name_len;
 412	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 413	struct ATTRIB *attr = NULL, *attr_b;
 414	struct ATTR_LIST_ENTRY *le, *le_b;
 415	struct mft_inode *mi, *mi_b;
 416	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 417	CLST next_svcn, pre_alloc = -1, done = 0;
 418	bool is_ext, is_bad = false;
 419	bool dirty = false;
 420	u32 align;
 421	struct MFT_REC *rec;
 422
 423again:
 424	alen = 0;
 425	le_b = NULL;
 426	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 427			      &mi_b);
 428	if (!attr_b) {
 429		err = -ENOENT;
 430		goto bad_inode;
 431	}
 432
 433	if (!attr_b->non_res) {
 434		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 435					&attr_b);
 436		if (err)
 437			return err;
 438
 439		/* Return if file is still resident. */
 440		if (!attr_b->non_res) {
 441			dirty = true;
 442			goto ok1;
 443		}
 444
 445		/* Layout of records may be changed, so do a full search. */
 446		goto again;
 447	}
 448
 449	is_ext = is_attr_ext(attr_b);
 450	align = sbi->cluster_size;
 451	if (is_ext)
 452		align <<= attr_b->nres.c_unit;
 453
 454	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 455	old_size = le64_to_cpu(attr_b->nres.data_size);
 456	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 457
 458again_1:
 459	old_alen = old_alloc >> cluster_bits;
 460
 461	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
 462	new_alen = new_alloc >> cluster_bits;
 463
 464	if (keep_prealloc && new_size < old_size) {
 465		attr_b->nres.data_size = cpu_to_le64(new_size);
 466		mi_b->dirty = dirty = true;
 467		goto ok;
 468	}
 469
 470	vcn = old_alen - 1;
 471
 472	svcn = le64_to_cpu(attr_b->nres.svcn);
 473	evcn = le64_to_cpu(attr_b->nres.evcn);
 474
 475	if (svcn <= vcn && vcn <= evcn) {
 476		attr = attr_b;
 477		le = le_b;
 478		mi = mi_b;
 479	} else if (!le_b) {
 480		err = -EINVAL;
 481		goto bad_inode;
 482	} else {
 483		le = le_b;
 484		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 485				    &mi);
 486		if (!attr) {
 487			err = -EINVAL;
 488			goto bad_inode;
 489		}
 490
 491next_le_1:
 492		svcn = le64_to_cpu(attr->nres.svcn);
 493		evcn = le64_to_cpu(attr->nres.evcn);
 494	}
 495	/*
 496	 * Here we have:
 497	 * attr,mi,le - last attribute segment (containing 'vcn').
 498	 * attr_b,mi_b,le_b - base (primary) attribute segment.
 499	 */
 500next_le:
 501	rec = mi->mrec;
 502	err = attr_load_runs(attr, ni, run, NULL);
 503	if (err)
 504		goto out;
 505
 506	if (new_size > old_size) {
 507		CLST to_allocate;
 508		size_t free;
 509
 510		if (new_alloc <= old_alloc) {
 511			attr_b->nres.data_size = cpu_to_le64(new_size);
 512			mi_b->dirty = dirty = true;
 513			goto ok;
 514		}
 515
 516		/*
 517		 * Add clusters. In simple case we have to:
 518		 *  - allocate space (vcn, lcn, len)
 519		 *  - update packed run in 'mi'
 520		 *  - update attr->nres.evcn
 521		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 522		 */
 523		to_allocate = new_alen - old_alen;
 524add_alloc_in_same_attr_seg:
 525		lcn = 0;
 526		if (is_mft) {
 527			/* MFT allocates clusters from MFT zone. */
 528			pre_alloc = 0;
 529		} else if (is_ext) {
 530			/* No preallocate for sparse/compress. */
 531			pre_alloc = 0;
 532		} else if (pre_alloc == -1) {
 533			pre_alloc = 0;
 534			if (type == ATTR_DATA && !name_len &&
 535			    sbi->options->prealloc) {
 536				pre_alloc = bytes_to_cluster(
 537						    sbi, get_pre_allocated(
 538								 new_size)) -
 539					    new_alen;
 540			}
 541
 542			/* Get the last LCN to allocate from. */
 543			if (old_alen &&
 544			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
 545				lcn = SPARSE_LCN;
 546			}
 547
 548			if (lcn == SPARSE_LCN)
 549				lcn = 0;
 550			else if (lcn)
 551				lcn += 1;
 552
 553			free = wnd_zeroes(&sbi->used.bitmap);
 554			if (to_allocate > free) {
 555				err = -ENOSPC;
 556				goto out;
 557			}
 558
 559			if (pre_alloc && to_allocate + pre_alloc > free)
 560				pre_alloc = 0;
 561		}
 562
 563		vcn = old_alen;
 564
 565		if (is_ext) {
 566			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
 567					   false)) {
 568				err = -ENOMEM;
 569				goto out;
 570			}
 571			alen = to_allocate;
 572		} else {
 573			/* ~3 bytes per fragment. */
 574			err = attr_allocate_clusters(
 575				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 576				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 577				is_mft ? 0 :
 578					 (sbi->record_size -
 579					  le32_to_cpu(rec->used) + 8) /
 580							 3 +
 581						 1,
 582				NULL, NULL);
 583			if (err)
 584				goto out;
 585		}
 586
 587		done += alen;
 588		vcn += alen;
 589		if (to_allocate > alen)
 590			to_allocate -= alen;
 591		else
 592			to_allocate = 0;
 593
 594pack_runs:
 595		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 596		if (err)
 597			goto undo_1;
 598
 599		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 600		new_alloc_tmp = (u64)next_svcn << cluster_bits;
 601		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 602		mi_b->dirty = dirty = true;
 603
 604		if (next_svcn >= vcn && !to_allocate) {
 605			/* Normal way. Update attribute and exit. */
 606			attr_b->nres.data_size = cpu_to_le64(new_size);
 607			goto ok;
 608		}
 609
 610		/* At least two MFT records to avoid a recursive loop. */
 611		if (is_mft && next_svcn == vcn &&
 612		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
 613			new_size = new_alloc_tmp;
 614			attr_b->nres.data_size = attr_b->nres.alloc_size;
 615			goto ok;
 616		}
 617
 618		if (le32_to_cpu(rec->used) < sbi->record_size) {
 619			old_alen = next_svcn;
 620			evcn = old_alen - 1;
 621			goto add_alloc_in_same_attr_seg;
 622		}
 623
 624		attr_b->nres.data_size = attr_b->nres.alloc_size;
 625		if (new_alloc_tmp < old_valid)
 626			attr_b->nres.valid_size = attr_b->nres.data_size;
 627
 628		if (type == ATTR_LIST) {
 629			err = ni_expand_list(ni);
 630			if (err)
 631				goto undo_2;
 632			if (next_svcn < vcn)
 633				goto pack_runs;
 634
 635			/* Layout of records is changed. */
 636			goto again;
 637		}
 638
 639		if (!ni->attr_list.size) {
 640			err = ni_create_attr_list(ni);
 641			/* In case of error the layout of records is not changed. */
 642			if (err)
 643				goto undo_2;
 644			/* Layout of records is changed. */
 645		}
 646
 647		if (next_svcn >= vcn) {
 648			/* This is MFT data, repeat. */
 649			goto again;
 650		}
 651
 652		/* Insert new attribute segment. */
 653		err = ni_insert_nonresident(ni, type, name, name_len, run,
 654					    next_svcn, vcn - next_svcn,
 655					    attr_b->flags, &attr, &mi, NULL);
 656
 657		/*
 658		 * Layout of records may have changed.
 659		 * Find base attribute to update.
 660		 */
 661		le_b = NULL;
 662		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 663				      NULL, &mi_b);
 664		if (!attr_b) {
 665			err = -EINVAL;
 666			goto bad_inode;
 667		}
 668
 669		if (err) {
 670			/* ni_insert_nonresident failed. */
 671			attr = NULL;
 672			goto undo_2;
 673		}
 674
 675		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
 676		if (ni->mi.rno != MFT_REC_MFT)
 677			run_truncate_head(run, evcn + 1);
 678
 679		svcn = le64_to_cpu(attr->nres.svcn);
 680		evcn = le64_to_cpu(attr->nres.evcn);
 681
 682		/*
 683		 * The attribute is in a consistent state.
 684		 * Save this point to restore to if the next steps fail.
 685		 */
 686		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
 687		attr_b->nres.valid_size = attr_b->nres.data_size =
 688			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 689		mi_b->dirty = dirty = true;
 690		goto again_1;
 691	}
 692
 693	if (new_size != old_size ||
 694	    (new_alloc != old_alloc && !keep_prealloc)) {
 695		/*
 696		 * Truncate clusters. In simple case we have to:
 697		 *  - update packed run in 'mi'
 698		 *  - update attr->nres.evcn
 699		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 700		 *  - mark and trim clusters as free (vcn, lcn, len)
 701		 */
 702		CLST dlen = 0;
 703
 704		vcn = max(svcn, new_alen);
 705		new_alloc_tmp = (u64)vcn << cluster_bits;
 706
 707		if (vcn > svcn) {
 708			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 709			if (err)
 710				goto out;
 711		} else if (le && le->vcn) {
 712			u16 le_sz = le16_to_cpu(le->size);
 713
 714			/*
 715			 * NOTE: List entries for one attribute are always
 716			 * the same size. We deal with last entry (vcn==0)
 717			 * and it is not first in entries array
 718			 * (list entry for std attribute always first).
 719			 * So it is safe to step back.
 720			 */
 721			mi_remove_attr(NULL, mi, attr);
 722
 723			if (!al_remove_le(ni, le)) {
 724				err = -EINVAL;
 725				goto bad_inode;
 726			}
 727
 728			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 729		} else {
 730			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
 731			mi->dirty = true;
 732		}
 733
 734		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 735
 736		if (vcn == new_alen) {
 737			attr_b->nres.data_size = cpu_to_le64(new_size);
 738			if (new_size < old_valid)
 739				attr_b->nres.valid_size =
 740					attr_b->nres.data_size;
 741		} else {
 742			if (new_alloc_tmp <=
 743			    le64_to_cpu(attr_b->nres.data_size))
 744				attr_b->nres.data_size =
 745					attr_b->nres.alloc_size;
 746			if (new_alloc_tmp <
 747			    le64_to_cpu(attr_b->nres.valid_size))
 748				attr_b->nres.valid_size =
 749					attr_b->nres.alloc_size;
 750		}
 751		mi_b->dirty = dirty = true;
 752
 753		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
 754					true);
 755		if (err)
 756			goto out;
 757
 758		if (is_ext) {
 759			/* dlen - really deallocated clusters. */
 760			le64_sub_cpu(&attr_b->nres.total_size,
 761				     ((u64)dlen << cluster_bits));
 762		}
 763
 764		run_truncate(run, vcn);
 765
 766		if (new_alloc_tmp <= new_alloc)
 767			goto ok;
 768
 769		old_size = new_alloc_tmp;
 770		vcn = svcn - 1;
 771
 772		if (le == le_b) {
 773			attr = attr_b;
 774			mi = mi_b;
 775			evcn = svcn - 1;
 776			svcn = 0;
 777			goto next_le;
 778		}
 779
 780		if (le->type != type || le->name_len != name_len ||
 781		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 782			err = -EINVAL;
 783			goto bad_inode;
 784		}
 785
 786		err = ni_load_mi(ni, le, &mi);
 787		if (err)
 788			goto out;
 789
 790		attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
 791				    &le->id);
 792		if (!attr) {
 793			err = -EINVAL;
 794			goto bad_inode;
 795		}
 796		goto next_le_1;
 797	}
 798
 799ok:
 800	if (new_valid) {
 801		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
 802
 803		if (attr_b->nres.valid_size != valid) {
 804			attr_b->nres.valid_size = valid;
 805			mi_b->dirty = true;
 806		}
 807	}
 808
 809ok1:
 810	if (ret)
 811		*ret = attr_b;
 812
 813	if (((type == ATTR_DATA && !name_len) ||
 814	     (type == ATTR_ALLOC && name == I30_NAME))) {
 815		/* Update inode_set_bytes. */
 816		if (attr_b->non_res) {
 817			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 818			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 819				inode_set_bytes(&ni->vfs_inode, new_alloc);
 820				dirty = true;
 821			}
 822		}
 823
 824		/* Don't forget to update duplicate information in parent. */
 825		if (dirty) {
 826			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 827			mark_inode_dirty(&ni->vfs_inode);
 828		}
 829	}
 830
 831	return 0;
 832
 833undo_2:
 834	vcn -= alen;
 835	attr_b->nres.data_size = cpu_to_le64(old_size);
 836	attr_b->nres.valid_size = cpu_to_le64(old_valid);
 837	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
 838
 839	/* Restore 'attr' and 'mi'. */
 840	if (attr)
 841		goto restore_run;
 842
 843	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
 844	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
 845		attr = attr_b;
 846		le = le_b;
 847		mi = mi_b;
 848	} else if (!le_b) {
 849		err = -EINVAL;
 850		goto bad_inode;
 851	} else {
 852		le = le_b;
 853		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
 854				    &svcn, &mi);
 855		if (!attr)
 856			goto bad_inode;
 857	}
 858
 859restore_run:
 860	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
 861		is_bad = true;
 862
 863undo_1:
 864	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
 865
 866	run_truncate(run, vcn);
 867out:
 868	if (is_bad) {
 869bad_inode:
 870		_ntfs_bad_inode(&ni->vfs_inode);
 871	}
 872	return err;
 873}
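/*
 * Illustrative sketch, not part of the kernel source: the preallocation
 * clamp in the extend path of attr_set_size() above.  The requested
 * clusters must fit into the free space (else -ENOSPC); the optional
 * preallocation bonus is dropped first when request + bonus would not fit.
 * Values are assumed; standalone userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long free_clusters = 100;	/* assumed wnd_zeroes() value */
	unsigned long long to_allocate = 80, pre_alloc = 40;

	if (to_allocate > free_clusters) {
		printf("-ENOSPC\n");
		return 1;
	}
	if (pre_alloc && to_allocate + pre_alloc > free_clusters)
		pre_alloc = 0;	/* keep the request, give up the bonus */

	/* Prints: allocate 80, preallocate 0 */
	printf("allocate %llu, preallocate %llu\n", to_allocate, pre_alloc);
	return 0;
}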
 874
 875/*
 876 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 877 *
 878 * @new == NULL means just to get the current mapping for 'vcn'.
 879 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
 880 * @zero - zero out newly allocated clusters.
 881 *
 882 *  NOTE:
 883 *  - @new != NULL is used only for sparse or compressed attributes.
 884 *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 885 */
 886int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 887			CLST *len, bool *new, bool zero)
 888{
 889	int err = 0;
 890	struct runs_tree *run = &ni->file.run;
 891	struct ntfs_sb_info *sbi;
 892	u8 cluster_bits;
 893	struct ATTRIB *attr, *attr_b;
 894	struct ATTR_LIST_ENTRY *le, *le_b;
 895	struct mft_inode *mi, *mi_b;
 896	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 897	CLST alloc, evcn;
 898	unsigned fr;
 899	u64 total_size, total_size0;
 900	int step = 0;
 901
 902	if (new)
 903		*new = false;
 904
 905	/* Try to find in cache. */
 906	down_read(&ni->file.run_lock);
 907	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 908		*len = 0;
 909	up_read(&ni->file.run_lock);
 910
 911	if (*len && (*lcn != SPARSE_LCN || !new))
 912		return 0; /* Fast normal way without allocation. */
 913
 914	/* No cluster in cache or we need to allocate cluster in hole. */
 915	sbi = ni->mi.sbi;
 916	cluster_bits = sbi->cluster_bits;
 917
 918	ni_lock(ni);
 919	down_write(&ni->file.run_lock);
 920
 921	/* Repeat the code above (under write lock). */
 922	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 923		*len = 0;
 924
 925	if (*len) {
 926		if (*lcn != SPARSE_LCN || !new)
 927			goto out; /* normal way without allocation. */
 928		if (clen > *len)
 929			clen = *len;
 930	}
 931
 932	le_b = NULL;
 933	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
 934	if (!attr_b) {
 935		err = -ENOENT;
 936		goto out;
 937	}
 938
 939	if (!attr_b->non_res) {
 940		*lcn = RESIDENT_LCN;
 941		*len = 1;
 942		goto out;
 943	}
 944
 945	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
 946	if (vcn >= asize) {
 947		if (new) {
 948			err = -EINVAL;
 949		} else {
 950			*len = 1;
 951			*lcn = SPARSE_LCN;
 952		}
 953		goto out;
 954	}
 955
 956	svcn = le64_to_cpu(attr_b->nres.svcn);
 957	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
 958
 959	attr = attr_b;
 960	le = le_b;
 961	mi = mi_b;
 962
 963	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
 964		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
 965				    &mi);
 966		if (!attr) {
 967			err = -EINVAL;
 968			goto out;
 969		}
 970		svcn = le64_to_cpu(attr->nres.svcn);
 971		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 972	}
 973
 974	/* Load the actual mapping into the run cache. */
 975	err = attr_load_runs(attr, ni, run, NULL);
 976	if (err)
 977		goto out;
 978
 979	/* Check for compressed frame. */
 980	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
 981				       &hint, run);
 982	if (err)
 983		goto out;
 984
 985	if (hint) {
 986		/* If the frame is compressed, don't touch it. */
 987		*lcn = COMPRESSED_LCN;
 988		/* length to the end of frame. */
 989		*len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
 990		err = 0;
 991		goto out;
 992	}
 993
 994	if (!*len) {
 995		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 996			if (*lcn != SPARSE_LCN || !new)
 997				goto ok; /* Slow normal way without allocation. */
 998
 999			if (clen > *len)
1000				clen = *len;
1001		} else if (!new) {
1002			/* Here we may return -ENOENT;
1003			 * in any case the caller gets zero length. */
1004			goto ok;
1005		}
1006	}
1007
1008	if (!is_attr_ext(attr_b)) {
1009		/* The code below is only for sparse or compressed attributes. */
1010		err = -EINVAL;
1011		goto out;
1012	}
1013
1014	vcn0 = vcn;
1015	to_alloc = clen;
1016	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
1017	/* Allocate frame-aligned clusters.
1018	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed files.
1019	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
1020	if (attr_b->nres.c_unit) {
1021		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1022		CLST cmask = ~(clst_per_frame - 1);
1023
1024		/* Get frame aligned vcn and to_alloc. */
1025		vcn = vcn0 & cmask;
1026		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1027		if (fr < clst_per_frame)
1028			fr = clst_per_frame;
1029		zero = true;
1030
1031		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
1032		if (vcn < svcn || evcn1 <= vcn) {
1033			struct ATTRIB *attr2;
1034			/* Load runs for truncated vcn. */
1035			attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
1036					     0, &vcn, &mi);
1037			if (!attr2) {
1038				err = -EINVAL;
1039				goto out;
1040			}
1041			evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
1042			err = attr_load_runs(attr2, ni, run, NULL);
1043			if (err)
1044				goto out;
1045		}
1046	}
1047
1048	if (vcn + to_alloc > asize)
1049		to_alloc = asize - vcn;
1050
1051	/* Get the last LCN to allocate from. */
1052	hint = 0;
1053
1054	if (vcn > evcn1) {
1055		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1056				   false)) {
1057			err = -ENOMEM;
1058			goto out;
1059		}
1060	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1061		hint = -1;
1062	}
1063
1064	/* Allocate and zeroout new clusters. */
1065	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1066				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1067				     fr, lcn, len);
1068	if (err)
1069		goto out;
1070	*new = true;
1071	step = 1;
1072
1073	end = vcn + alen;
1074	/* Save 'total_size0' to restore if error. */
1075	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1076	total_size = total_size0 + ((u64)alen << cluster_bits);
1077
1078	if (vcn != vcn0) {
1079		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1080			err = -EINVAL;
1081			goto out;
1082		}
1083		if (*lcn == SPARSE_LCN) {
1084			/* Internal error. Should not happen. */
1085			WARN_ON(1);
1086			err = -EINVAL;
1087			goto out;
1088		}
1089		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1090		if (vcn0 + *len > end)
1091			*len = end - vcn0;
1092	}
1093
1094repack:
1095	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1096	if (err)
1097		goto out;
1098
1099	attr_b->nres.total_size = cpu_to_le64(total_size);
1100	inode_set_bytes(&ni->vfs_inode, total_size);
1101	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1102
1103	mi_b->dirty = true;
1104	mark_inode_dirty(&ni->vfs_inode);
1105
1106	/* Stored [vcn : next_svcn) from [vcn : end). */
1107	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1108
1109	if (end <= evcn1) {
1110		if (next_svcn == evcn1) {
1111			/* Normal way. Update attribute and exit. */
1112			goto ok;
1113		}
1114		/* Add new segment [next_svcn : evcn1 - next_svcn). */
1115		if (!ni->attr_list.size) {
1116			err = ni_create_attr_list(ni);
1117			if (err)
1118				goto undo1;
1119			/* Layout of records is changed. */
1120			le_b = NULL;
1121			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1122					      0, NULL, &mi_b);
1123			if (!attr_b) {
1124				err = -ENOENT;
1125				goto out;
1126			}
1127
1128			attr = attr_b;
1129			le = le_b;
1130			mi = mi_b;
1131			goto repack;
1132		}
1133	}
1134
1135	/*
1136	 * The code below may require an additional cluster (to extend the
1137	 * attribute list) and/or one MFT record.
1138	 * It is too complex to undo operations if -ENOSPC occurs deep inside
1139	 * 'ni_insert_nonresident'.
1140	 * Return -ENOSPC in advance if there is no free cluster and no free MFT record.
1141	 */
1142	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1143		/* Undo step 1. */
1144		err = -ENOSPC;
1145		goto undo1;
1146	}
1147
1148	step = 2;
1149	svcn = evcn1;
1150
1151	/* Estimate next attribute. */
1152	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1153
1154	if (!attr) {
1155		/* Insert new attribute segment. */
1156		goto ins_ext;
1157	}
1158
1159	/* Try to update an existing attribute segment. */
1160	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1161	evcn = le64_to_cpu(attr->nres.evcn);
1162
1163	if (end < next_svcn)
1164		end = next_svcn;
1165	while (end > evcn) {
1166		/* Remove segment [svcn : evcn). */
1167		mi_remove_attr(NULL, mi, attr);
1168
1169		if (!al_remove_le(ni, le)) {
1170			err = -EINVAL;
1171			goto out;
1172		}
1173
1174		if (evcn + 1 >= alloc) {
1175			/* Last attribute segment. */
1176			evcn1 = evcn + 1;
1177			goto ins_ext;
1178		}
1179
1180		if (ni_load_mi(ni, le, &mi)) {
1181			attr = NULL;
1182			goto out;
1183		}
1184
1185		attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1186		if (!attr) {
1187			err = -EINVAL;
1188			goto out;
1189		}
1190		svcn = le64_to_cpu(attr->nres.svcn);
1191		evcn = le64_to_cpu(attr->nres.evcn);
1192	}
1193
1194	if (end < svcn)
1195		end = svcn;
1196
1197	err = attr_load_runs(attr, ni, run, &end);
1198	if (err)
1199		goto out;
1200
1201	evcn1 = evcn + 1;
1202	attr->nres.svcn = cpu_to_le64(next_svcn);
1203	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1204	if (err)
1205		goto out;
1206
1207	le->vcn = cpu_to_le64(next_svcn);
1208	ni->attr_list.dirty = true;
1209	mi->dirty = true;
1210	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1211
1212ins_ext:
1213	if (evcn1 > next_svcn) {
1214		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1215					    next_svcn, evcn1 - next_svcn,
1216					    attr_b->flags, &attr, &mi, NULL);
1217		if (err)
1218			goto out;
1219	}
1220ok:
1221	run_truncate_around(run, vcn);
1222out:
1223	if (err && step > 1) {
1224		/* Too complex to restore. */
1225		_ntfs_bad_inode(&ni->vfs_inode);
1226	}
1227	up_write(&ni->file.run_lock);
1228	ni_unlock(ni);
1229
1230	return err;
1231
1232undo1:
1233	/* Undo step1. */
1234	attr_b->nres.total_size = cpu_to_le64(total_size0);
1235	inode_set_bytes(&ni->vfs_inode, total_size0);
1236
1237	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1238	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1239	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1240		_ntfs_bad_inode(&ni->vfs_inode);
1241	}
1242	goto out;
1243}
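/*
 * Illustrative sketch, not part of the kernel source: the frame rounding in
 * attr_data_get_block() above.  For compressed attributes the allocation is
 * widened to whole compression frames: the start VCN is rounded down and
 * the end rounded up to a multiple of clst_per_frame.  Values are assumed;
 * standalone userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long clst_per_frame = 16;	/* assumed 1 << c_unit */
	unsigned long long cmask = ~(clst_per_frame - 1);
	unsigned long long vcn0 = 21, clen = 3;	/* requested [21, 24) */

	unsigned long long vcn = vcn0 & cmask;
	unsigned long long to_alloc =
		((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;

	/* Prints: vcn=16 to_alloc=16, i.e. the whole frame [16, 32) */
	printf("vcn=%llu to_alloc=%llu\n", vcn, to_alloc);
	return 0;
}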
1244
1245int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
1246{
1247	u64 vbo;
1248	struct ATTRIB *attr;
1249	u32 data_size;
1250	size_t len;
1251
1252	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1253	if (!attr)
1254		return -EINVAL;
1255
1256	if (attr->non_res)
1257		return E_NTFS_NONRESIDENT;
1258
1259	vbo = folio->index << PAGE_SHIFT;
1260	data_size = le32_to_cpu(attr->res.data_size);
1261	if (vbo > data_size)
1262		len = 0;
1263	else
1264		len = min(data_size - vbo, folio_size(folio));
1265
1266	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
1267	folio_mark_uptodate(folio);
1268
1269	return 0;
1270}
1271
1272int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
1273{
1274	u64 vbo;
1275	struct mft_inode *mi;
1276	struct ATTRIB *attr;
1277	u32 data_size;
1278
1279	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1280	if (!attr)
1281		return -EINVAL;
1282
1283	if (attr->non_res) {
1284		/* Return special error code to check this case. */
1285		return E_NTFS_NONRESIDENT;
1286	}
1287
1288	vbo = folio->index << PAGE_SHIFT;
1289	data_size = le32_to_cpu(attr->res.data_size);
1290	if (vbo < data_size) {
1291		char *data = resident_data(attr);
1292		size_t len = min(data_size - vbo, folio_size(folio));
1293
1294		memcpy_from_folio(data + vbo, folio, 0, len);
1295		mi->dirty = true;
1296	}
1297	ni->i_valid = data_size;
1298
1299	return 0;
1300}
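/*
 * Illustrative sketch, not part of the kernel source: the clamp shared by
 * attr_data_read_resident()/attr_data_write_resident() above.  A folio at
 * index i covers file bytes from i << PAGE_SHIFT; only
 * min(data_size - vbo, folio_size) of them overlap the resident payload,
 * and none if vbo is past the end.  Assumes 4K pages; standalone C.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ull << PAGE_SHIFT)

int main(void)
{
	unsigned long long index = 1;		/* assumed: second folio */
	unsigned long long data_size = 5000;	/* assumed resident bytes */
	unsigned long long vbo = index << PAGE_SHIFT;
	unsigned long long len = vbo > data_size ? 0 :
		(data_size - vbo < PAGE_SIZE ? data_size - vbo : PAGE_SIZE);

	/* Prints: folio 1: 904 bytes at vbo 4096 */
	printf("folio %llu: %llu bytes at vbo %llu\n", index, len, vbo);
	return 0;
}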
1301
1302/*
1303 * attr_load_runs_vcn - Load runs with VCN.
1304 */
1305int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1306		       const __le16 *name, u8 name_len, struct runs_tree *run,
1307		       CLST vcn)
1308{
1309	struct ATTRIB *attr;
1310	int err;
1311	CLST svcn, evcn;
1312	u16 ro;
1313
1314	if (!ni) {
1315		/* Is record corrupted? */
1316		return -ENOENT;
1317	}
1318
1319	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1320	if (!attr) {
1321		/* Is record corrupted? */
1322		return -ENOENT;
1323	}
1324
1325	svcn = le64_to_cpu(attr->nres.svcn);
1326	evcn = le64_to_cpu(attr->nres.evcn);
1327
1328	if (evcn < vcn || vcn < svcn) {
1329		/* Is record corrupted? */
1330		return -EINVAL;
1331	}
1332
1333	ro = le16_to_cpu(attr->nres.run_off);
1334
1335	if (ro > le32_to_cpu(attr->size))
1336		return -EINVAL;
1337
1338	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1339			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1340	if (err < 0)
1341		return err;
1342	return 0;
1343}
1344
1345/*
1346 * attr_load_runs_range - Load runs for given range [from to).
1347 */
1348int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1349			 const __le16 *name, u8 name_len, struct runs_tree *run,
1350			 u64 from, u64 to)
1351{
1352	struct ntfs_sb_info *sbi = ni->mi.sbi;
1353	u8 cluster_bits = sbi->cluster_bits;
1354	CLST vcn;
1355	CLST vcn_last = (to - 1) >> cluster_bits;
1356	CLST lcn, clen;
1357	int err;
1358
1359	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1360		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1361			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1362						 vcn);
1363			if (err)
1364				return err;
1365			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1366		}
1367	}
1368
1369	return 0;
1370}
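/*
 * Illustrative sketch, not part of the kernel source: the iteration bounds
 * used by attr_load_runs_range() above.  A half-open byte range [from, to)
 * touches clusters from >> cluster_bits through (to - 1) >> cluster_bits,
 * inclusive.  Values are assumed; standalone userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned char cluster_bits = 12;	/* assumed 4K clusters */
	unsigned long long from = 5000, to = 20480;

	unsigned long long first = from >> cluster_bits;
	unsigned long long last = (to - 1) >> cluster_bits;

	/* Prints: bytes [5000, 20480) -> clusters 1..4 */
	printf("bytes [%llu, %llu) -> clusters %llu..%llu\n",
	       from, to, first, last);
	return 0;
}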
1371
1372#ifdef CONFIG_NTFS3_LZX_XPRESS
1373/*
1374 * attr_wof_frame_info
1375 *
1376 * Read the header of an Xpress/LZX file to get info about a frame.
1377 */
1378int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1379			struct runs_tree *run, u64 frame, u64 frames,
1380			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1381{
1382	struct ntfs_sb_info *sbi = ni->mi.sbi;
1383	u64 vbo[2], off[2], wof_size;
1384	u32 voff;
1385	u8 bytes_per_off;
1386	char *addr;
1387	struct folio *folio;
1388	int i, err;
1389	__le32 *off32;
1390	__le64 *off64;
1391
1392	if (ni->vfs_inode.i_size < 0x100000000ull) {
1393		/* File starts with array of 32 bit offsets. */
1394		bytes_per_off = sizeof(__le32);
1395		vbo[1] = frame << 2;
1396		*vbo_data = frames << 2;
1397	} else {
1398		/* File starts with array of 64 bit offsets. */
1399		bytes_per_off = sizeof(__le64);
1400		vbo[1] = frame << 3;
1401		*vbo_data = frames << 3;
1402	}
1403
1404	/*
1405	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1406	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1407	 */
1408	if (!attr->non_res) {
1409		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1410			_ntfs_bad_inode(&ni->vfs_inode);
1411			return -EINVAL;
1412		}
1413		addr = resident_data(attr);
1414
1415		if (bytes_per_off == sizeof(__le32)) {
1416			off32 = Add2Ptr(addr, vbo[1]);
1417			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1418			off[1] = le32_to_cpu(off32[0]);
1419		} else {
1420			off64 = Add2Ptr(addr, vbo[1]);
1421			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1422			off[1] = le64_to_cpu(off64[0]);
1423		}
1424
1425		*vbo_data += off[0];
1426		*ondisk_size = off[1] - off[0];
1427		return 0;
1428	}
1429
1430	wof_size = le64_to_cpu(attr->nres.data_size);
1431	down_write(&ni->file.run_lock);
1432	folio = ni->file.offs_folio;
1433	if (!folio) {
1434		folio = folio_alloc(GFP_KERNEL, 0);
1435		if (!folio) {
1436			err = -ENOMEM;
1437			goto out;
1438		}
1439		folio->index = -1;
1440		ni->file.offs_folio = folio;
1441	}
1442	folio_lock(folio);
1443	addr = folio_address(folio);
1444
1445	if (vbo[1]) {
1446		voff = vbo[1] & (PAGE_SIZE - 1);
1447		vbo[0] = vbo[1] - bytes_per_off;
1448		i = 0;
1449	} else {
1450		voff = 0;
1451		vbo[0] = 0;
1452		off[0] = 0;
1453		i = 1;
1454	}
1455
1456	do {
1457		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1458
1459		if (index != folio->index) {
1460			struct page *page = &folio->page;
1461			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1462			u64 to = min(from + PAGE_SIZE, wof_size);
1463
1464			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1465						   ARRAY_SIZE(WOF_NAME), run,
1466						   from, to);
1467			if (err)
1468				goto out1;
1469
1470			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1471					     to - from, REQ_OP_READ);
1472			if (err) {
1473				folio->index = -1;
1474				goto out1;
1475			}
1476			folio->index = index;
1477		}
1478
1479		if (i) {
1480			if (bytes_per_off == sizeof(__le32)) {
1481				off32 = Add2Ptr(addr, voff);
1482				off[1] = le32_to_cpu(*off32);
1483			} else {
1484				off64 = Add2Ptr(addr, voff);
1485				off[1] = le64_to_cpu(*off64);
1486			}
1487		} else if (!voff) {
1488			if (bytes_per_off == sizeof(__le32)) {
1489				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1490				off[0] = le32_to_cpu(*off32);
1491			} else {
1492				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1493				off[0] = le64_to_cpu(*off64);
1494			}
1495		} else {
1496			/* Two values in one page. */
1497			if (bytes_per_off == sizeof(__le32)) {
1498				off32 = Add2Ptr(addr, voff);
1499				off[0] = le32_to_cpu(off32[-1]);
1500				off[1] = le32_to_cpu(off32[0]);
1501			} else {
1502				off64 = Add2Ptr(addr, voff);
1503				off[0] = le64_to_cpu(off64[-1]);
1504				off[1] = le64_to_cpu(off64[0]);
1505			}
1506			break;
1507		}
1508	} while (++i < 2);
1509
1510	*vbo_data += off[0];
1511	*ondisk_size = off[1] - off[0];
1512
1513out1:
1514	folio_unlock(folio);
1515out:
1516	up_write(&ni->file.run_lock);
1517	return err;
1518}
1519#endif
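/*
 * Illustrative sketch, not part of the kernel source: the frame index that
 * attr_wof_frame_info() above decodes.  A WOF-compressed stream begins with
 * an array of frame end-offsets (32-bit entries while the uncompressed size
 * is below 4G); frame k occupies [end[k-1], end[k]) relative to the end of
 * that array, with end[-1] taken as 0.  The offsets below are invented;
 * standalone userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int end[] = { 3000, 7100, 9050 };	/* assumed frame ends */
	unsigned long long frames = 3, frame = 1;
	unsigned long long array_bytes = frames << 2;	/* 32-bit entries */

	unsigned long long start = frame ? end[frame - 1] : 0;
	unsigned long long vbo_data = array_bytes + start;
	unsigned int ondisk_size = end[frame] - start;

	/* Prints: frame 1: vbo=3012, 4100 bytes on disk */
	printf("frame %llu: vbo=%llu, %u bytes on disk\n",
	       frame, vbo_data, ondisk_size);
	return 0;
}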
1520
1521/*
1522 * attr_is_frame_compressed - Used to detect compressed frame.
1523 *
1524 * attr - base (primary) attribute segment.
1525 * run  - run to use, usually == &ni->file.run.
1526 * Only base segments contain a valid 'attr->nres.c_unit'.
1527 */
1528int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1529			     CLST frame, CLST *clst_data, struct runs_tree *run)
1530{
1531	int err;
1532	u32 clst_frame;
1533	CLST clen, lcn, vcn, alen, slen, vcn_next;
1534	size_t idx;
1535
1536	*clst_data = 0;
1537
1538	if (!is_attr_compressed(attr))
1539		return 0;
1540
1541	if (!attr->non_res)
1542		return 0;
1543
1544	clst_frame = 1u << attr->nres.c_unit;
1545	vcn = frame * clst_frame;
1546
1547	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1548		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1549					 attr->name_len, run, vcn);
1550		if (err)
1551			return err;
1552
1553		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1554			return -EINVAL;
1555	}
1556
1557	if (lcn == SPARSE_LCN) {
1558		/* Sparse frame. */
1559		return 0;
1560	}
1561
1562	if (clen >= clst_frame) {
1563		/*
1564		 * The frame is not compressed because
1565		 * it does not contain any sparse clusters.
1566		 */
1567		*clst_data = clst_frame;
1568		return 0;
1569	}
1570
1571	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1572	slen = 0;
1573	*clst_data = clen;
1574
1575	/*
1576	 * The frame is compressed if *clst_data + slen >= clst_frame.
1577	 * Check next fragments.
1578	 */
1579	while ((vcn += clen) < alen) {
1580		vcn_next = vcn;
1581
1582		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1583		    vcn_next != vcn) {
1584			err = attr_load_runs_vcn(ni, attr->type,
1585						 attr_name(attr),
1586						 attr->name_len, run, vcn_next);
1587			if (err)
1588				return err;
1589			vcn = vcn_next;
1590
1591			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1592				return -EINVAL;
1593		}
1594
1595		if (lcn == SPARSE_LCN) {
1596			slen += clen;
1597		} else {
1598			if (slen) {
1599				/*
1600				 * Data clusters + sparse clusters are
1601				 * not enough for the frame.
1602				 */
1603				return -EINVAL;
1604			}
1605			*clst_data += clen;
1606		}
1607
1608		if (*clst_data + slen >= clst_frame) {
1609			if (!slen) {
1610				/*
1611				 * There are no sparse clusters in this frame,
1612				 * so it is not compressed.
1613				 */
1614				*clst_data = clst_frame;
1615			} else {
1616				/* Frame is compressed. */
1617			}
1618			break;
1619		}
1620	}
1621
1622	return 0;
1623}
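/*
 * Illustrative sketch, not part of the kernel source: the decision rule of
 * attr_is_frame_compressed() above.  Within one compression frame, data
 * clusters and the sparse clusters behind them are summed; once
 * data + sparse covers the frame, the frame is compressed iff the sparse
 * part is non-empty.  The fragment list is invented; standalone C.
 */
#include <stdio.h>

#define SPARSE (-1LL)

int main(void)
{
	/* Assumed 16-cluster frame mapped as 10 data + 6 sparse clusters. */
	struct { long long lcn; unsigned int len; } frags[] = {
		{ 2000, 10 }, { SPARSE, 6 },
	};
	unsigned int clst_frame = 16, clst_data = 0, slen = 0;

	for (int i = 0; i < 2 && clst_data + slen < clst_frame; i++) {
		if (frags[i].lcn == SPARSE)
			slen += frags[i].len;
		else
			clst_data += frags[i].len;
	}

	if (slen)	/* Prints: compressed, 10 of 16 clusters hold data */
		printf("compressed, %u of %u clusters hold data\n",
		       clst_data, clst_frame);
	else
		printf("not compressed\n");
	return 0;
}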
1624
1625/*
1626 * attr_allocate_frame - Allocate/free clusters for @frame.
1627 *
1628 * Assumed: down_write(&ni->file.run_lock);
1629 */
1630int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1631			u64 new_valid)
1632{
1633	int err = 0;
1634	struct runs_tree *run = &ni->file.run;
1635	struct ntfs_sb_info *sbi = ni->mi.sbi;
1636	struct ATTRIB *attr = NULL, *attr_b;
1637	struct ATTR_LIST_ENTRY *le, *le_b;
1638	struct mft_inode *mi, *mi_b;
1639	CLST svcn, evcn1, next_svcn, len;
1640	CLST vcn, end, clst_data;
1641	u64 total_size, valid_size, data_size;
1642
1643	le_b = NULL;
1644	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1645	if (!attr_b)
1646		return -ENOENT;
1647
1648	if (!is_attr_ext(attr_b))
1649		return -EINVAL;
1650
1651	vcn = frame << NTFS_LZNT_CUNIT;
1652	total_size = le64_to_cpu(attr_b->nres.total_size);
1653
1654	svcn = le64_to_cpu(attr_b->nres.svcn);
1655	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1656	data_size = le64_to_cpu(attr_b->nres.data_size);
1657
1658	if (svcn <= vcn && vcn < evcn1) {
1659		attr = attr_b;
1660		le = le_b;
1661		mi = mi_b;
1662	} else if (!le_b) {
1663		err = -EINVAL;
1664		goto out;
1665	} else {
1666		le = le_b;
1667		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1668				    &mi);
1669		if (!attr) {
1670			err = -EINVAL;
1671			goto out;
1672		}
1673		svcn = le64_to_cpu(attr->nres.svcn);
1674		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1675	}
1676
1677	err = attr_load_runs(attr, ni, run, NULL);
1678	if (err)
1679		goto out;
1680
1681	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
1682	if (err)
1683		goto out;
1684
1685	total_size -= (u64)clst_data << sbi->cluster_bits;
1686
1687	len = bytes_to_cluster(sbi, compr_size);
1688
1689	if (len == clst_data)
1690		goto out;
1691
1692	if (len < clst_data) {
1693		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1694					NULL, true);
1695		if (err)
1696			goto out;
1697
1698		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1699				   false)) {
1700			err = -ENOMEM;
1701			goto out;
1702		}
1703		end = vcn + clst_data;
1704		/* Run contains updated range [vcn + len : end). */
1705	} else {
1706		CLST alen, hint = 0;
1707		/* Get the last LCN to allocate from. */
1708		if (vcn + clst_data &&
1709		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1710				      NULL)) {
1711			hint = -1;
1712		}
1713
1714		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1715					     hint + 1, len - clst_data, NULL,
1716					     ALLOCATE_DEF, &alen, 0, NULL,
1717					     NULL);
1718		if (err)
1719			goto out;
1720
1721		end = vcn + len;
1722		/* Run contains updated range [vcn + clst_data : end). */
1723	}
1724
1725	total_size += (u64)len << sbi->cluster_bits;
1726
1727repack:
1728	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1729	if (err)
1730		goto out;
1731
1732	attr_b->nres.total_size = cpu_to_le64(total_size);
1733	inode_set_bytes(&ni->vfs_inode, total_size);
1734	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1735
1736	mi_b->dirty = true;
1737	mark_inode_dirty(&ni->vfs_inode);
1738
1739	/* Stored [vcn : next_svcn) from [vcn : end). */
1740	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1741
1742	if (end <= evcn1) {
1743		if (next_svcn == evcn1) {
1744			/* Normal way. Update attribute and exit. */
1745			goto ok;
1746		}
1747		/* Add new segment [next_svcn : evcn1). */
1748		if (!ni->attr_list.size) {
1749			err = ni_create_attr_list(ni);
1750			if (err)
1751				goto out;
1752			/* Layout of records has changed. */
1753			le_b = NULL;
1754			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1755					      0, NULL, &mi_b);
1756			if (!attr_b) {
1757				err = -ENOENT;
1758				goto out;
1759			}
1760
1761			attr = attr_b;
1762			le = le_b;
1763			mi = mi_b;
1764			goto repack;
1765		}
1766	}
1767
1768	svcn = evcn1;
1769
1770	/* Look for the next attribute segment. */
1771	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1772
1773	if (attr) {
1774		CLST alloc = bytes_to_cluster(
1775			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1776		CLST evcn = le64_to_cpu(attr->nres.evcn);
1777
1778		if (end < next_svcn)
1779			end = next_svcn;
1780		while (end > evcn) {
1781			/* Remove segment [svcn : evcn). */
1782			mi_remove_attr(NULL, mi, attr);
1783
1784			if (!al_remove_le(ni, le)) {
1785				err = -EINVAL;
1786				goto out;
1787			}
1788
1789			if (evcn + 1 >= alloc) {
1790				/* Last attribute segment. */
1791				evcn1 = evcn + 1;
1792				goto ins_ext;
1793			}
1794
1795			if (ni_load_mi(ni, le, &mi)) {
1796				attr = NULL;
1797				goto out;
1798			}
1799
1800			attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
1801					    &le->id);
1802			if (!attr) {
1803				err = -EINVAL;
1804				goto out;
1805			}
1806			svcn = le64_to_cpu(attr->nres.svcn);
1807			evcn = le64_to_cpu(attr->nres.evcn);
1808		}
1809
1810		if (end < svcn)
1811			end = svcn;
1812
1813		err = attr_load_runs(attr, ni, run, &end);
1814		if (err)
1815			goto out;
1816
1817		evcn1 = evcn + 1;
1818		attr->nres.svcn = cpu_to_le64(next_svcn);
1819		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1820		if (err)
1821			goto out;
1822
1823		le->vcn = cpu_to_le64(next_svcn);
1824		ni->attr_list.dirty = true;
1825		mi->dirty = true;
1826
1827		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1828	}
1829ins_ext:
1830	if (evcn1 > next_svcn) {
1831		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1832					    next_svcn, evcn1 - next_svcn,
1833					    attr_b->flags, &attr, &mi, NULL);
1834		if (err)
1835			goto out;
1836	}
1837ok:
1838	run_truncate_around(run, vcn);
1839out:
1840	if (attr_b) {
1841		if (new_valid > data_size)
1842			new_valid = data_size;
1843
1844		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1845		if (new_valid != valid_size) {
1846			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1847			mi_b->dirty = true;
1848		}
1849	}
1850
1851	return err;
1852}
1853
1854/*
1855 * attr_collapse_range - Collapse range in file.
1856 */
1857int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1858{
1859	int err = 0;
1860	struct runs_tree *run = &ni->file.run;
1861	struct ntfs_sb_info *sbi = ni->mi.sbi;
1862	struct ATTRIB *attr = NULL, *attr_b;
1863	struct ATTR_LIST_ENTRY *le, *le_b;
1864	struct mft_inode *mi, *mi_b;
1865	CLST svcn, evcn1, len, dealloc, alen;
1866	CLST vcn, end;
1867	u64 valid_size, data_size, alloc_size, total_size;
1868	u32 mask;
1869	__le16 a_flags;
1870
1871	if (!bytes)
1872		return 0;
1873
1874	le_b = NULL;
1875	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1876	if (!attr_b)
1877		return -ENOENT;
1878
1879	if (!attr_b->non_res) {
1880		/* Attribute is resident. Nothing to do? */
1881		return 0;
1882	}
1883
1884	data_size = le64_to_cpu(attr_b->nres.data_size);
1885	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1886	a_flags = attr_b->flags;
1887
1888	if (is_attr_ext(attr_b)) {
1889		total_size = le64_to_cpu(attr_b->nres.total_size);
1890		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1891	} else {
1892		total_size = alloc_size;
1893		mask = sbi->cluster_mask;
1894	}
1895
1896	if ((vbo & mask) || (bytes & mask)) {
1897		/* Only cluster-aligned ranges can be collapsed. */
1898		return -EINVAL;
1899	}
1900
1901	if (vbo > data_size)
1902		return -EINVAL;
1903
1904	down_write(&ni->file.run_lock);
1905
1906	if (vbo + bytes >= data_size) {
1907		u64 new_valid = min(ni->i_valid, vbo);
1908
1909		/* Simple truncate file at 'vbo'. */
1910		truncate_setsize(&ni->vfs_inode, vbo);
1911		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1912				    &new_valid, true, NULL);
1913
1914		if (!err && new_valid < ni->i_valid)
1915			ni->i_valid = new_valid;
1916
1917		goto out;
1918	}
1919
1920	/*
1921	 * Enumerate all attribute segments and collapse.
1922	 */
1923	alen = alloc_size >> sbi->cluster_bits;
1924	vcn = vbo >> sbi->cluster_bits;
1925	len = bytes >> sbi->cluster_bits;
1926	end = vcn + len;
1927	dealloc = 0;
1928
1929	svcn = le64_to_cpu(attr_b->nres.svcn);
1930	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1931
1932	if (svcn <= vcn && vcn < evcn1) {
1933		attr = attr_b;
1934		le = le_b;
1935		mi = mi_b;
1936	} else if (!le_b) {
1937		err = -EINVAL;
1938		goto out;
1939	} else {
1940		le = le_b;
1941		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1942				    &mi);
1943		if (!attr) {
1944			err = -EINVAL;
1945			goto out;
1946		}
1947
1948		svcn = le64_to_cpu(attr->nres.svcn);
1949		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1950	}
1951
1952	for (;;) {
1953		if (svcn >= end) {
1954			/* Shift VCNs down by 'len'. */
1955			attr->nres.svcn = cpu_to_le64(svcn - len);
1956			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1957			if (le) {
1958				le->vcn = attr->nres.svcn;
1959				ni->attr_list.dirty = true;
1960			}
1961			mi->dirty = true;
1962		} else if (svcn < vcn || end < evcn1) {
1963			CLST vcn1, eat, next_svcn;
1964
1965			/* Collapse a part of this attribute segment. */
1966			err = attr_load_runs(attr, ni, run, &svcn);
1967			if (err)
1968				goto out;
1969			vcn1 = max(vcn, svcn);
1970			eat = min(end, evcn1) - vcn1;
1971
1972			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1973						true);
1974			if (err)
1975				goto out;
1976
1977			if (!run_collapse_range(run, vcn1, eat)) {
1978				err = -ENOMEM;
1979				goto out;
1980			}
1981
1982			if (svcn >= vcn) {
1983				/* Shift the start VCN. */
1984				attr->nres.svcn = cpu_to_le64(vcn);
1985				if (le) {
1986					le->vcn = attr->nres.svcn;
1987					ni->attr_list.dirty = true;
1988				}
1989			}
1990
1991			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1992			if (err)
1993				goto out;
1994
1995			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1996			if (next_svcn + eat < evcn1) {
1997				err = ni_insert_nonresident(
1998					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1999					evcn1 - eat - next_svcn, a_flags, &attr,
2000					&mi, &le);
2001				if (err)
2002					goto out;
2003
2004				/* Layout of records may have changed. */
2005				attr_b = NULL;
2006			}
2007
2008			/* Free all allocated memory. */
2009			run_truncate(run, 0);
2010		} else {
2011			u16 le_sz;
2012			u16 roff = le16_to_cpu(attr->nres.run_off);
2013
2014			if (roff > le32_to_cpu(attr->size)) {
2015				err = -EINVAL;
2016				goto out;
2017			}
2018
2019			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2020				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2021				      le32_to_cpu(attr->size) - roff);
2022
2023			/* Delete this attribute segment. */
2024			mi_remove_attr(NULL, mi, attr);
2025			if (!le)
2026				break;
2027
2028			le_sz = le16_to_cpu(le->size);
2029			if (!al_remove_le(ni, le)) {
2030				err = -EINVAL;
2031				goto out;
2032			}
2033
2034			if (evcn1 >= alen)
2035				break;
2036
2037			if (!svcn) {
2038				/* Load next record that contains this attribute. */
2039				if (ni_load_mi(ni, le, &mi)) {
2040					err = -EINVAL;
2041					goto out;
2042				}
2043
2044				/* Look for required attribute. */
2045				attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
2046						    NULL, 0, &le->id);
2047				if (!attr) {
2048					err = -EINVAL;
2049					goto out;
2050				}
2051				goto next_attr;
2052			}
2053			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2054		}
2055
2056		if (evcn1 >= alen)
2057			break;
2058
2059		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2060		if (!attr) {
2061			err = -EINVAL;
2062			goto out;
2063		}
2064
2065next_attr:
2066		svcn = le64_to_cpu(attr->nres.svcn);
2067		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2068	}
2069
2070	if (!attr_b) {
2071		le_b = NULL;
2072		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2073				      &mi_b);
2074		if (!attr_b) {
2075			err = -ENOENT;
2076			goto out;
2077		}
2078	}
2079
2080	data_size -= bytes;
2081	valid_size = ni->i_valid;
2082	if (vbo + bytes <= valid_size)
2083		valid_size -= bytes;
2084	else if (vbo < valid_size)
2085		valid_size = vbo;
2086
2087	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2088	attr_b->nres.data_size = cpu_to_le64(data_size);
2089	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2090	total_size -= (u64)dealloc << sbi->cluster_bits;
2091	if (is_attr_ext(attr_b))
2092		attr_b->nres.total_size = cpu_to_le64(total_size);
2093	mi_b->dirty = true;
2094
2095	/* Update inode size. */
2096	ni->i_valid = valid_size;
2097	i_size_write(&ni->vfs_inode, data_size);
2098	inode_set_bytes(&ni->vfs_inode, total_size);
2099	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2100	mark_inode_dirty(&ni->vfs_inode);
2101
2102out:
2103	up_write(&ni->file.run_lock);
2104	if (err)
2105		_ntfs_bad_inode(&ni->vfs_inode);
2106
2107	return err;
2108}
2109
2110/*
2111 * attr_punch_hole
2112 *
2113 * Not for normal files.
2114 */
2115int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2116{
2117	int err = 0;
2118	struct runs_tree *run = &ni->file.run;
2119	struct ntfs_sb_info *sbi = ni->mi.sbi;
2120	struct ATTRIB *attr = NULL, *attr_b;
2121	struct ATTR_LIST_ENTRY *le, *le_b;
2122	struct mft_inode *mi, *mi_b;
2123	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2124	u64 total_size, alloc_size;
2125	u32 mask;
2126	__le16 a_flags;
2127	struct runs_tree run2;
2128
2129	if (!bytes)
2130		return 0;
2131
2132	le_b = NULL;
2133	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2134	if (!attr_b)
2135		return -ENOENT;
2136
2137	if (!attr_b->non_res) {
2138		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2139		u32 from, to;
2140
2141		if (vbo > data_size)
2142			return 0;
2143
2144		from = vbo;
2145		to = min_t(u64, vbo + bytes, data_size);
2146		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2147		return 0;
2148	}
2149
2150	if (!is_attr_ext(attr_b))
2151		return -EOPNOTSUPP;
2152
2153	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2154	total_size = le64_to_cpu(attr_b->nres.total_size);
2155
2156	if (vbo >= alloc_size) {
2157		/* NOTE: It is allowed. */
2158		return 0;
2159	}
2160
2161	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2162
2163	bytes += vbo;
2164	if (bytes > alloc_size)
2165		bytes = alloc_size;
2166	bytes -= vbo;
2167
2168	if ((vbo & mask) || (bytes & mask)) {
2169		/* We have to zero a range(s). */
2170		if (frame_size == NULL) {
2171			/* Caller insists range is aligned. */
2172			return -EINVAL;
2173		}
2174		*frame_size = mask + 1;
2175		return E_NTFS_NOTALIGNED;
2176	}
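
	/*
	 * Editorial note: returning E_NTFS_NOTALIGNED with *frame_size =
	 * mask + 1 leaves zeroing the unaligned head/tail of the range to
	 * the caller; that the caller does so through the page cache and
	 * retries with an aligned range is an assumption based on the
	 * error's name, not on code shown here.
	 */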
2177
2178	down_write(&ni->file.run_lock);
2179	run_init(&run2);
2180	run_truncate(run, 0);
2181
2182	/*
2183	 * Enumerate all attribute segments and punch hole where necessary.
2184	 */
2185	alen = alloc_size >> sbi->cluster_bits;
2186	vcn = vbo >> sbi->cluster_bits;
2187	len = bytes >> sbi->cluster_bits;
2188	end = vcn + len;
2189	hole = 0;
2190
2191	svcn = le64_to_cpu(attr_b->nres.svcn);
2192	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2193	a_flags = attr_b->flags;
2194
2195	if (svcn <= vcn && vcn < evcn1) {
2196		attr = attr_b;
2197		le = le_b;
2198		mi = mi_b;
2199	} else if (!le_b) {
2200		err = -EINVAL;
2201		goto bad_inode;
2202	} else {
2203		le = le_b;
2204		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2205				    &mi);
2206		if (!attr) {
2207			err = -EINVAL;
2208			goto bad_inode;
2209		}
2210
2211		svcn = le64_to_cpu(attr->nres.svcn);
2212		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2213	}
2214
2215	while (svcn < end) {
2216		CLST vcn1, zero, hole2 = hole;
2217
2218		err = attr_load_runs(attr, ni, run, &svcn);
2219		if (err)
2220			goto done;
2221		vcn1 = max(vcn, svcn);
2222		zero = min(end, evcn1) - vcn1;
2223
2224		/*
2225		 * Check range [vcn1 : vcn1 + zero).
2226		 * Calculate how many allocated clusters it contains.
2227		 * Don't do any destructive actions.
2228		 */
2229		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2230		if (err)
2231			goto done;
2232
2233		/* Check if required range is already hole. */
2234		if (hole2 == hole)
2235			goto next_attr;
2236
2237		/* Make a clone of run to undo. */
2238		err = run_clone(run, &run2);
2239		if (err)
2240			goto done;
2241
2242		/* Make range [vcn1 : vcn1 + zero) a sparse hole. */
2243		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2244			err = -ENOMEM;
2245			goto done;
2246		}
2247
2248		/* Update run in attribute segment. */
2249		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2250		if (err)
2251			goto done;
2252		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2253		if (next_svcn < evcn1) {
2254			/* Insert new attribute segment. */
2255			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2256						    next_svcn,
2257						    evcn1 - next_svcn, a_flags,
2258						    &attr, &mi, &le);
2259			if (err)
2260				goto undo_punch;
2261
2262			/* Layout of records may have changed. */
2263			attr_b = NULL;
2264		}
2265
2266		/* Real deallocate. Should not fail. */
2267		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2268
2269next_attr:
2270		/* Free all allocated memory. */
2271		run_truncate(run, 0);
2272
2273		if (evcn1 >= alen)
2274			break;
2275
2276		/* Get next attribute segment. */
2277		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2278		if (!attr) {
2279			err = -EINVAL;
2280			goto bad_inode;
2281		}
2282
2283		svcn = le64_to_cpu(attr->nres.svcn);
2284		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2285	}
2286
2287done:
2288	if (!hole)
2289		goto out;
2290
2291	if (!attr_b) {
2292		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2293				      &mi_b);
2294		if (!attr_b) {
2295			err = -EINVAL;
2296			goto bad_inode;
2297		}
2298	}
2299
2300	total_size -= (u64)hole << sbi->cluster_bits;
2301	attr_b->nres.total_size = cpu_to_le64(total_size);
2302	mi_b->dirty = true;
2303
2304	/* Update inode size. */
2305	inode_set_bytes(&ni->vfs_inode, total_size);
2306	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2307	mark_inode_dirty(&ni->vfs_inode);
2308
2309out:
2310	run_close(&run2);
2311	up_write(&ni->file.run_lock);
2312	return err;
2313
2314bad_inode:
2315	_ntfs_bad_inode(&ni->vfs_inode);
2316	goto out;
2317
2318undo_punch:
2319	/*
2320	 * Restore packed runs.
2321	 * 'mi_pack_runs' should not fail because we restore the original runs.
2322	 */
2323	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2324		goto bad_inode;
2325
2326	goto done;
2327}
2328
2329/*
2330 * attr_insert_range - Insert range (hole) in file.
2331 * Not for normal files.
2332 */
2333int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2334{
2335	int err = 0;
2336	struct runs_tree *run = &ni->file.run;
2337	struct ntfs_sb_info *sbi = ni->mi.sbi;
2338	struct ATTRIB *attr = NULL, *attr_b;
2339	struct ATTR_LIST_ENTRY *le, *le_b;
2340	struct mft_inode *mi, *mi_b;
2341	CLST vcn, svcn, evcn1, len, next_svcn;
2342	u64 data_size, alloc_size;
2343	u32 mask;
2344	__le16 a_flags;
2345
2346	if (!bytes)
2347		return 0;
2348
2349	le_b = NULL;
2350	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2351	if (!attr_b)
2352		return -ENOENT;
2353
2354	if (!is_attr_ext(attr_b)) {
2355		/* It was checked above. See fallocate. */
2356		return -EOPNOTSUPP;
2357	}
2358
2359	if (!attr_b->non_res) {
2360		data_size = le32_to_cpu(attr_b->res.data_size);
2361		alloc_size = data_size;
2362		mask = sbi->cluster_mask; /* cluster_size - 1 */
2363	} else {
2364		data_size = le64_to_cpu(attr_b->nres.data_size);
2365		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2366		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2367	}
2368
2369	if (vbo >= data_size) {
2370		/*
2371		 * Inserting a range beyond the end of file is not allowed:
2372		 * if the offset is equal to or greater than the file size,
2373		 * an error is returned.  For such operations (i.e., inserting
2374		 * a hole at the end of file), ftruncate(2) should be used.
2375		 */
2376		return -EINVAL;
2377	}
2378
2379	if ((vbo & mask) || (bytes & mask)) {
2380		/* Only frame-aligned ranges can be inserted. */
2381		return -EINVAL;
2382	}
2383
2384	/*
2385	 * valid_size <= data_size <= alloc_size
2386	 * Check alloc_size for maximum possible.
2387	 */
2388	if (bytes > sbi->maxbytes_sparse - alloc_size)
2389		return -EFBIG;
2390
2391	vcn = vbo >> sbi->cluster_bits;
2392	len = bytes >> sbi->cluster_bits;
2393
2394	down_write(&ni->file.run_lock);
2395
2396	if (!attr_b->non_res) {
2397		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2398				    data_size + bytes, NULL, false, NULL);
2399
2400		le_b = NULL;
2401		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2402				      &mi_b);
2403		if (!attr_b) {
2404			err = -EINVAL;
2405			goto bad_inode;
2406		}
2407
2408		if (err)
2409			goto out;
2410
2411		if (!attr_b->non_res) {
2412			/* Still resident. */
2413			char *data = Add2Ptr(attr_b,
2414					     le16_to_cpu(attr_b->res.data_off));
2415
2416			memmove(data + bytes, data, bytes);
2417			memset(data, 0, bytes);
2418			goto done;
2419		}
2420
2421		/* The resident attribute has become non-resident. */
2422		data_size = le64_to_cpu(attr_b->nres.data_size);
2423		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2424	}
2425
2426	/*
2427	 * Enumerate all attribute segments and shift start vcn.
2428	 */
2429	a_flags = attr_b->flags;
2430	svcn = le64_to_cpu(attr_b->nres.svcn);
2431	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2432
2433	if (svcn <= vcn && vcn < evcn1) {
2434		attr = attr_b;
2435		le = le_b;
2436		mi = mi_b;
2437	} else if (!le_b) {
2438		err = -EINVAL;
2439		goto bad_inode;
2440	} else {
2441		le = le_b;
2442		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2443				    &mi);
2444		if (!attr) {
2445			err = -EINVAL;
2446			goto bad_inode;
2447		}
2448
2449		svcn = le64_to_cpu(attr->nres.svcn);
2450		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2451	}
2452
2453	run_truncate(run, 0); /* clear cached values. */
2454	err = attr_load_runs(attr, ni, run, NULL);
2455	if (err)
2456		goto out;
2457
2458	if (!run_insert_range(run, vcn, len)) {
2459		err = -ENOMEM;
2460		goto out;
2461	}
2462
2463	/* Try to pack in current record as much as possible. */
2464	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2465	if (err)
2466		goto out;
2467
2468	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2469
2470	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2471	       attr->type == ATTR_DATA && !attr->name_len) {
2472		le64_add_cpu(&attr->nres.svcn, len);
2473		le64_add_cpu(&attr->nres.evcn, len);
2474		if (le) {
2475			le->vcn = attr->nres.svcn;
2476			ni->attr_list.dirty = true;
2477		}
2478		mi->dirty = true;
2479	}
2480
2481	if (next_svcn < evcn1 + len) {
2482		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2483					    next_svcn, evcn1 + len - next_svcn,
2484					    a_flags, NULL, NULL, NULL);
2485
2486		le_b = NULL;
2487		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2488				      &mi_b);
2489		if (!attr_b) {
2490			err = -EINVAL;
2491			goto bad_inode;
2492		}
2493
2494		if (err) {
2495			/* ni_insert_nonresident failed. Try to undo. */
2496			goto undo_insert_range;
2497		}
2498	}
2499
2500	/*
2501	 * Update primary attribute segment.
2502	 */
2503	if (vbo <= ni->i_valid)
2504		ni->i_valid += bytes;
2505
2506	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2507	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2508
2509	/* ni->i_valid may temporarily differ from the stored valid_size. */
2510	if (ni->i_valid > data_size + bytes)
2511		attr_b->nres.valid_size = attr_b->nres.data_size;
2512	else
2513		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2514	mi_b->dirty = true;
2515
2516done:
2517	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2518	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2519	mark_inode_dirty(&ni->vfs_inode);
2520
2521out:
2522	run_truncate(run, 0); /* clear cached values. */
2523
2524	up_write(&ni->file.run_lock);
2525
2526	return err;
2527
2528bad_inode:
2529	_ntfs_bad_inode(&ni->vfs_inode);
2530	goto out;
2531
2532undo_insert_range:
2533	svcn = le64_to_cpu(attr_b->nres.svcn);
2534	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2535
2536	if (svcn <= vcn && vcn < evcn1) {
2537		attr = attr_b;
2538		le = le_b;
2539		mi = mi_b;
2540	} else if (!le_b) {
2541		goto bad_inode;
2542	} else {
2543		le = le_b;
2544		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2545				    &mi);
2546		if (!attr) {
2547			goto bad_inode;
2548		}
2549
2550		svcn = le64_to_cpu(attr->nres.svcn);
2551		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2552	}
2553
2554	if (attr_load_runs(attr, ni, run, NULL))
2555		goto bad_inode;
2556
2557	if (!run_collapse_range(run, vcn, len))
2558		goto bad_inode;
2559
2560	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2561		goto bad_inode;
2562
2563	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2564	       attr->type == ATTR_DATA && !attr->name_len) {
2565		le64_sub_cpu(&attr->nres.svcn, len);
2566		le64_sub_cpu(&attr->nres.evcn, len);
2567		if (le) {
2568			le->vcn = attr->nres.svcn;
2569			ni->attr_list.dirty = true;
2570		}
2571		mi->dirty = true;
2572	}
2573
2574	goto out;
2575}
2576
2577/*
2578 * attr_force_nonresident
2579 *
2580 * Convert default data attribute into non resident form.
2581 */
2582int attr_force_nonresident(struct ntfs_inode *ni)
2583{
2584	int err;
2585	struct ATTRIB *attr;
2586	struct ATTR_LIST_ENTRY *le = NULL;
2587	struct mft_inode *mi;
2588
2589	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2590	if (!attr) {
2591		_ntfs_bad_inode(&ni->vfs_inode);
2592		return -ENOENT;
2593	}
2594
2595	if (attr->non_res) {
2596		/* Already non-resident. */
2597		return 0;
2598	}
2599
2600	down_write(&ni->file.run_lock);
2601	err = attr_make_nonresident(ni, attr, le, mi,
2602				    le32_to_cpu(attr->res.data_size),
2603				    &ni->file.run, &attr, NULL);
2604	up_write(&ni->file.run_lock);
2605
2606	return err;
2607}
2608
2609/*
2610 * Change the compression of data attribute
2611 */
2612int attr_set_compress(struct ntfs_inode *ni, bool compr)
2613{
2614	struct ATTRIB *attr;
2615	struct mft_inode *mi;
2616
2617	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
2618	if (!attr)
2619		return -ENOENT;
2620
2621	if (is_attr_compressed(attr) == !!compr) {
2622		/* Already in the requested compression state. */
2623		return 0;
2624	}
2625
2626	if (attr->non_res) {
2627		u16 run_off;
2628		u32 run_size;
2629		char *run;
2630
2631		if (attr->nres.data_size) {
2632			/*
2633			 * There are rare cases when it is possible to change
2634			 * the compression state without big changes.
2635			 * TODO: Process these cases.
2636			 */
2637			return -EOPNOTSUPP;
2638		}
2639
2640		run_off = le16_to_cpu(attr->nres.run_off);
2641		run_size = le32_to_cpu(attr->size) - run_off;
2642		run = Add2Ptr(attr, run_off);
2643
2644		if (!compr) {
2645			/* Remove the field 'attr->nres.total_size'. */
2646			memmove(run - 8, run, run_size);
2647			run_off -= 8;
2648		}
2649
2650		if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) {
2651			/*
2652			 * Ignore the rare case when the record has no room for 8 more bytes.
2653			 * TODO: Split the attribute.
2654			 */
2655			return -EOPNOTSUPP;
2656		}
2657
2658		if (compr) {
2659			/* Make a gap for 'attr->nres.total_size'. */
2660			memmove(run + 8, run, run_size);
2661			run_off += 8;
2662			attr->nres.total_size = attr->nres.alloc_size;
2663		}
2664		attr->nres.run_off = cpu_to_le16(run_off);
2665	}
2666
2667	/* Update data attribute flags. */
2668	if (compr) {
2669		attr->flags |= ATTR_FLAG_COMPRESSED;
2670		attr->nres.c_unit = NTFS_LZNT_CUNIT;
2671	} else {
2672		attr->flags &= ~ATTR_FLAG_COMPRESSED;
2673		attr->nres.c_unit = 0;
2674	}
2675	mi->dirty = true;
2676
2677	return 0;
2678}