v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
   5 *
   6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
   7 */
   8
   9#include <linux/fs.h>
  10#include <linux/slab.h>
  11#include <linux/kernel.h>
  12
  13#include "debug.h"
  14#include "ntfs.h"
  15#include "ntfs_fs.h"
  16
  17/*
  18 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be set externally to
  19 * tune the preallocation algorithm.
  20 */
  21#ifndef NTFS_MIN_LOG2_OF_CLUMP
  22#define NTFS_MIN_LOG2_OF_CLUMP 16
  23#endif
  24
  25#ifndef NTFS_MAX_LOG2_OF_CLUMP
  26#define NTFS_MAX_LOG2_OF_CLUMP 26
  27#endif
  28
  29// 16M
  30#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
  31// 16G
  32#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
  33
  34static inline u64 get_pre_allocated(u64 size)
  35{
  36	u32 clump;
  37	u8 align_shift;
  38	u64 ret;
  39
  40	if (size <= NTFS_CLUMP_MIN) {
  41		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
  42		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
  43	} else if (size >= NTFS_CLUMP_MAX) {
  44		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
  45		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
  46	} else {
  47		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
  48			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
  49		clump = 1u << align_shift;
  50	}
  51
  52	ret = ((size + clump - 1) >> align_shift) << align_shift;
  53
  54	return ret;
  55}
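/*
 * Worked example (illustrative only, not part of the driver): for
 * size == 33,000,000 bytes, size >> 24 == 1 and __ffs(1) == 0, so
 * align_shift == 15 and clump == 32 KiB; rounding up gives
 * 33,030,144 bytes of preallocation. Sizes at or below NTFS_CLUMP_MIN
 * (16M) always use a 64K clump; sizes at or above NTFS_CLUMP_MAX (16G)
 * use a 64M clump.
 */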
  56
  57/*
  58 * attr_load_runs - Load all runs stored in @attr.
  59 */
  60static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
  61			  struct runs_tree *run, const CLST *vcn)
  62{
  63	int err;
  64	CLST svcn = le64_to_cpu(attr->nres.svcn);
  65	CLST evcn = le64_to_cpu(attr->nres.evcn);
  66	u32 asize;
  67	u16 run_off;
  68
  69	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
  70		return 0;
  71
  72	if (vcn && (evcn < *vcn || *vcn < svcn))
  73		return -EINVAL;
  74
  75	asize = le32_to_cpu(attr->size);
  76	run_off = le16_to_cpu(attr->nres.run_off);
  77
  78	if (run_off > asize)
  79		return -EINVAL;
  80
  81	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
  82			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
  83			    asize - run_off);
  84	if (err < 0)
  85		return err;
  86
  87	return 0;
  88}
  89
  90/*
  91 * run_deallocate_ex - Deallocate clusters.
  92 */
  93static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
  94			     CLST vcn, CLST len, CLST *done, bool trim)
  95{
  96	int err = 0;
  97	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
  98	size_t idx;
  99
 100	if (!len)
 101		goto out;
 102
 103	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
 104failed:
 105		run_truncate(run, vcn0);
 106		err = -EINVAL;
 107		goto out;
 108	}
 109
 110	for (;;) {
 111		if (clen > len)
 112			clen = len;
 113
 114		if (!clen) {
 115			err = -EINVAL;
 116			goto out;
 117		}
 118
 119		if (lcn != SPARSE_LCN) {
 120			if (sbi) {
 121				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
 122				mark_as_free_ex(sbi, lcn, clen, trim);
 123			}
 124			dn += clen;
 125		}
 126
 127		len -= clen;
 128		if (!len)
 129			break;
 130
 131		vcn_next = vcn + clen;
 132		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
 133		    vcn != vcn_next) {
 134			/* Save memory - don't load entire run. */
 135			goto failed;
 136		}
 137	}
 138
 139out:
 140	if (done)
 141		*done += dn;
 142
 143	return err;
 144}
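/*
 * Behavior sketch (derived from the loop above): deallocating
 * [vcn, vcn + len) walks the run fragments in order, frees only real
 * fragments (lcn != SPARSE_LCN) via mark_as_free_ex() and counts them
 * in @done, so sparse fragments never inflate the count. Passing
 * sbi == NULL (as attr_punch_hole() does) turns the walk into a dry
 * run that only counts clusters.
 */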
 145
 146/*
 147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 148 */
 149int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 150			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 151			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
 152			   CLST *new_lcn, CLST *new_len)
 153{
 154	int err;
 155	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
 156	size_t cnt = run->count;
 157
 158	for (;;) {
 159		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
 160					       opt);
 161
 162		if (err == -ENOSPC && pre) {
 163			pre = 0;
 164			if (*pre_alloc)
 165				*pre_alloc = 0;
 166			continue;
 167		}
 168
 169		if (err)
 170			goto out;
 171
 172		if (vcn == vcn0) {
 173			/* Return the first fragment. */
 174			if (new_lcn)
 175				*new_lcn = lcn;
 176			if (new_len)
 177				*new_len = flen;
 178		}
 179
 180		/* Add new fragment into run storage. */
 181		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 182			/* Undo last 'ntfs_look_for_free_space' */
 183			mark_as_free_ex(sbi, lcn, len, false);
 184			err = -ENOMEM;
 185			goto out;
 186		}
 187
 188		if (opt & ALLOCATE_ZERO) {
 189			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
 190
 191			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
 192						   (sector_t)lcn << shift,
 193						   (sector_t)flen << shift,
 194						   GFP_NOFS, 0);
 195			if (err)
 196				goto out;
 197		}
 198
 199		vcn += flen;
 200
 201		if (flen >= len || (opt & ALLOCATE_MFT) ||
 202		    (fr && run->count - cnt >= fr)) {
 203			*alen = vcn - vcn0;
 204			return 0;
 205		}
 206
 207		len -= flen;
 208	}
 209
 210out:
 211	/* Undo 'ntfs_look_for_free_space' */
 212	if (vcn - vcn0) {
 213		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
 214		run_truncate(run, vcn0);
 215	}
 216
 217	return err;
 218}
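/*
 * Typical call (see attr_make_nonresident() below): allocate @len
 * clusters anywhere on the volume, with no preallocation, no fragment
 * limit and no zeroing:
 *
 *	err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 *				     ALLOCATE_DEF, &alen, 0, NULL, NULL);
 *
 * On success @alen holds the number of clusters actually added to @run.
 */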
 219
 220/*
 221 * attr_make_nonresident
 222 *
 223 * If @page is not NULL, it already contains resident data
 224 * and is locked (called from ni_write_frame()).
 225 */
 226int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 227			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 228			  u64 new_size, struct runs_tree *run,
 229			  struct ATTRIB **ins_attr, struct page *page)
 230{
 231	struct ntfs_sb_info *sbi;
 232	struct ATTRIB *attr_s;
 233	struct MFT_REC *rec;
 234	u32 used, asize, rsize, aoff, align;
 235	bool is_data;
 236	CLST len, alen;
 237	char *next;
 238	int err;
 239
 240	if (attr->non_res) {
 241		*ins_attr = attr;
 242		return 0;
 243	}
 244
 245	sbi = mi->sbi;
 246	rec = mi->mrec;
 247	attr_s = NULL;
 248	used = le32_to_cpu(rec->used);
 249	asize = le32_to_cpu(attr->size);
 250	next = Add2Ptr(attr, asize);
 251	aoff = PtrOffset(rec, attr);
 252	rsize = le32_to_cpu(attr->res.data_size);
 253	is_data = attr->type == ATTR_DATA && !attr->name_len;
 254
 255	align = sbi->cluster_size;
 256	if (is_attr_compressed(attr))
 257		align <<= COMPRESSION_UNIT;
 258	len = (rsize + align - 1) >> sbi->cluster_bits;
 259
 260	run_init(run);
 261
 262	/* Make a copy of original attribute. */
 263	attr_s = kmemdup(attr, asize, GFP_NOFS);
 264	if (!attr_s) {
 265		err = -ENOMEM;
 266		goto out;
 267	}
 268
 269	if (!len) {
 270		/* Empty resident -> Empty nonresident. */
 271		alen = 0;
 272	} else {
 273		const char *data = resident_data(attr);
 274
 275		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 276					     ALLOCATE_DEF, &alen, 0, NULL,
 277					     NULL);
 278		if (err)
 279			goto out1;
 280
 281		if (!rsize) {
 282			/* Empty resident -> Non-empty nonresident. */
 283		} else if (!is_data) {
 284			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
 285			if (err)
 286				goto out2;
 287		} else if (!page) {
 288			char *kaddr;
 289
 290			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
 291			if (!page) {
 292				err = -ENOMEM;
 293				goto out2;
 294			}
 295			kaddr = kmap_atomic(page);
 296			memcpy(kaddr, data, rsize);
 297			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
 298			kunmap_atomic(kaddr);
 299			flush_dcache_page(page);
 300			SetPageUptodate(page);
 301			set_page_dirty(page);
 302			unlock_page(page);
 303			put_page(page);
 304		}
 305	}
 306
 307	/* Remove original attribute. */
 308	used -= asize;
 309	memmove(attr, Add2Ptr(attr, asize), used - aoff);
 310	rec->used = cpu_to_le32(used);
 311	mi->dirty = true;
 312	if (le)
 313		al_remove_le(ni, le);
 314
 315	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
 316				    attr_s->name_len, run, 0, alen,
 317				    attr_s->flags, &attr, NULL, NULL);
 318	if (err)
 319		goto out3;
 320
 321	kfree(attr_s);
 322	attr->nres.data_size = cpu_to_le64(rsize);
 323	attr->nres.valid_size = attr->nres.data_size;
 324
 325	*ins_attr = attr;
 326
 327	if (is_data)
 328		ni->ni_flags &= ~NI_FLAG_RESIDENT;
 329
 330	/* Resident attribute becomes nonresident. */
 331	return 0;
 332
 333out3:
 334	attr = Add2Ptr(rec, aoff);
 335	memmove(next, attr, used - aoff);
 336	memcpy(attr, attr_s, asize);
 337	rec->used = cpu_to_le32(used + asize);
 338	mi->dirty = true;
 339out2:
 340	/* Undo: do not trim newly allocated clusters. */
 341	run_deallocate(sbi, run, false);
 342	run_close(run);
 343out1:
 344	kfree(attr_s);
 345out:
 346	return err;
 347}
 348
 349/*
 350 * attr_set_size_res - Helper for attr_set_size().
 351 */
 352static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
 353			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 354			     u64 new_size, struct runs_tree *run,
 355			     struct ATTRIB **ins_attr)
 356{
 357	struct ntfs_sb_info *sbi = mi->sbi;
 358	struct MFT_REC *rec = mi->mrec;
 359	u32 used = le32_to_cpu(rec->used);
 360	u32 asize = le32_to_cpu(attr->size);
 361	u32 aoff = PtrOffset(rec, attr);
 362	u32 rsize = le32_to_cpu(attr->res.data_size);
 363	u32 tail = used - aoff - asize;
 364	char *next = Add2Ptr(attr, asize);
 365	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
 366
 367	if (dsize < 0) {
 368		memmove(next + dsize, next, tail);
 369	} else if (dsize > 0) {
 370		if (used + dsize > sbi->max_bytes_per_attr)
 371			return attr_make_nonresident(ni, attr, le, mi, new_size,
 372						     run, ins_attr, NULL);
 373
 374		memmove(next + dsize, next, tail);
 375		memset(next, 0, dsize);
 376	}
 377
 378	if (new_size > rsize)
 379		memset(Add2Ptr(resident_data(attr), rsize), 0,
 380		       new_size - rsize);
 381
 382	rec->used = cpu_to_le32(used + dsize);
 383	attr->size = cpu_to_le32(asize + dsize);
 384	attr->res.data_size = cpu_to_le32(new_size);
 385	mi->dirty = true;
 386	*ins_attr = attr;
 387
 388	return 0;
 389}
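/*
 * Worked example (illustrative): growing resident data from
 * rsize == 10 to new_size == 20 gives
 * dsize == ALIGN(20, 8) - ALIGN(10, 8) == 24 - 16 == 8, so the record
 * tail is shifted up by 8 bytes and the new bytes are zeroed; shrinking
 * yields a negative dsize and the tail moves down instead.
 */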
 390
 391/*
 392 * attr_set_size - Change the size of an attribute.
 393 *
 394 * Extend:
 395 *   - Sparse/compressed: No allocated clusters.
 396 *   - Normal: Append allocated and preallocated new clusters.
 397 * Shrink:
 398 *   - Do not deallocate if @keep_prealloc is set.
 399 */
 400int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 401		  const __le16 *name, u8 name_len, struct runs_tree *run,
 402		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
 403		  struct ATTRIB **ret)
 404{
 405	int err = 0;
 406	struct ntfs_sb_info *sbi = ni->mi.sbi;
 407	u8 cluster_bits = sbi->cluster_bits;
 408	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
 409		      !name_len;
 410	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 411	struct ATTRIB *attr = NULL, *attr_b;
 412	struct ATTR_LIST_ENTRY *le, *le_b;
 413	struct mft_inode *mi, *mi_b;
 414	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 415	CLST next_svcn, pre_alloc = -1, done = 0;
 416	bool is_ext, is_bad = false;
 417	bool dirty = false;
 418	u32 align;
 419	struct MFT_REC *rec;
 420
 421again:
 422	alen = 0;
 423	le_b = NULL;
 424	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 425			      &mi_b);
 426	if (!attr_b) {
 427		err = -ENOENT;
 428		goto bad_inode;
 429	}
 430
 431	if (!attr_b->non_res) {
 432		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 433					&attr_b);
 434		if (err)
 435			return err;
 436
 437		/* Return if file is still resident. */
 438		if (!attr_b->non_res) {
 439			dirty = true;
 440			goto ok1;
 441		}
 442
 443		/* Layout of records may be changed, so do a full search. */
 444		goto again;
 445	}
 446
 447	is_ext = is_attr_ext(attr_b);
 448	align = sbi->cluster_size;
 449	if (is_ext)
 450		align <<= attr_b->nres.c_unit;
 451
 452	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 453	old_size = le64_to_cpu(attr_b->nres.data_size);
 454	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 455
 456again_1:
 457	old_alen = old_alloc >> cluster_bits;
 458
 459	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
 460	new_alen = new_alloc >> cluster_bits;
 461
 462	if (keep_prealloc && new_size < old_size) {
 463		attr_b->nres.data_size = cpu_to_le64(new_size);
 464		mi_b->dirty = dirty = true;
 465		goto ok;
 466	}
 467
 468	vcn = old_alen - 1;
 469
 470	svcn = le64_to_cpu(attr_b->nres.svcn);
 471	evcn = le64_to_cpu(attr_b->nres.evcn);
 472
 473	if (svcn <= vcn && vcn <= evcn) {
 474		attr = attr_b;
 475		le = le_b;
 476		mi = mi_b;
 477	} else if (!le_b) {
 478		err = -EINVAL;
 479		goto bad_inode;
 480	} else {
 481		le = le_b;
 482		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 483				    &mi);
 484		if (!attr) {
 485			err = -EINVAL;
 486			goto bad_inode;
 487		}
 488
 489next_le_1:
 490		svcn = le64_to_cpu(attr->nres.svcn);
 491		evcn = le64_to_cpu(attr->nres.evcn);
 492	}
 493	/*
 494	 * Here we have:
 495	 * attr,mi,le - last attribute segment (containing 'vcn').
 496	 * attr_b,mi_b,le_b - base (primary) attribute segment.
 497	 */
 498next_le:
 499	rec = mi->mrec;
 500	err = attr_load_runs(attr, ni, run, NULL);
 501	if (err)
 502		goto out;
 503
 504	if (new_size > old_size) {
 505		CLST to_allocate;
 506		size_t free;
 507
 508		if (new_alloc <= old_alloc) {
 509			attr_b->nres.data_size = cpu_to_le64(new_size);
 510			mi_b->dirty = dirty = true;
 511			goto ok;
 512		}
 513
 514		/*
 515		 * Add clusters. In the simple case we have to:
 516		 *  - allocate space (vcn, lcn, len)
 517		 *  - update packed run in 'mi'
 518		 *  - update attr->nres.evcn
 519		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 520		 */
 521		to_allocate = new_alen - old_alen;
 522add_alloc_in_same_attr_seg:
 523		lcn = 0;
 524		if (is_mft) {
 525			/* MFT allocates clusters from MFT zone. */
 526			pre_alloc = 0;
 527		} else if (is_ext) {
 528			/* No preallocation for sparse/compressed. */
 529			pre_alloc = 0;
 530		} else if (pre_alloc == -1) {
 531			pre_alloc = 0;
 532			if (type == ATTR_DATA && !name_len &&
 533			    sbi->options->prealloc) {
 534				pre_alloc = bytes_to_cluster(
 535						    sbi, get_pre_allocated(
 536								 new_size)) -
 537				    new_alen;
 538			}
 539
 540			/* Get the last LCN to allocate from. */
 541			if (old_alen &&
 542			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
 543				lcn = SPARSE_LCN;
 544			}
 545
 546			if (lcn == SPARSE_LCN)
 547				lcn = 0;
 548			else if (lcn)
 549				lcn += 1;
 550
 551			free = wnd_zeroes(&sbi->used.bitmap);
 552			if (to_allocate > free) {
 553				err = -ENOSPC;
 554				goto out;
 555			}
 556
 557			if (pre_alloc && to_allocate + pre_alloc > free)
 558				pre_alloc = 0;
 559		}
 560
 561		vcn = old_alen;
 562
 563		if (is_ext) {
 564			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
 565					   false)) {
 566				err = -ENOMEM;
 567				goto out;
 568			}
 569			alen = to_allocate;
 570		} else {
 571			/* ~3 bytes per fragment. */
 572			err = attr_allocate_clusters(
 573				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 574				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 575				is_mft ? 0 :
 576					 (sbi->record_size -
 577					  le32_to_cpu(rec->used) + 8) /
 578							 3 +
 579						 1,
 580				NULL, NULL);
 581			if (err)
 582				goto out;
 583		}
 584
 585		done += alen;
 586		vcn += alen;
 587		if (to_allocate > alen)
 588			to_allocate -= alen;
 589		else
 590			to_allocate = 0;
 591
 592pack_runs:
 593		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 594		if (err)
 595			goto undo_1;
 596
 597		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 598		new_alloc_tmp = (u64)next_svcn << cluster_bits;
 599		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 600		mi_b->dirty = dirty = true;
 601
 602		if (next_svcn >= vcn && !to_allocate) {
 603			/* Normal way. Update attribute and exit. */
 604			attr_b->nres.data_size = cpu_to_le64(new_size);
 605			goto ok;
 606		}
 607
 608		/* At least two MFT records to avoid a recursive loop. */
 609		if (is_mft && next_svcn == vcn &&
 610		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
 611			new_size = new_alloc_tmp;
 612			attr_b->nres.data_size = attr_b->nres.alloc_size;
 613			goto ok;
 614		}
 615
 616		if (le32_to_cpu(rec->used) < sbi->record_size) {
 617			old_alen = next_svcn;
 618			evcn = old_alen - 1;
 619			goto add_alloc_in_same_attr_seg;
 620		}
 621
 622		attr_b->nres.data_size = attr_b->nres.alloc_size;
 623		if (new_alloc_tmp < old_valid)
 624			attr_b->nres.valid_size = attr_b->nres.data_size;
 625
 626		if (type == ATTR_LIST) {
 627			err = ni_expand_list(ni);
 628			if (err)
 629				goto undo_2;
 630			if (next_svcn < vcn)
 631				goto pack_runs;
 632
 633			/* Layout of records is changed. */
 634			goto again;
 635		}
 636
 637		if (!ni->attr_list.size) {
 638			err = ni_create_attr_list(ni);
 639			/* In case of error layout of records is not changed. */
 640			if (err)
 641				goto undo_2;
 642			/* Layout of records is changed. */
 643		}
 644
 645		if (next_svcn >= vcn) {
 646			/* This is MFT data, repeat. */
 647			goto again;
 648		}
 649
 650		/* Insert new attribute segment. */
 651		err = ni_insert_nonresident(ni, type, name, name_len, run,
 652					    next_svcn, vcn - next_svcn,
 653					    attr_b->flags, &attr, &mi, NULL);
 654
 655		/*
 656		 * Layout of records may be changed.
 657		 * Find base attribute to update.
 658		 */
 659		le_b = NULL;
 660		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 661				      NULL, &mi_b);
 662		if (!attr_b) {
 663			err = -EINVAL;
 664			goto bad_inode;
 665		}
 666
 667		if (err) {
 668			/* ni_insert_nonresident failed. */
 669			attr = NULL;
 670			goto undo_2;
 671		}
 672
 673		if (!is_mft)
 674			run_truncate_head(run, evcn + 1);
 675
 676		svcn = le64_to_cpu(attr->nres.svcn);
 677		evcn = le64_to_cpu(attr->nres.evcn);
 678
 679		/*
 680		 * Attribute is in a consistent state.
 681		 * Save this point to restore to if next steps fail.
 682		 */
 683		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
 684		attr_b->nres.valid_size = attr_b->nres.data_size =
 685			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 686		mi_b->dirty = dirty = true;
 687		goto again_1;
 688	}
 689
 690	if (new_size != old_size ||
 691	    (new_alloc != old_alloc && !keep_prealloc)) {
 692		/*
 693		 * Truncate clusters. In the simple case we have to:
 694		 *  - update packed run in 'mi'
 695		 *  - update attr->nres.evcn
 696		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 697		 *  - mark and trim clusters as free (vcn, lcn, len)
 698		 */
 699		CLST dlen = 0;
 700
 701		vcn = max(svcn, new_alen);
 702		new_alloc_tmp = (u64)vcn << cluster_bits;
 703
 704		if (vcn > svcn) {
 705			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 706			if (err)
 707				goto out;
 708		} else if (le && le->vcn) {
 709			u16 le_sz = le16_to_cpu(le->size);
 710
 711			/*
 712			 * NOTE: List entries for one attribute are always
 713			 * the same size. We deal with the last entry (vcn==0),
 714			 * and it is not the first in the entries array
 715			 * (the list entry for the std attribute is always first).
 716			 * So it is safe to step back.
 717			 */
 718			mi_remove_attr(NULL, mi, attr);
 719
 720			if (!al_remove_le(ni, le)) {
 721				err = -EINVAL;
 722				goto bad_inode;
 723			}
 724
 725			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 726		} else {
 727			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
 728			mi->dirty = true;
 729		}
 730
 731		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 732
 733		if (vcn == new_alen) {
 734			attr_b->nres.data_size = cpu_to_le64(new_size);
 735			if (new_size < old_valid)
 736				attr_b->nres.valid_size =
 737					attr_b->nres.data_size;
 738		} else {
 739			if (new_alloc_tmp <=
 740			    le64_to_cpu(attr_b->nres.data_size))
 741				attr_b->nres.data_size =
 742					attr_b->nres.alloc_size;
 743			if (new_alloc_tmp <
 744			    le64_to_cpu(attr_b->nres.valid_size))
 745				attr_b->nres.valid_size =
 746					attr_b->nres.alloc_size;
 747		}
 748		mi_b->dirty = dirty = true;
 749
 750		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
 751					true);
 752		if (err)
 753			goto out;
 754
 755		if (is_ext) {
 756			/* dlen - really deallocated clusters. */
 757			le64_sub_cpu(&attr_b->nres.total_size,
 758				     ((u64)dlen << cluster_bits));
 759		}
 760
 761		run_truncate(run, vcn);
 762
 763		if (new_alloc_tmp <= new_alloc)
 764			goto ok;
 765
 766		old_size = new_alloc_tmp;
 767		vcn = svcn - 1;
 768
 769		if (le == le_b) {
 770			attr = attr_b;
 771			mi = mi_b;
 772			evcn = svcn - 1;
 773			svcn = 0;
 774			goto next_le;
 775		}
 776
 777		if (le->type != type || le->name_len != name_len ||
 778		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 779			err = -EINVAL;
 780			goto bad_inode;
 781		}
 782
 783		err = ni_load_mi(ni, le, &mi);
 784		if (err)
 785			goto out;
 786
 787		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
 788		if (!attr) {
 789			err = -EINVAL;
 790			goto bad_inode;
 791		}
 792		goto next_le_1;
 793	}
 794
 795ok:
 796	if (new_valid) {
 797		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
 798
 799		if (attr_b->nres.valid_size != valid) {
 800			attr_b->nres.valid_size = valid;
 801			mi_b->dirty = true;
 802		}
 803	}
 804
 805ok1:
 806	if (ret)
 807		*ret = attr_b;
 808
 809	if (((type == ATTR_DATA && !name_len) ||
 810	     (type == ATTR_ALLOC && name == I30_NAME))) {
 811		/* Update inode_set_bytes. */
 812		if (attr_b->non_res) {
 813			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 814			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 815				inode_set_bytes(&ni->vfs_inode, new_alloc);
 816				dirty = true;
 817			}
 818		}
 819
 820		/* Don't forget to update duplicate information in parent. */
 821		if (dirty) {
 822			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 823			mark_inode_dirty(&ni->vfs_inode);
 824		}
 825	}
 826
 827	return 0;
 828
 829undo_2:
 830	vcn -= alen;
 831	attr_b->nres.data_size = cpu_to_le64(old_size);
 832	attr_b->nres.valid_size = cpu_to_le64(old_valid);
 833	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
 834
 835	/* Restore 'attr' and 'mi'. */
 836	if (attr)
 837		goto restore_run;
 838
 839	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
 840	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
 841		attr = attr_b;
 842		le = le_b;
 843		mi = mi_b;
 844	} else if (!le_b) {
 845		err = -EINVAL;
 846		goto bad_inode;
 847	} else {
 848		le = le_b;
 849		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
 850				    &svcn, &mi);
 851		if (!attr)
 852			goto bad_inode;
 853	}
 854
 855restore_run:
 856	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
 857		is_bad = true;
 858
 859undo_1:
 860	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
 861
 862	run_truncate(run, vcn);
 863out:
 864	if (is_bad) {
 865bad_inode:
 866		_ntfs_bad_inode(&ni->vfs_inode);
 867	}
 868	return err;
 869}
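/*
 * Example call (as used by attr_collapse_range() below): truncate the
 * unnamed data attribute to 'vbo' bytes while keeping preallocated
 * clusters:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
 *			    &new_valid, true, NULL);
 */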
 870
 871/*
 872 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 873 *
 874 * @new == NULL means just to get current mapping for 'vcn'
 875 * @new != NULL means allocate real cluster if 'vcn' maps to hole
 876 * @zero - zeroout new allocated clusters
 877 *
 878 *  NOTE:
 879 *  - @new != NULL is called only for sparsed or compressed attributes.
 880 *  - new allocated clusters are zeroed via blkdev_issue_zeroout.
 881 */
 882int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 883			CLST *len, bool *new, bool zero)
 884{
 885	int err = 0;
 886	struct runs_tree *run = &ni->file.run;
 887	struct ntfs_sb_info *sbi;
 888	u8 cluster_bits;
 889	struct ATTRIB *attr, *attr_b;
 890	struct ATTR_LIST_ENTRY *le, *le_b;
 891	struct mft_inode *mi, *mi_b;
 892	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 893	CLST alloc, evcn;
 894	unsigned fr;
 895	u64 total_size, total_size0;
 896	int step = 0;
 897
 898	if (new)
 899		*new = false;
 900
 901	/* Try to find in cache. */
 902	down_read(&ni->file.run_lock);
 903	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 904		*len = 0;
 905	up_read(&ni->file.run_lock);
 906
 907	if (*len && (*lcn != SPARSE_LCN || !new))
 908		return 0; /* Fast normal way without allocation. */
 909
 910	/* No cluster in the cache, or we need to allocate a cluster in a hole. */
 911	sbi = ni->mi.sbi;
 912	cluster_bits = sbi->cluster_bits;
 913
 914	ni_lock(ni);
 915	down_write(&ni->file.run_lock);
 916
 917	/* Repeat the code above (under write lock). */
 918	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 919		*len = 0;
 920
 921	if (*len) {
 922		if (*lcn != SPARSE_LCN || !new)
 923			goto out; /* normal way without allocation. */
 924		if (clen > *len)
 925			clen = *len;
 926	}
 927
 928	le_b = NULL;
 929	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
 930	if (!attr_b) {
 931		err = -ENOENT;
 932		goto out;
 933	}
 934
 935	if (!attr_b->non_res) {
 936		*lcn = RESIDENT_LCN;
 937		*len = 1;
 938		goto out;
 939	}
 940
 941	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
 942	if (vcn >= asize) {
 943		if (new) {
 944			err = -EINVAL;
 945		} else {
 946			*len = 1;
 947			*lcn = SPARSE_LCN;
 948		}
 949		goto out;
 950	}
 951
 952	svcn = le64_to_cpu(attr_b->nres.svcn);
 953	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
 954
 955	attr = attr_b;
 956	le = le_b;
 957	mi = mi_b;
 958
 959	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
 960		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
 961				    &mi);
 962		if (!attr) {
 963			err = -EINVAL;
 964			goto out;
 965		}
 966		svcn = le64_to_cpu(attr->nres.svcn);
 967		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 968	}
 969
 970	/* Load the actual information into the cache. */
 971	err = attr_load_runs(attr, ni, run, NULL);
 972	if (err)
 973		goto out;
 974
 975	if (!*len) {
 976		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 977			if (*lcn != SPARSE_LCN || !new)
 978				goto ok; /* Slow normal way without allocation. */
 979
 980			if (clen > *len)
 981				clen = *len;
 982		} else if (!new) {
 983			/* Here we may return -ENOENT.
 984			 * In any case the caller gets zero length. */
 985			goto ok;
 986		}
 987	}
 988
 989	if (!is_attr_ext(attr_b)) {
 990		/* The code below is only for sparse or compressed attributes. */
 991		err = -EINVAL;
 992		goto out;
 993	}
 994
 995	vcn0 = vcn;
 996	to_alloc = clen;
 997	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
 998	/* Allocate frame-aligned clusters.
 999	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed files.
1000	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
1001	if (attr_b->nres.c_unit) {
1002		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1003		CLST cmask = ~(clst_per_frame - 1);
1004
1005		/* Get frame aligned vcn and to_alloc. */
1006		vcn = vcn0 & cmask;
1007		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1008		if (fr < clst_per_frame)
1009			fr = clst_per_frame;
1010		zero = true;
1011
1012		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
1013		if (vcn < svcn || evcn1 <= vcn) {
1014			/* Load attribute for truncated vcn. */
1015			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
1016					    &vcn, &mi);
1017			if (!attr) {
1018				err = -EINVAL;
1019				goto out;
1020			}
1021			svcn = le64_to_cpu(attr->nres.svcn);
1022			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1023			err = attr_load_runs(attr, ni, run, NULL);
1024			if (err)
1025				goto out;
1026		}
1027	}
1028
1029	if (vcn + to_alloc > asize)
1030		to_alloc = asize - vcn;
1031
1032	/* Get the last LCN to allocate from. */
1033	hint = 0;
1034
1035	if (vcn > evcn1) {
1036		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1037				   false)) {
1038			err = -ENOMEM;
1039			goto out;
1040		}
1041	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1042		hint = -1;
1043	}
1044
1045	/* Allocate and zeroout new clusters. */
1046	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1047				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1048				     fr, lcn, len);
1049	if (err)
1050		goto out;
1051	*new = true;
1052	step = 1;
1053
1054	end = vcn + alen;
1055	/* Save 'total_size0' to restore on error. */
1056	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1057	total_size = total_size0 + ((u64)alen << cluster_bits);
1058
1059	if (vcn != vcn0) {
1060		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1061			err = -EINVAL;
1062			goto out;
1063		}
1064		if (*lcn == SPARSE_LCN) {
1065			/* Internal error. Should not happen. */
1066			WARN_ON(1);
1067			err = -EINVAL;
1068			goto out;
1069		}
1070		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1071		if (vcn0 + *len > end)
1072			*len = end - vcn0;
1073	}
1074
1075repack:
1076	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1077	if (err)
1078		goto out;
1079
1080	attr_b->nres.total_size = cpu_to_le64(total_size);
1081	inode_set_bytes(&ni->vfs_inode, total_size);
1082	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1083
1084	mi_b->dirty = true;
1085	mark_inode_dirty(&ni->vfs_inode);
1086
1087	/* Stored [vcn : next_svcn) from [vcn : end). */
1088	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1089
1090	if (end <= evcn1) {
1091		if (next_svcn == evcn1) {
1092			/* Normal way. Update attribute and exit. */
1093			goto ok;
1094		}
1095		/* Add a new segment [next_svcn : evcn1). */
1096		if (!ni->attr_list.size) {
1097			err = ni_create_attr_list(ni);
1098			if (err)
1099				goto undo1;
1100			/* Layout of records is changed. */
1101			le_b = NULL;
1102			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1103					      0, NULL, &mi_b);
1104			if (!attr_b) {
1105				err = -ENOENT;
1106				goto out;
1107			}
1108
1109			attr = attr_b;
1110			le = le_b;
1111			mi = mi_b;
1112			goto repack;
1113		}
1114	}
1115
1116	/*
1117	 * The code below may require an additional cluster (to extend the
1118	 * attribute list) and/or one MFT record.
1119	 * It is too complex to undo these operations if -ENOSPC occurs deep
1120	 * inside 'ni_insert_nonresident'.
1121	 * Return -ENOSPC in advance here if there is no free cluster and no free MFT record.
1122	 */
1123	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1124		/* Undo step 1. */
1125		err = -ENOSPC;
1126		goto undo1;
1127	}
1128
1129	step = 2;
1130	svcn = evcn1;
1131
1132	/* Estimate next attribute. */
1133	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1134
1135	if (!attr) {
1136		/* Insert new attribute segment. */
1137		goto ins_ext;
1138	}
1139
1140	/* Try to update the existing attribute segment. */
1141	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1142	evcn = le64_to_cpu(attr->nres.evcn);
1143
1144	if (end < next_svcn)
1145		end = next_svcn;
1146	while (end > evcn) {
1147		/* Remove segment [svcn : evcn). */
1148		mi_remove_attr(NULL, mi, attr);
1149
1150		if (!al_remove_le(ni, le)) {
1151			err = -EINVAL;
1152			goto out;
1153		}
1154
1155		if (evcn + 1 >= alloc) {
1156			/* Last attribute segment. */
1157			evcn1 = evcn + 1;
1158			goto ins_ext;
1159		}
1160
1161		if (ni_load_mi(ni, le, &mi)) {
1162			attr = NULL;
1163			goto out;
1164		}
1165
1166		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1167		if (!attr) {
1168			err = -EINVAL;
1169			goto out;
1170		}
1171		svcn = le64_to_cpu(attr->nres.svcn);
1172		evcn = le64_to_cpu(attr->nres.evcn);
1173	}
1174
1175	if (end < svcn)
1176		end = svcn;
1177
1178	err = attr_load_runs(attr, ni, run, &end);
1179	if (err)
1180		goto out;
1181
1182	evcn1 = evcn + 1;
1183	attr->nres.svcn = cpu_to_le64(next_svcn);
1184	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1185	if (err)
1186		goto out;
1187
1188	le->vcn = cpu_to_le64(next_svcn);
1189	ni->attr_list.dirty = true;
1190	mi->dirty = true;
1191	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1192
1193ins_ext:
1194	if (evcn1 > next_svcn) {
1195		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1196					    next_svcn, evcn1 - next_svcn,
1197					    attr_b->flags, &attr, &mi, NULL);
1198		if (err)
1199			goto out;
1200	}
1201ok:
1202	run_truncate_around(run, vcn);
1203out:
1204	if (err && step > 1) {
1205		/* Too complex to restore. */
1206		_ntfs_bad_inode(&ni->vfs_inode);
1207	}
1208	up_write(&ni->file.run_lock);
1209	ni_unlock(ni);
1210
1211	return err;
1212
1213undo1:
1214	/* Undo step 1. */
1215	attr_b->nres.total_size = cpu_to_le64(total_size0);
1216	inode_set_bytes(&ni->vfs_inode, total_size0);
1217
1218	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1219	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1220	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1221		_ntfs_bad_inode(&ni->vfs_inode);
1222	}
1223	goto out;
1224}
1225
1226int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1227{
1228	u64 vbo;
1229	struct ATTRIB *attr;
1230	u32 data_size;
1231
1232	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1233	if (!attr)
1234		return -EINVAL;
1235
1236	if (attr->non_res)
1237		return E_NTFS_NONRESIDENT;
1238
1239	vbo = page->index << PAGE_SHIFT;
1240	data_size = le32_to_cpu(attr->res.data_size);
1241	if (vbo < data_size) {
1242		const char *data = resident_data(attr);
1243		char *kaddr = kmap_atomic(page);
1244		u32 use = data_size - vbo;
1245
1246		if (use > PAGE_SIZE)
1247			use = PAGE_SIZE;
1248
1249		memcpy(kaddr, data + vbo, use);
1250		memset(kaddr + use, 0, PAGE_SIZE - use);
1251		kunmap_atomic(kaddr);
1252		flush_dcache_page(page);
1253		SetPageUptodate(page);
1254	} else if (!PageUptodate(page)) {
1255		zero_user_segment(page, 0, PAGE_SIZE);
1256		SetPageUptodate(page);
1257	}
1258
1259	return 0;
1260}
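/*
 * Illustrative example: for a resident attribute with data_size == 100,
 * reading page 0 copies those 100 bytes and zero-fills the remaining
 * PAGE_SIZE - 100 bytes; pages entirely past the data are simply
 * zeroed.
 */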
1261
1262int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1263{
1264	u64 vbo;
1265	struct mft_inode *mi;
1266	struct ATTRIB *attr;
1267	u32 data_size;
1268
1269	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1270	if (!attr)
1271		return -EINVAL;
1272
1273	if (attr->non_res) {
1274		/* Return special error code to check this case. */
1275		return E_NTFS_NONRESIDENT;
1276	}
1277
1278	vbo = page->index << PAGE_SHIFT;
1279	data_size = le32_to_cpu(attr->res.data_size);
1280	if (vbo < data_size) {
1281		char *data = resident_data(attr);
1282		char *kaddr = kmap_atomic(page);
1283		u32 use = data_size - vbo;
1284
1285		if (use > PAGE_SIZE)
1286			use = PAGE_SIZE;
1287		memcpy(data + vbo, kaddr, use);
1288		kunmap_atomic(kaddr);
1289		mi->dirty = true;
1290	}
1291	ni->i_valid = data_size;
1292
1293	return 0;
1294}
1295
1296/*
1297 * attr_load_runs_vcn - Load runs with VCN.
1298 */
1299int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1300		       const __le16 *name, u8 name_len, struct runs_tree *run,
1301		       CLST vcn)
1302{
1303	struct ATTRIB *attr;
1304	int err;
1305	CLST svcn, evcn;
1306	u16 ro;
1307
1308	if (!ni) {
1309		/* Is record corrupted? */
1310		return -ENOENT;
1311	}
1312
1313	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1314	if (!attr) {
1315		/* Is record corrupted? */
1316		return -ENOENT;
1317	}
1318
1319	svcn = le64_to_cpu(attr->nres.svcn);
1320	evcn = le64_to_cpu(attr->nres.evcn);
1321
1322	if (evcn < vcn || vcn < svcn) {
1323		/* Is record corrupted? */
1324		return -EINVAL;
1325	}
1326
1327	ro = le16_to_cpu(attr->nres.run_off);
1328
1329	if (ro > le32_to_cpu(attr->size))
1330		return -EINVAL;
1331
1332	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1333			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1334	if (err < 0)
1335		return err;
1336	return 0;
1337}
1338
1339/*
1340 * attr_load_runs_range - Load runs for given range [from to).
1341 */
1342int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1343			 const __le16 *name, u8 name_len, struct runs_tree *run,
1344			 u64 from, u64 to)
1345{
1346	struct ntfs_sb_info *sbi = ni->mi.sbi;
1347	u8 cluster_bits = sbi->cluster_bits;
1348	CLST vcn;
1349	CLST vcn_last = (to - 1) >> cluster_bits;
1350	CLST lcn, clen;
1351	int err;
1352
1353	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1354		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1355			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1356						 vcn);
1357			if (err)
1358				return err;
1359			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1360		}
1361	}
1362
1363	return 0;
1364}
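/*
 * Illustrative example: with 4 KiB clusters, loading the byte range
 * [0, 10000) walks VCNs 0..2; any VCN missing from the cached @run is
 * fetched on demand via attr_load_runs_vcn().
 */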
1365
1366#ifdef CONFIG_NTFS3_LZX_XPRESS
1367/*
1368 * attr_wof_frame_info
1369 *
1370 * Read header of Xpress/LZX file to get info about frame.
1371 */
1372int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1373			struct runs_tree *run, u64 frame, u64 frames,
1374			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1375{
1376	struct ntfs_sb_info *sbi = ni->mi.sbi;
1377	u64 vbo[2], off[2], wof_size;
1378	u32 voff;
1379	u8 bytes_per_off;
1380	char *addr;
1381	struct page *page;
1382	int i, err;
1383	__le32 *off32;
1384	__le64 *off64;
1385
1386	if (ni->vfs_inode.i_size < 0x100000000ull) {
1387		/* File starts with an array of 32-bit offsets. */
1388		bytes_per_off = sizeof(__le32);
1389		vbo[1] = frame << 2;
1390		*vbo_data = frames << 2;
1391	} else {
1392		/* File starts with an array of 64-bit offsets. */
1393		bytes_per_off = sizeof(__le64);
1394		vbo[1] = frame << 3;
1395		*vbo_data = frames << 3;
1396	}
1397
1398	/*
1399	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1400	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1401	 */
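	/*
	 * Illustrative example: with 32-bit offsets and frame == 3,
	 * vbo[1] == 12, so off[0] is read at byte 8 and off[1] at byte 12;
	 * the frame's on-disk data then starts at (frames << 2) + off[0]
	 * and *ondisk_size == off[1] - off[0].
	 */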
1402	if (!attr->non_res) {
1403		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1404			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1405			return -EINVAL;
1406		}
1407		addr = resident_data(attr);
1408
1409		if (bytes_per_off == sizeof(__le32)) {
1410			off32 = Add2Ptr(addr, vbo[1]);
1411			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1412			off[1] = le32_to_cpu(off32[0]);
1413		} else {
1414			off64 = Add2Ptr(addr, vbo[1]);
1415			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1416			off[1] = le64_to_cpu(off64[0]);
1417		}
1418
1419		*vbo_data += off[0];
1420		*ondisk_size = off[1] - off[0];
1421		return 0;
1422	}
1423
1424	wof_size = le64_to_cpu(attr->nres.data_size);
1425	down_write(&ni->file.run_lock);
1426	page = ni->file.offs_page;
1427	if (!page) {
1428		page = alloc_page(GFP_KERNEL);
1429		if (!page) {
1430			err = -ENOMEM;
1431			goto out;
1432		}
1433		page->index = -1;
1434		ni->file.offs_page = page;
1435	}
1436	lock_page(page);
1437	addr = page_address(page);
1438
1439	if (vbo[1]) {
1440		voff = vbo[1] & (PAGE_SIZE - 1);
1441		vbo[0] = vbo[1] - bytes_per_off;
1442		i = 0;
1443	} else {
1444		voff = 0;
1445		vbo[0] = 0;
1446		off[0] = 0;
1447		i = 1;
1448	}
1449
1450	do {
1451		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1452
1453		if (index != page->index) {
1454			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1455			u64 to = min(from + PAGE_SIZE, wof_size);
1456
1457			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1458						   ARRAY_SIZE(WOF_NAME), run,
1459						   from, to);
1460			if (err)
1461				goto out1;
1462
1463			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1464					     to - from, REQ_OP_READ);
1465			if (err) {
1466				page->index = -1;
1467				goto out1;
1468			}
1469			page->index = index;
1470		}
1471
1472		if (i) {
1473			if (bytes_per_off == sizeof(__le32)) {
1474				off32 = Add2Ptr(addr, voff);
1475				off[1] = le32_to_cpu(*off32);
1476			} else {
1477				off64 = Add2Ptr(addr, voff);
1478				off[1] = le64_to_cpu(*off64);
1479			}
1480		} else if (!voff) {
1481			if (bytes_per_off == sizeof(__le32)) {
1482				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1483				off[0] = le32_to_cpu(*off32);
1484			} else {
1485				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1486				off[0] = le64_to_cpu(*off64);
1487			}
1488		} else {
1489			/* Two values in one page. */
1490			if (bytes_per_off == sizeof(__le32)) {
1491				off32 = Add2Ptr(addr, voff);
1492				off[0] = le32_to_cpu(off32[-1]);
1493				off[1] = le32_to_cpu(off32[0]);
1494			} else {
1495				off64 = Add2Ptr(addr, voff);
1496				off[0] = le64_to_cpu(off64[-1]);
1497				off[1] = le64_to_cpu(off64[0]);
1498			}
1499			break;
1500		}
1501	} while (++i < 2);
1502
1503	*vbo_data += off[0];
1504	*ondisk_size = off[1] - off[0];
1505
1506out1:
1507	unlock_page(page);
1508out:
1509	up_write(&ni->file.run_lock);
1510	return err;
1511}
1512#endif
1513
1514/*
1515 * attr_is_frame_compressed - Used to detect compressed frame.
1516 */
1517int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1518			     CLST frame, CLST *clst_data)
1519{
1520	int err;
1521	u32 clst_frame;
1522	CLST clen, lcn, vcn, alen, slen, vcn_next;
1523	size_t idx;
1524	struct runs_tree *run;
1525
1526	*clst_data = 0;
1527
1528	if (!is_attr_compressed(attr))
1529		return 0;
1530
1531	if (!attr->non_res)
1532		return 0;
1533
1534	clst_frame = 1u << attr->nres.c_unit;
1535	vcn = frame * clst_frame;
1536	run = &ni->file.run;
1537
1538	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1539		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1540					 attr->name_len, run, vcn);
1541		if (err)
1542			return err;
1543
1544		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1545			return -EINVAL;
1546	}
1547
1548	if (lcn == SPARSE_LCN) {
1549		/* Sparse frame. */
1550		return 0;
1551	}
1552
1553	if (clen >= clst_frame) {
1554		/*
1555		 * The frame is not compressed because
1556		 * it does not contain any sparse clusters.
1557		 */
1558		*clst_data = clst_frame;
1559		return 0;
1560	}
1561
1562	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1563	slen = 0;
1564	*clst_data = clen;
1565
1566	/*
1567	 * The frame is compressed if *clst_data + slen >= clst_frame.
1568	 * Check next fragments.
1569	 */
1570	while ((vcn += clen) < alen) {
1571		vcn_next = vcn;
1572
1573		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1574		    vcn_next != vcn) {
1575			err = attr_load_runs_vcn(ni, attr->type,
1576						 attr_name(attr),
1577						 attr->name_len, run, vcn_next);
1578			if (err)
1579				return err;
1580			vcn = vcn_next;
1581
1582			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1583				return -EINVAL;
1584		}
1585
1586		if (lcn == SPARSE_LCN) {
1587			slen += clen;
1588		} else {
1589			if (slen) {
1590				/*
1591				 * Data clusters + sparse clusters are
1592				 * not enough to fill the frame.
1593				 */
1594				return -EINVAL;
1595			}
1596			*clst_data += clen;
1597		}
1598
1599		if (*clst_data + slen >= clst_frame) {
1600			if (!slen) {
1601				/*
1602				 * There are no sparse clusters in this frame,
1603				 * so it is not compressed.
1604				 */
1605				*clst_data = clst_frame;
1606			} else {
1607				/* Frame is compressed. */
1608			}
1609			break;
1610		}
1611	}
1612
1613	return 0;
1614}
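/*
 * Worked example (illustrative): with 16 clusters per frame, a frame
 * stored as 10 real clusters followed by 6 sparse clusters is
 * compressed and *clst_data == 10; a frame whose first fragment already
 * covers 16 real clusters is not compressed and *clst_data == 16.
 */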
1615
1616/*
1617 * attr_allocate_frame - Allocate/free clusters for @frame.
1618 *
1619 * Assumed: down_write(&ni->file.run_lock);
1620 */
1621int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1622			u64 new_valid)
1623{
1624	int err = 0;
1625	struct runs_tree *run = &ni->file.run;
1626	struct ntfs_sb_info *sbi = ni->mi.sbi;
1627	struct ATTRIB *attr = NULL, *attr_b;
1628	struct ATTR_LIST_ENTRY *le, *le_b;
1629	struct mft_inode *mi, *mi_b;
1630	CLST svcn, evcn1, next_svcn, len;
1631	CLST vcn, end, clst_data;
1632	u64 total_size, valid_size, data_size;
1633
1634	le_b = NULL;
1635	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1636	if (!attr_b)
1637		return -ENOENT;
1638
1639	if (!is_attr_ext(attr_b))
1640		return -EINVAL;
1641
1642	vcn = frame << NTFS_LZNT_CUNIT;
1643	total_size = le64_to_cpu(attr_b->nres.total_size);
1644
1645	svcn = le64_to_cpu(attr_b->nres.svcn);
1646	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1647	data_size = le64_to_cpu(attr_b->nres.data_size);
1648
1649	if (svcn <= vcn && vcn < evcn1) {
1650		attr = attr_b;
1651		le = le_b;
1652		mi = mi_b;
1653	} else if (!le_b) {
1654		err = -EINVAL;
1655		goto out;
1656	} else {
1657		le = le_b;
1658		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1659				    &mi);
1660		if (!attr) {
1661			err = -EINVAL;
1662			goto out;
1663		}
1664		svcn = le64_to_cpu(attr->nres.svcn);
1665		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1666	}
1667
1668	err = attr_load_runs(attr, ni, run, NULL);
1669	if (err)
1670		goto out;
1671
1672	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1673	if (err)
1674		goto out;
1675
1676	total_size -= (u64)clst_data << sbi->cluster_bits;
1677
1678	len = bytes_to_cluster(sbi, compr_size);
1679
1680	if (len == clst_data)
1681		goto out;
1682
1683	if (len < clst_data) {
1684		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1685					NULL, true);
1686		if (err)
1687			goto out;
1688
1689		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1690				   false)) {
1691			err = -ENOMEM;
1692			goto out;
1693		}
1694		end = vcn + clst_data;
1695		/* Run contains updated range [vcn + len : end). */
1696	} else {
1697		CLST alen, hint = 0;
1698		/* Get the last LCN to allocate from. */
1699		if (vcn + clst_data &&
1700		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1701				      NULL)) {
1702			hint = -1;
1703		}
1704
1705		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1706					     hint + 1, len - clst_data, NULL,
1707					     ALLOCATE_DEF, &alen, 0, NULL,
1708					     NULL);
1709		if (err)
1710			goto out;
1711
1712		end = vcn + len;
1713		/* Run contains updated range [vcn + clst_data : end). */
1714	}
1715
1716	total_size += (u64)len << sbi->cluster_bits;
1717
1718repack:
1719	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1720	if (err)
1721		goto out;
1722
1723	attr_b->nres.total_size = cpu_to_le64(total_size);
1724	inode_set_bytes(&ni->vfs_inode, total_size);
1725
1726	mi_b->dirty = true;
1727	mark_inode_dirty(&ni->vfs_inode);
1728
1729	/* Stored [vcn : next_svcn) from [vcn : end). */
1730	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1731
1732	if (end <= evcn1) {
1733		if (next_svcn == evcn1) {
1734			/* Normal way. Update attribute and exit. */
1735			goto ok;
1736		}
1737		/* Add a new segment [next_svcn : evcn1). */
1738		if (!ni->attr_list.size) {
1739			err = ni_create_attr_list(ni);
1740			if (err)
1741				goto out;
1742			/* Layout of records is changed. */
1743			le_b = NULL;
1744			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1745					      0, NULL, &mi_b);
1746			if (!attr_b) {
1747				err = -ENOENT;
1748				goto out;
1749			}
1750
1751			attr = attr_b;
1752			le = le_b;
1753			mi = mi_b;
1754			goto repack;
1755		}
1756	}
1757
1758	svcn = evcn1;
1759
1760	/* Estimate next attribute. */
1761	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1762
1763	if (attr) {
1764		CLST alloc = bytes_to_cluster(
1765			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1766		CLST evcn = le64_to_cpu(attr->nres.evcn);
1767
1768		if (end < next_svcn)
1769			end = next_svcn;
1770		while (end > evcn) {
1771			/* Remove segment [svcn : evcn). */
1772			mi_remove_attr(NULL, mi, attr);
1773
1774			if (!al_remove_le(ni, le)) {
1775				err = -EINVAL;
1776				goto out;
1777			}
1778
1779			if (evcn + 1 >= alloc) {
1780				/* Last attribute segment. */
1781				evcn1 = evcn + 1;
1782				goto ins_ext;
1783			}
1784
1785			if (ni_load_mi(ni, le, &mi)) {
1786				attr = NULL;
1787				goto out;
1788			}
1789
1790			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1791					    &le->id);
1792			if (!attr) {
1793				err = -EINVAL;
1794				goto out;
1795			}
1796			svcn = le64_to_cpu(attr->nres.svcn);
1797			evcn = le64_to_cpu(attr->nres.evcn);
1798		}
1799
1800		if (end < svcn)
1801			end = svcn;
1802
1803		err = attr_load_runs(attr, ni, run, &end);
1804		if (err)
1805			goto out;
1806
1807		evcn1 = evcn + 1;
1808		attr->nres.svcn = cpu_to_le64(next_svcn);
1809		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1810		if (err)
1811			goto out;
1812
1813		le->vcn = cpu_to_le64(next_svcn);
1814		ni->attr_list.dirty = true;
1815		mi->dirty = true;
1816
1817		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1818	}
1819ins_ext:
1820	if (evcn1 > next_svcn) {
1821		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1822					    next_svcn, evcn1 - next_svcn,
1823					    attr_b->flags, &attr, &mi, NULL);
1824		if (err)
1825			goto out;
1826	}
1827ok:
1828	run_truncate_around(run, vcn);
1829out:
1830	if (attr_b) {
1831		if (new_valid > data_size)
1832			new_valid = data_size;
1833
1834		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1835		if (new_valid != valid_size) {
1836			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1837			mi_b->dirty = true;
1838		}
1839	}
1840
1841	return err;
1842}
1843
1844/*
1845 * attr_collapse_range - Collapse range in file.
1846 */
1847int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1848{
1849	int err = 0;
1850	struct runs_tree *run = &ni->file.run;
1851	struct ntfs_sb_info *sbi = ni->mi.sbi;
1852	struct ATTRIB *attr = NULL, *attr_b;
1853	struct ATTR_LIST_ENTRY *le, *le_b;
1854	struct mft_inode *mi, *mi_b;
1855	CLST svcn, evcn1, len, dealloc, alen;
1856	CLST vcn, end;
1857	u64 valid_size, data_size, alloc_size, total_size;
1858	u32 mask;
1859	__le16 a_flags;
1860
1861	if (!bytes)
1862		return 0;
1863
1864	le_b = NULL;
1865	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1866	if (!attr_b)
1867		return -ENOENT;
1868
1869	if (!attr_b->non_res) {
1870		/* Attribute is resident. Nothing to do? */
1871		return 0;
1872	}
1873
1874	data_size = le64_to_cpu(attr_b->nres.data_size);
1875	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1876	a_flags = attr_b->flags;
1877
1878	if (is_attr_ext(attr_b)) {
1879		total_size = le64_to_cpu(attr_b->nres.total_size);
1880		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1881	} else {
1882		total_size = alloc_size;
1883		mask = sbi->cluster_mask;
1884	}
1885
1886	if ((vbo & mask) || (bytes & mask)) {
1887		/* Only cluster-aligned ranges may be collapsed. */
1888		return -EINVAL;
1889	}
1890
1891	if (vbo > data_size)
1892		return -EINVAL;
1893
1894	down_write(&ni->file.run_lock);
1895
1896	if (vbo + bytes >= data_size) {
1897		u64 new_valid = min(ni->i_valid, vbo);
1898
1899		/* Simply truncate the file at 'vbo'. */
1900		truncate_setsize(&ni->vfs_inode, vbo);
1901		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1902				    &new_valid, true, NULL);
1903
1904		if (!err && new_valid < ni->i_valid)
1905			ni->i_valid = new_valid;
1906
1907		goto out;
1908	}
1909
1910	/*
1911	 * Enumerate all attribute segments and collapse.
1912	 */
1913	alen = alloc_size >> sbi->cluster_bits;
1914	vcn = vbo >> sbi->cluster_bits;
1915	len = bytes >> sbi->cluster_bits;
1916	end = vcn + len;
1917	dealloc = 0;
1918
1919	svcn = le64_to_cpu(attr_b->nres.svcn);
1920	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1921
1922	if (svcn <= vcn && vcn < evcn1) {
1923		attr = attr_b;
1924		le = le_b;
1925		mi = mi_b;
1926	} else if (!le_b) {
1927		err = -EINVAL;
1928		goto out;
1929	} else {
1930		le = le_b;
1931		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1932				    &mi);
1933		if (!attr) {
1934			err = -EINVAL;
1935			goto out;
1936		}
1937
1938		svcn = le64_to_cpu(attr->nres.svcn);
1939		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1940	}
1941
1942	for (;;) {
1943		if (svcn >= end) {
1944			/* Shift VCN. */
1945			attr->nres.svcn = cpu_to_le64(svcn - len);
1946			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1947			if (le) {
1948				le->vcn = attr->nres.svcn;
1949				ni->attr_list.dirty = true;
1950			}
1951			mi->dirty = true;
1952		} else if (svcn < vcn || end < evcn1) {
1953			CLST vcn1, eat, next_svcn;
1954
1955			/* Collapse a part of this attribute segment. */
1956			err = attr_load_runs(attr, ni, run, &svcn);
1957			if (err)
1958				goto out;
1959			vcn1 = max(vcn, svcn);
1960			eat = min(end, evcn1) - vcn1;
1961
1962			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1963						true);
1964			if (err)
1965				goto out;
1966
1967			if (!run_collapse_range(run, vcn1, eat)) {
1968				err = -ENOMEM;
1969				goto out;
1970			}
1971
1972			if (svcn >= vcn) {
1973				/* Shift VCN. */
1974				attr->nres.svcn = cpu_to_le64(vcn);
1975				if (le) {
1976					le->vcn = attr->nres.svcn;
1977					ni->attr_list.dirty = true;
1978				}
1979			}
1980
1981			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1982			if (err)
1983				goto out;
1984
1985			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1986			if (next_svcn + eat < evcn1) {
1987				err = ni_insert_nonresident(
1988					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1989					evcn1 - eat - next_svcn, a_flags, &attr,
1990					&mi, &le);
1991				if (err)
1992					goto out;
1993
1994				/* Layout of records may be changed. */
1995				attr_b = NULL;
1996			}
1997
1998			/* Free all allocated memory. */
1999			run_truncate(run, 0);
2000		} else {
2001			u16 le_sz;
2002			u16 roff = le16_to_cpu(attr->nres.run_off);
2003
2004			if (roff > le32_to_cpu(attr->size)) {
2005				err = -EINVAL;
2006				goto out;
2007			}
2008
2009			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2010				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2011				      le32_to_cpu(attr->size) - roff);
2012
2013			/* Delete this attribute segment. */
2014			mi_remove_attr(NULL, mi, attr);
2015			if (!le)
2016				break;
2017
2018			le_sz = le16_to_cpu(le->size);
2019			if (!al_remove_le(ni, le)) {
2020				err = -EINVAL;
2021				goto out;
2022			}
2023
2024			if (evcn1 >= alen)
2025				break;
2026
2027			if (!svcn) {
2028				/* Load next record that contains this attribute. */
2029				if (ni_load_mi(ni, le, &mi)) {
2030					err = -EINVAL;
2031					goto out;
2032				}
2033
2034				/* Look for required attribute. */
2035				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2036						    0, &le->id);
2037				if (!attr) {
2038					err = -EINVAL;
2039					goto out;
2040				}
2041				goto next_attr;
2042			}
2043			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2044		}
2045
2046		if (evcn1 >= alen)
2047			break;
2048
2049		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2050		if (!attr) {
2051			err = -EINVAL;
2052			goto out;
2053		}
2054
2055next_attr:
2056		svcn = le64_to_cpu(attr->nres.svcn);
2057		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2058	}
2059
2060	if (!attr_b) {
2061		le_b = NULL;
2062		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2063				      &mi_b);
2064		if (!attr_b) {
2065			err = -ENOENT;
2066			goto out;
2067		}
2068	}
2069
2070	data_size -= bytes;
2071	valid_size = ni->i_valid;
2072	if (vbo + bytes <= valid_size)
2073		valid_size -= bytes;
2074	else if (vbo < valid_size)
2075		valid_size = vbo;
2076
2077	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2078	attr_b->nres.data_size = cpu_to_le64(data_size);
2079	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2080	total_size -= (u64)dealloc << sbi->cluster_bits;
2081	if (is_attr_ext(attr_b))
2082		attr_b->nres.total_size = cpu_to_le64(total_size);
2083	mi_b->dirty = true;
2084
2085	/* Update inode size. */
2086	ni->i_valid = valid_size;
2087	i_size_write(&ni->vfs_inode, data_size);
2088	inode_set_bytes(&ni->vfs_inode, total_size);
2089	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2090	mark_inode_dirty(&ni->vfs_inode);
2091
2092out:
2093	up_write(&ni->file.run_lock);
2094	if (err)
2095		_ntfs_bad_inode(&ni->vfs_inode);
2096
2097	return err;
2098}
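/*
 * Worked example (illustrative): with 4 KiB clusters and a
 * non-compressed attribute, collapsing vbo == 64 KiB, bytes == 16 KiB
 * removes clusters [16, 20) and shifts every later VCN down by 4; data
 * size and allocated size drop by 16 KiB and the valid size is clamped
 * accordingly.
 */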
2099
2100/*
2101 * attr_punch_hole
2102 *
2103 * Not for normal files.
2104 */
2105int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2106{
2107	int err = 0;
2108	struct runs_tree *run = &ni->file.run;
2109	struct ntfs_sb_info *sbi = ni->mi.sbi;
2110	struct ATTRIB *attr = NULL, *attr_b;
2111	struct ATTR_LIST_ENTRY *le, *le_b;
2112	struct mft_inode *mi, *mi_b;
2113	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2114	u64 total_size, alloc_size;
2115	u32 mask;
2116	__le16 a_flags;
2117	struct runs_tree run2;
2118
2119	if (!bytes)
2120		return 0;
2121
2122	le_b = NULL;
2123	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2124	if (!attr_b)
2125		return -ENOENT;
2126
2127	if (!attr_b->non_res) {
2128		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2129		u32 from, to;
2130
2131		if (vbo > data_size)
2132			return 0;
2133
2134		from = vbo;
2135		to = min_t(u64, vbo + bytes, data_size);
2136		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2137		return 0;
2138	}
2139
2140	if (!is_attr_ext(attr_b))
2141		return -EOPNOTSUPP;
2142
2143	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2144	total_size = le64_to_cpu(attr_b->nres.total_size);
2145
2146	if (vbo >= alloc_size) {
2147		/* NOTE: Punching beyond the allocation is allowed (no-op). */
2148		return 0;
2149	}
2150
2151	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2152
2153	bytes += vbo;
2154	if (bytes > alloc_size)
2155		bytes = alloc_size;
2156	bytes -= vbo;
2157
2158	if ((vbo & mask) || (bytes & mask)) {
2159		/* We have to zero the unaligned head and/or tail range(s). */
2160		if (!frame_size) {
2161			/* Caller asserts the range is already aligned. */
2162			return -EINVAL;
2163		}
2164		*frame_size = mask + 1;
2165		return E_NTFS_NOTALIGNED;
2166	}
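	/*
	 * Example (illustrative): 4K clusters with c_unit == 4 give
	 * 16-cluster frames, so mask == 0xffff and only 64K-aligned
	 * requests proceed here; others return E_NTFS_NOTALIGNED with
	 * *frame_size == SZ_64K so the caller can zero instead.
	 */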
2167
2168	down_write(&ni->file.run_lock);
2169	run_init(&run2);
2170	run_truncate(run, 0);
2171
2172	/*
2173	 * Enumerate all attribute segments and punch hole where necessary.
2174	 */
2175	alen = alloc_size >> sbi->cluster_bits;
2176	vcn = vbo >> sbi->cluster_bits;
2177	len = bytes >> sbi->cluster_bits;
2178	end = vcn + len;
2179	hole = 0;
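	/*
	 * E.g. (illustrative): with 4K clusters, vbo = 64K and bytes = 128K
	 * yield vcn = 16, len = 32, end = 48.
	 */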
2180
2181	svcn = le64_to_cpu(attr_b->nres.svcn);
2182	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2183	a_flags = attr_b->flags;
2184
2185	if (svcn <= vcn && vcn < evcn1) {
2186		attr = attr_b;
2187		le = le_b;
2188		mi = mi_b;
2189	} else if (!le_b) {
2190		err = -EINVAL;
2191		goto bad_inode;
2192	} else {
2193		le = le_b;
2194		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2195				    &mi);
2196		if (!attr) {
2197			err = -EINVAL;
2198			goto bad_inode;
2199		}
2200
2201		svcn = le64_to_cpu(attr->nres.svcn);
2202		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2203	}
2204
2205	while (svcn < end) {
2206		CLST vcn1, zero, hole2 = hole;
2207
2208		err = attr_load_runs(attr, ni, run, &svcn);
2209		if (err)
2210			goto done;
2211		vcn1 = max(vcn, svcn);
2212		zero = min(end, evcn1) - vcn1;
2213
2214		/*
2215		 * Check range [vcn1 + zero).
2216		 * Calculate how many clusters there are.
2217		 * Don't do any destructive actions.
2218		 */
2219		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2220		if (err)
2221			goto done;
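		/*
		 * Passing NULL instead of sbi above turns run_deallocate_ex()
		 * into a dry run: it only counts the allocated clusters into
		 * hole2 and frees nothing.
		 */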
2222
2223		/* Check if the required range is already a hole. */
2224		if (hole2 == hole)
2225			goto next_attr;
2226
2227		/* Clone the run so the change can be undone. */
2228		err = run_clone(run, &run2);
2229		if (err)
2230			goto done;
2231
2232		/* Make the range [vcn1 + zero) a sparse hole. */
2233		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2234			err = -ENOMEM;
2235			goto done;
2236		}
2237
2238		/* Update run in attribute segment. */
2239		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2240		if (err)
2241			goto done;
2242		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2243		if (next_svcn < evcn1) {
2244			/* Insert new attribute segment. */
2245			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2246						    next_svcn,
2247						    evcn1 - next_svcn, a_flags,
2248						    &attr, &mi, &le);
2249			if (err)
2250				goto undo_punch;
2251
2252			/* Layout of records may have changed. */
2253			attr_b = NULL;
2254		}
2255
2256		/* Real deallocate of the validated clone. Should not fail. */
2257		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2258
2259next_attr:
2260		/* Free all allocated memory. */
2261		run_truncate(run, 0);
2262
2263		if (evcn1 >= alen)
2264			break;
2265
2266		/* Get next attribute segment. */
2267		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2268		if (!attr) {
2269			err = -EINVAL;
2270			goto bad_inode;
2271		}
2272
2273		svcn = le64_to_cpu(attr->nres.svcn);
2274		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2275	}
2276
2277done:
2278	if (!hole)
2279		goto out;
2280
2281	if (!attr_b) {
2282		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2283				      &mi_b);
2284		if (!attr_b) {
2285			err = -EINVAL;
2286			goto bad_inode;
2287		}
2288	}
2289
2290	total_size -= (u64)hole << sbi->cluster_bits;
2291	attr_b->nres.total_size = cpu_to_le64(total_size);
2292	mi_b->dirty = true;
2293
2294	/* Update inode size. */
2295	inode_set_bytes(&ni->vfs_inode, total_size);
2296	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2297	mark_inode_dirty(&ni->vfs_inode);
2298
2299out:
2300	run_close(&run2);
2301	up_write(&ni->file.run_lock);
2302	return err;
2303
2304bad_inode:
2305	_ntfs_bad_inode(&ni->vfs_inode);
2306	goto out;
2307
2308undo_punch:
2309	/*
2310	 * Restore packed runs.
2311	 * 'mi_pack_runs' should not fail because we restore the original runs.
2312	 */
2313	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2314		goto bad_inode;
2315
2316	goto done;
2317}
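
/*
 * A minimal calling sketch (hypothetical values): punch 64K at offset
 * 64K, falling back to zeroing when the range is not frame aligned.
 *
 *	u32 frame_size;
 *	int err = attr_punch_hole(ni, SZ_64K, SZ_64K, &frame_size);
 *
 *	if (err == E_NTFS_NOTALIGNED) {
 *		... zero the range in frame_size units instead ...
 *	}
 */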
2318
2319/*
2320 * attr_insert_range - Insert a range (hole) into a file.
2321 * Not for normal files: only sparse/compressed attributes are supported.
2322 */
2323int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2324{
2325	int err = 0;
2326	struct runs_tree *run = &ni->file.run;
2327	struct ntfs_sb_info *sbi = ni->mi.sbi;
2328	struct ATTRIB *attr = NULL, *attr_b;
2329	struct ATTR_LIST_ENTRY *le, *le_b;
2330	struct mft_inode *mi, *mi_b;
2331	CLST vcn, svcn, evcn1, len, next_svcn;
2332	u64 data_size, alloc_size;
2333	u32 mask;
2334	__le16 a_flags;
2335
2336	if (!bytes)
2337		return 0;
2338
2339	le_b = NULL;
2340	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2341	if (!attr_b)
2342		return -ENOENT;
2343
2344	if (!is_attr_ext(attr_b)) {
2345		/* Checked by the caller; see the fallocate path. */
2346		return -EOPNOTSUPP;
2347	}
2348
2349	if (!attr_b->non_res) {
2350		data_size = le32_to_cpu(attr_b->res.data_size);
2351		alloc_size = data_size;
2352		mask = sbi->cluster_mask; /* cluster_size - 1 */
2353	} else {
2354		data_size = le64_to_cpu(attr_b->nres.data_size);
2355		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2356		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2357	}
2358
2359	if (vbo > data_size) {
2360		/* Inserting a range beyond the file size is not allowed. */
2361		return -EINVAL;
2362	}
2363
2364	if ((vbo & mask) || (bytes & mask)) {
2365		/* Only frame-aligned ranges may be inserted. */
2366		return -EINVAL;
2367	}
2368
2369	/*
2370	 * valid_size <= data_size <= alloc_size
2371	 * Check that the resulting alloc_size stays within the maximum possible.
2372	 */
2373	if (bytes > sbi->maxbytes_sparse - alloc_size)
2374		return -EFBIG;
2375
2376	vcn = vbo >> sbi->cluster_bits;
2377	len = bytes >> sbi->cluster_bits;
2378
2379	down_write(&ni->file.run_lock);
2380
2381	if (!attr_b->non_res) {
2382		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2383				    data_size + bytes, NULL, false, NULL);
2384
2385		le_b = NULL;
2386		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2387				      &mi_b);
2388		if (!attr_b) {
2389			err = -EINVAL;
2390			goto bad_inode;
2391		}
2392
2393		if (err)
2394			goto out;
2395
2396		if (!attr_b->non_res) {
2397			/* Still resident. */
2398			char *data = Add2Ptr(attr_b,
2399					     le16_to_cpu(attr_b->res.data_off));
2400
2401			memmove(data + bytes, data, bytes);
2402			memset(data, 0, bytes);
2403			goto done;
2404		}
2405
2406		/* The resident attribute has become non-resident. */
2407		data_size = le64_to_cpu(attr_b->nres.data_size);
2408		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2409	}
2410
2411	/*
2412	 * Enumerate all attribute segments and shift start vcn.
2413	 */
2414	a_flags = attr_b->flags;
2415	svcn = le64_to_cpu(attr_b->nres.svcn);
2416	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2417
2418	if (svcn <= vcn && vcn < evcn1) {
2419		attr = attr_b;
2420		le = le_b;
2421		mi = mi_b;
2422	} else if (!le_b) {
2423		err = -EINVAL;
2424		goto bad_inode;
2425	} else {
2426		le = le_b;
2427		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2428				    &mi);
2429		if (!attr) {
2430			err = -EINVAL;
2431			goto bad_inode;
2432		}
2433
2434		svcn = le64_to_cpu(attr->nres.svcn);
2435		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2436	}
2437
2438	run_truncate(run, 0); /* clear cached values. */
2439	err = attr_load_runs(attr, ni, run, NULL);
2440	if (err)
2441		goto out;
2442
2443	if (!run_insert_range(run, vcn, len)) {
2444		err = -ENOMEM;
2445		goto out;
2446	}
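	/*
	 * run_insert_range() shifts every run at or above vcn up by len,
	 * leaving a gap. E.g. (illustrative): runs [0-99 -> lcn 500],
	 * [100-199 -> lcn 900] with vcn = 100, len = 50 become
	 * [0-99 -> 500], <gap 100-149>, [150-249 -> 900].
	 */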
2447
2448	/* Try to pack as much as possible into the current record. */
2449	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2450	if (err)
2451		goto out;
2452
2453	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2454
2455	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2456	       attr->type == ATTR_DATA && !attr->name_len) {
2457		le64_add_cpu(&attr->nres.svcn, len);
2458		le64_add_cpu(&attr->nres.evcn, len);
2459		if (le) {
2460			le->vcn = attr->nres.svcn;
2461			ni->attr_list.dirty = true;
2462		}
2463		mi->dirty = true;
2464	}
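	/*
	 * Example (illustrative): with segments [0,99] and [100,199], after
	 * inserting len = 50 at vcn = 100 the second segment describes
	 * [150,249]; the loop above bumps svcn/evcn and the matching
	 * le->vcn of every segment that follows the insertion point.
	 */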
2465
2466	if (next_svcn < evcn1 + len) {
2467		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2468					    next_svcn, evcn1 + len - next_svcn,
2469					    a_flags, NULL, NULL, NULL);
2470
2471		le_b = NULL;
2472		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2473				      &mi_b);
2474		if (!attr_b) {
2475			err = -EINVAL;
2476			goto bad_inode;
2477		}
2478
2479		if (err) {
2480			/* ni_insert_nonresident failed. Try to undo. */
2481			goto undo_insert_range;
2482		}
2483	}
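	/*
	 * Note the ordering above: attr_b is re-found before err is checked
	 * because ni_insert_nonresident() may relocate records even on
	 * failure, and the undo path needs a valid attr_b.
	 */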
2484
2485	/*
2486	 * Update primary attribute segment.
2487	 */
2488	if (vbo <= ni->i_valid)
2489		ni->i_valid += bytes;
2490
2491	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2492	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2493
2494	/* ni->i_valid may temporarily differ from valid_size. */
2495	if (ni->i_valid > data_size + bytes)
2496		attr_b->nres.valid_size = attr_b->nres.data_size;
2497	else
2498		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2499	mi_b->dirty = true;
2500
2501done:
2502	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2503	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2504	mark_inode_dirty(&ni->vfs_inode);
2505
2506out:
2507	run_truncate(run, 0); /* clear cached values. */
2508
2509	up_write(&ni->file.run_lock);
2510
2511	return err;
2512
2513bad_inode:
2514	_ntfs_bad_inode(&ni->vfs_inode);
2515	goto out;
2516
2517undo_insert_range:
2518	svcn = le64_to_cpu(attr_b->nres.svcn);
2519	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2520
2521	if (svcn <= vcn && vcn < evcn1) {
2522		attr = attr_b;
2523		le = le_b;
2524		mi = mi_b;
2525	} else if (!le_b) {
2526		goto bad_inode;
2527	} else {
2528		le = le_b;
2529		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2530				    &mi);
2531		if (!attr) {
2532			goto bad_inode;
2533		}
2534
2535		svcn = le64_to_cpu(attr->nres.svcn);
2536		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2537	}
2538
2539	if (attr_load_runs(attr, ni, run, NULL))
2540		goto bad_inode;
2541
2542	if (!run_collapse_range(run, vcn, len))
2543		goto bad_inode;
2544
2545	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2546		goto bad_inode;
2547
2548	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2549	       attr->type == ATTR_DATA && !attr->name_len) {
2550		le64_sub_cpu(&attr->nres.svcn, len);
2551		le64_sub_cpu(&attr->nres.evcn, len);
2552		if (le) {
2553			le->vcn = attr->nres.svcn;
2554			ni->attr_list.dirty = true;
2555		}
2556		mi->dirty = true;
2557	}
2558
2559	goto out;
2560}
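
/*
 * Minimal usage sketch of the function above (hypothetical values):
 * insert a 64K hole at the start of a sparse file; offset and length
 * must be frame aligned.
 *
 *	int err = attr_insert_range(ni, 0, SZ_64K);
 */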
 145
 146/*
 147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 148 */
 149int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 150			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 151			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
 152			   CLST *new_lcn, CLST *new_len)
 153{
 154	int err;
 155	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
 156	size_t cnt = run->count;
 157
 158	for (;;) {
 159		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
 160					       opt);
 161
 162		if (err == -ENOSPC && pre) {
 163			pre = 0;
 164			if (*pre_alloc)
 165				*pre_alloc = 0;
 166			continue;
 167		}
 168
 169		if (err)
 170			goto out;
 171
 172		if (vcn == vcn0) {
 173			/* Return the first fragment. */
 174			if (new_lcn)
 175				*new_lcn = lcn;
 176			if (new_len)
 177				*new_len = flen;
 178		}
 179
 180		/* Add new fragment into run storage. */
 181		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 182			/* Undo last 'ntfs_look_for_free_space' */
 183			mark_as_free_ex(sbi, lcn, len, false);
 184			err = -ENOMEM;
 185			goto out;
 186		}
 187
 188		if (opt & ALLOCATE_ZERO) {
 189			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
 190
 191			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
 192						   (sector_t)lcn << shift,
 193						   (sector_t)flen << shift,
 194						   GFP_NOFS, 0);
 195			if (err)
 196				goto out;
 197		}
 198
 199		vcn += flen;
 200
 201		if (flen >= len || (opt & ALLOCATE_MFT) ||
 202		    (fr && run->count - cnt >= fr)) {
 203			*alen = vcn - vcn0;
 204			return 0;
 205		}
 206
 207		len -= flen;
 208	}
 209
 210out:
 211	/* Undo 'ntfs_look_for_free_space' */
 212	if (vcn - vcn0) {
 213		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
 214		run_truncate(run, vcn0);
 215	}
 216
 217	return err;
 218}
 219
 220/*
 221 * attr_make_nonresident
 222 *
 223 * If page is not NULL - it is already contains resident data
 224 * and locked (called from ni_write_frame()).
 225 */
 226int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 227			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 228			  u64 new_size, struct runs_tree *run,
 229			  struct ATTRIB **ins_attr, struct page *page)
 230{
 231	struct ntfs_sb_info *sbi;
 232	struct ATTRIB *attr_s;
 233	struct MFT_REC *rec;
 234	u32 used, asize, rsize, aoff, align;
 235	bool is_data;
 236	CLST len, alen;
 237	char *next;
 238	int err;
 239
 240	if (attr->non_res) {
 241		*ins_attr = attr;
 242		return 0;
 243	}
 244
 245	sbi = mi->sbi;
 246	rec = mi->mrec;
 247	attr_s = NULL;
 248	used = le32_to_cpu(rec->used);
 249	asize = le32_to_cpu(attr->size);
 250	next = Add2Ptr(attr, asize);
 251	aoff = PtrOffset(rec, attr);
 252	rsize = le32_to_cpu(attr->res.data_size);
 253	is_data = attr->type == ATTR_DATA && !attr->name_len;
 254
 255	align = sbi->cluster_size;
 256	if (is_attr_compressed(attr))
 257		align <<= COMPRESSION_UNIT;
 258	len = (rsize + align - 1) >> sbi->cluster_bits;
 259
 260	run_init(run);
 261
 262	/* Make a copy of original attribute. */
 263	attr_s = kmemdup(attr, asize, GFP_NOFS);
 264	if (!attr_s) {
 265		err = -ENOMEM;
 266		goto out;
 267	}
 268
 269	if (!len) {
 270		/* Empty resident -> Empty nonresident. */
 271		alen = 0;
 272	} else {
 273		const char *data = resident_data(attr);
 274
 275		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 276					     ALLOCATE_DEF, &alen, 0, NULL,
 277					     NULL);
 278		if (err)
 279			goto out1;
 280
 281		if (!rsize) {
 282			/* Empty resident -> Non empty nonresident. */
 283		} else if (!is_data) {
 284			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
 285			if (err)
 286				goto out2;
 287		} else if (!page) {
 288			char *kaddr;
 289
 290			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
 291			if (!page) {
 292				err = -ENOMEM;
 293				goto out2;
 294			}
 295			kaddr = kmap_atomic(page);
 296			memcpy(kaddr, data, rsize);
 297			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
 298			kunmap_atomic(kaddr);
 299			flush_dcache_page(page);
 300			SetPageUptodate(page);
 301			set_page_dirty(page);
 302			unlock_page(page);
 303			put_page(page);
 304		}
 305	}
 306
 307	/* Remove original attribute. */
 308	used -= asize;
 309	memmove(attr, Add2Ptr(attr, asize), used - aoff);
 310	rec->used = cpu_to_le32(used);
 311	mi->dirty = true;
 312	if (le)
 313		al_remove_le(ni, le);
 314
 315	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
 316				    attr_s->name_len, run, 0, alen,
 317				    attr_s->flags, &attr, NULL, NULL);
 318	if (err)
 319		goto out3;
 320
 321	kfree(attr_s);
 322	attr->nres.data_size = cpu_to_le64(rsize);
 323	attr->nres.valid_size = attr->nres.data_size;
 324
 325	*ins_attr = attr;
 326
 327	if (is_data)
 328		ni->ni_flags &= ~NI_FLAG_RESIDENT;
 329
 330	/* Resident attribute becomes non resident. */
 331	return 0;
 332
 333out3:
 334	attr = Add2Ptr(rec, aoff);
 335	memmove(next, attr, used - aoff);
 336	memcpy(attr, attr_s, asize);
 337	rec->used = cpu_to_le32(used + asize);
 338	mi->dirty = true;
 339out2:
 340	/* Undo: do not trim new allocated clusters. */
 341	run_deallocate(sbi, run, false);
 342	run_close(run);
 343out1:
 344	kfree(attr_s);
 345out:
 346	return err;
 347}
 348
 349/*
 350 * attr_set_size_res - Helper for attr_set_size().
 351 */
 352static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
 353			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 354			     u64 new_size, struct runs_tree *run,
 355			     struct ATTRIB **ins_attr)
 356{
 357	struct ntfs_sb_info *sbi = mi->sbi;
 358	struct MFT_REC *rec = mi->mrec;
 359	u32 used = le32_to_cpu(rec->used);
 360	u32 asize = le32_to_cpu(attr->size);
 361	u32 aoff = PtrOffset(rec, attr);
 362	u32 rsize = le32_to_cpu(attr->res.data_size);
 363	u32 tail = used - aoff - asize;
 364	char *next = Add2Ptr(attr, asize);
 365	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
 366
 367	if (dsize < 0) {
 368		memmove(next + dsize, next, tail);
 369	} else if (dsize > 0) {
 370		if (used + dsize > sbi->max_bytes_per_attr)
 371			return attr_make_nonresident(ni, attr, le, mi, new_size,
 372						     run, ins_attr, NULL);
 373
 374		memmove(next + dsize, next, tail);
 375		memset(next, 0, dsize);
 376	}
 377
 378	if (new_size > rsize)
 379		memset(Add2Ptr(resident_data(attr), rsize), 0,
 380		       new_size - rsize);
 381
 382	rec->used = cpu_to_le32(used + dsize);
 383	attr->size = cpu_to_le32(asize + dsize);
 384	attr->res.data_size = cpu_to_le32(new_size);
 385	mi->dirty = true;
 386	*ins_attr = attr;
 387
 388	return 0;
 389}
 390
 391/*
 392 * attr_set_size - Change the size of attribute.
 393 *
 394 * Extend:
 395 *   - Sparse/compressed: No allocated clusters.
 396 *   - Normal: Append allocated and preallocated new clusters.
 397 * Shrink:
 398 *   - No deallocate if @keep_prealloc is set.
 399 */
 400int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 401		  const __le16 *name, u8 name_len, struct runs_tree *run,
 402		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
 403		  struct ATTRIB **ret)
 404{
 405	int err = 0;
 406	struct ntfs_sb_info *sbi = ni->mi.sbi;
 407	u8 cluster_bits = sbi->cluster_bits;
 408	bool is_mft =
 409		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
 410	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 411	struct ATTRIB *attr = NULL, *attr_b;
 412	struct ATTR_LIST_ENTRY *le, *le_b;
 413	struct mft_inode *mi, *mi_b;
 414	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 415	CLST next_svcn, pre_alloc = -1, done = 0;
 416	bool is_ext, is_bad = false;
 417	bool dirty = false;
 418	u32 align;
 419	struct MFT_REC *rec;
 420
 421again:
 422	alen = 0;
 423	le_b = NULL;
 424	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 425			      &mi_b);
 426	if (!attr_b) {
 427		err = -ENOENT;
 428		goto bad_inode;
 429	}
 430
 431	if (!attr_b->non_res) {
 432		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 433					&attr_b);
 434		if (err)
 435			return err;
 436
 437		/* Return if file is still resident. */
 438		if (!attr_b->non_res) {
 439			dirty = true;
 440			goto ok1;
 441		}
 442
 443		/* Layout of records may be changed, so do a full search. */
 444		goto again;
 445	}
 446
 447	is_ext = is_attr_ext(attr_b);
 448	align = sbi->cluster_size;
 449	if (is_ext)
 450		align <<= attr_b->nres.c_unit;
 451
 452	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 453	old_size = le64_to_cpu(attr_b->nres.data_size);
 454	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 455
 456again_1:
 457	old_alen = old_alloc >> cluster_bits;
 458
 459	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
 460	new_alen = new_alloc >> cluster_bits;
 461
 462	if (keep_prealloc && new_size < old_size) {
 463		attr_b->nres.data_size = cpu_to_le64(new_size);
 464		mi_b->dirty = dirty = true;
 465		goto ok;
 466	}
 467
 468	vcn = old_alen - 1;
 469
 470	svcn = le64_to_cpu(attr_b->nres.svcn);
 471	evcn = le64_to_cpu(attr_b->nres.evcn);
 472
 473	if (svcn <= vcn && vcn <= evcn) {
 474		attr = attr_b;
 475		le = le_b;
 476		mi = mi_b;
 477	} else if (!le_b) {
 478		err = -EINVAL;
 479		goto bad_inode;
 480	} else {
 481		le = le_b;
 482		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 483				    &mi);
 484		if (!attr) {
 485			err = -EINVAL;
 486			goto bad_inode;
 487		}
 488
 489next_le_1:
 490		svcn = le64_to_cpu(attr->nres.svcn);
 491		evcn = le64_to_cpu(attr->nres.evcn);
 492	}
 493	/*
 494	 * Here we have:
 495	 * attr,mi,le - last attribute segment (containing 'vcn').
 496	 * attr_b,mi_b,le_b - base (primary) attribute segment.
 497	 */
 498next_le:
 499	rec = mi->mrec;
 500	err = attr_load_runs(attr, ni, run, NULL);
 501	if (err)
 502		goto out;
 503
 504	if (new_size > old_size) {
 505		CLST to_allocate;
 506		size_t free;
 507
 508		if (new_alloc <= old_alloc) {
 509			attr_b->nres.data_size = cpu_to_le64(new_size);
 510			mi_b->dirty = dirty = true;
 511			goto ok;
 512		}
 513
 514		/*
 515		 * Add clusters. In simple case we have to:
 516		 *  - allocate space (vcn, lcn, len)
 517		 *  - update packed run in 'mi'
 518		 *  - update attr->nres.evcn
 519		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 520		 */
 521		to_allocate = new_alen - old_alen;
 522add_alloc_in_same_attr_seg:
 523		lcn = 0;
 524		if (is_mft) {
 525			/* MFT allocates clusters from MFT zone. */
 526			pre_alloc = 0;
 527		} else if (is_ext) {
 528			/* No preallocate for sparse/compress. */
 529			pre_alloc = 0;
 530		} else if (pre_alloc == -1) {
 531			pre_alloc = 0;
 532			if (type == ATTR_DATA && !name_len &&
 533			    sbi->options->prealloc) {
 534				pre_alloc =
 535					bytes_to_cluster(
 536						sbi,
 537						get_pre_allocated(new_size)) -
 538					new_alen;
 539			}
 540
 541			/* Get the last LCN to allocate from. */
 542			if (old_alen &&
 543			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
 544				lcn = SPARSE_LCN;
 545			}
 546
 547			if (lcn == SPARSE_LCN)
 548				lcn = 0;
 549			else if (lcn)
 550				lcn += 1;
 551
 552			free = wnd_zeroes(&sbi->used.bitmap);
 553			if (to_allocate > free) {
 554				err = -ENOSPC;
 555				goto out;
 556			}
 557
 558			if (pre_alloc && to_allocate + pre_alloc > free)
 559				pre_alloc = 0;
 560		}
 561
 562		vcn = old_alen;
 563
 564		if (is_ext) {
 565			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
 566					   false)) {
 567				err = -ENOMEM;
 568				goto out;
 569			}
 570			alen = to_allocate;
 571		} else {
 572			/* ~3 bytes per fragment. */
 573			err = attr_allocate_clusters(
 574				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 575				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 576				is_mft ? 0
 577				       : (sbi->record_size -
 578					  le32_to_cpu(rec->used) + 8) /
 579							 3 +
 580						 1,
 581				NULL, NULL);
 582			if (err)
 583				goto out;
 584		}
 585
 586		done += alen;
 587		vcn += alen;
 588		if (to_allocate > alen)
 589			to_allocate -= alen;
 590		else
 591			to_allocate = 0;
 592
 593pack_runs:
 594		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 595		if (err)
 596			goto undo_1;
 597
 598		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 599		new_alloc_tmp = (u64)next_svcn << cluster_bits;
 600		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 601		mi_b->dirty = dirty = true;
 602
 603		if (next_svcn >= vcn && !to_allocate) {
 604			/* Normal way. Update attribute and exit. */
 605			attr_b->nres.data_size = cpu_to_le64(new_size);
 606			goto ok;
 607		}
 608
 609		/* At least two MFT to avoid recursive loop. */
 610		if (is_mft && next_svcn == vcn &&
 611		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
 612			new_size = new_alloc_tmp;
 613			attr_b->nres.data_size = attr_b->nres.alloc_size;
 614			goto ok;
 615		}
 616
 617		if (le32_to_cpu(rec->used) < sbi->record_size) {
 618			old_alen = next_svcn;
 619			evcn = old_alen - 1;
 620			goto add_alloc_in_same_attr_seg;
 621		}
 622
 623		attr_b->nres.data_size = attr_b->nres.alloc_size;
 624		if (new_alloc_tmp < old_valid)
 625			attr_b->nres.valid_size = attr_b->nres.data_size;
 626
 627		if (type == ATTR_LIST) {
 628			err = ni_expand_list(ni);
 629			if (err)
 630				goto undo_2;
 631			if (next_svcn < vcn)
 632				goto pack_runs;
 633
 634			/* Layout of records is changed. */
 635			goto again;
 636		}
 637
 638		if (!ni->attr_list.size) {
 639			err = ni_create_attr_list(ni);
 640			/* In case of error layout of records is not changed. */
 641			if (err)
 642				goto undo_2;
 643			/* Layout of records is changed. */
 644		}
 645
 646		if (next_svcn >= vcn) {
 647			/* This is MFT data, repeat. */
 648			goto again;
 649		}
 650
 651		/* Insert new attribute segment. */
 652		err = ni_insert_nonresident(ni, type, name, name_len, run,
 653					    next_svcn, vcn - next_svcn,
 654					    attr_b->flags, &attr, &mi, NULL);
 655
 656		/*
 657		 * Layout of records maybe changed.
 658		 * Find base attribute to update.
 659		 */
 660		le_b = NULL;
 661		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 662				      NULL, &mi_b);
 663		if (!attr_b) {
 664			err = -EINVAL;
 665			goto bad_inode;
 666		}
 667
 668		if (err) {
 669			/* ni_insert_nonresident failed. */
 670			attr = NULL;
 671			goto undo_2;
 672		}
 673
 674		if (!is_mft)
 675			run_truncate_head(run, evcn + 1);
 676
 677		svcn = le64_to_cpu(attr->nres.svcn);
 678		evcn = le64_to_cpu(attr->nres.evcn);
 679
 680		/*
 681		 * Attribute is in consistency state.
 682		 * Save this point to restore to if next steps fail.
 683		 */
 684		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
 685		attr_b->nres.valid_size = attr_b->nres.data_size =
 686			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 687		mi_b->dirty = dirty = true;
 688		goto again_1;
 689	}
 690
 691	if (new_size != old_size ||
 692	    (new_alloc != old_alloc && !keep_prealloc)) {
 693		/*
 694		 * Truncate clusters. In simple case we have to:
 695		 *  - update packed run in 'mi'
 696		 *  - update attr->nres.evcn
 697		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 698		 *  - mark and trim clusters as free (vcn, lcn, len)
 699		 */
 700		CLST dlen = 0;
 701
 702		vcn = max(svcn, new_alen);
 703		new_alloc_tmp = (u64)vcn << cluster_bits;
 704
 705		if (vcn > svcn) {
 706			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 707			if (err)
 708				goto out;
 709		} else if (le && le->vcn) {
 710			u16 le_sz = le16_to_cpu(le->size);
 711
 712			/*
 713			 * NOTE: List entries for one attribute are always
 714			 * the same size. We deal with last entry (vcn==0)
 715			 * and it is not first in entries array
 716			 * (list entry for std attribute always first).
 717			 * So it is safe to step back.
 718			 */
 719			mi_remove_attr(NULL, mi, attr);
 720
 721			if (!al_remove_le(ni, le)) {
 722				err = -EINVAL;
 723				goto bad_inode;
 724			}
 725
 726			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 727		} else {
 728			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
 729			mi->dirty = true;
 730		}
 731
 732		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 733
 734		if (vcn == new_alen) {
 735			attr_b->nres.data_size = cpu_to_le64(new_size);
 736			if (new_size < old_valid)
 737				attr_b->nres.valid_size =
 738					attr_b->nres.data_size;
 739		} else {
 740			if (new_alloc_tmp <=
 741			    le64_to_cpu(attr_b->nres.data_size))
 742				attr_b->nres.data_size =
 743					attr_b->nres.alloc_size;
 744			if (new_alloc_tmp <
 745			    le64_to_cpu(attr_b->nres.valid_size))
 746				attr_b->nres.valid_size =
 747					attr_b->nres.alloc_size;
 748		}
 749		mi_b->dirty = dirty = true;
 750
 751		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
 752					true);
 753		if (err)
 754			goto out;
 755
 756		if (is_ext) {
 757			/* dlen - really deallocated clusters. */
 758			le64_sub_cpu(&attr_b->nres.total_size,
 759				     ((u64)dlen << cluster_bits));
 760		}
 761
 762		run_truncate(run, vcn);
 763
 764		if (new_alloc_tmp <= new_alloc)
 765			goto ok;
 766
 767		old_size = new_alloc_tmp;
 768		vcn = svcn - 1;
 769
 770		if (le == le_b) {
 771			attr = attr_b;
 772			mi = mi_b;
 773			evcn = svcn - 1;
 774			svcn = 0;
 775			goto next_le;
 776		}
 777
 778		if (le->type != type || le->name_len != name_len ||
 779		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 780			err = -EINVAL;
 781			goto bad_inode;
 782		}
 783
 784		err = ni_load_mi(ni, le, &mi);
 785		if (err)
 786			goto out;
 787
 788		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
 789		if (!attr) {
 790			err = -EINVAL;
 791			goto bad_inode;
 792		}
 793		goto next_le_1;
 794	}
 795
 796ok:
 797	if (new_valid) {
 798		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
 799
 800		if (attr_b->nres.valid_size != valid) {
 801			attr_b->nres.valid_size = valid;
 802			mi_b->dirty = true;
 803		}
 804	}
 805
 806ok1:
 807	if (ret)
 808		*ret = attr_b;
 809
 810	if (((type == ATTR_DATA && !name_len) ||
 811	     (type == ATTR_ALLOC && name == I30_NAME))) {
 812		/* Update inode_set_bytes. */
 813		if (attr_b->non_res) {
 814			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 815			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 816				inode_set_bytes(&ni->vfs_inode, new_alloc);
 817				dirty = true;
 818			}
 819		}
 820
 821		/* Don't forget to update duplicate information in parent. */
 822		if (dirty) {
 823			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 824			mark_inode_dirty(&ni->vfs_inode);
 825		}
 826	}
 827
 828	return 0;
 829
 830undo_2:
 831	vcn -= alen;
 832	attr_b->nres.data_size = cpu_to_le64(old_size);
 833	attr_b->nres.valid_size = cpu_to_le64(old_valid);
 834	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
 835
 836	/* Restore 'attr' and 'mi'. */
 837	if (attr)
 838		goto restore_run;
 839
 840	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
 841	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
 842		attr = attr_b;
 843		le = le_b;
 844		mi = mi_b;
 845	} else if (!le_b) {
 846		err = -EINVAL;
 847		goto bad_inode;
 848	} else {
 849		le = le_b;
 850		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
 851				    &svcn, &mi);
 852		if (!attr)
 853			goto bad_inode;
 854	}
 855
 856restore_run:
 857	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
 858		is_bad = true;
 859
 860undo_1:
 861	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
 862
 863	run_truncate(run, vcn);
 864out:
 865	if (is_bad) {
 866bad_inode:
 867		_ntfs_bad_inode(&ni->vfs_inode);
 868	}
 869	return err;
 870}
 871
 872/*
 873 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 874 *
 875 * @new == NULL means just to get current mapping for 'vcn'
 876 * @new != NULL means allocate real cluster if 'vcn' maps to hole
 877 * @zero - zeroout new allocated clusters
 878 *
 879 *  NOTE:
 880 *  - @new != NULL is called only for sparsed or compressed attributes.
 881 *  - new allocated clusters are zeroed via blkdev_issue_zeroout.
 882 */
 883int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 884			CLST *len, bool *new, bool zero)
 885{
 886	int err = 0;
 887	struct runs_tree *run = &ni->file.run;
 888	struct ntfs_sb_info *sbi;
 889	u8 cluster_bits;
 890	struct ATTRIB *attr = NULL, *attr_b;
 891	struct ATTR_LIST_ENTRY *le, *le_b;
 892	struct mft_inode *mi, *mi_b;
 893	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 894	CLST alloc, evcn;
 895	unsigned fr;
 896	u64 total_size, total_size0;
 897	int step = 0;
 898
 899	if (new)
 900		*new = false;
 901
 902	/* Try to find in cache. */
 903	down_read(&ni->file.run_lock);
 904	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 905		*len = 0;
 906	up_read(&ni->file.run_lock);
 907
 908	if (*len) {
 909		if (*lcn != SPARSE_LCN || !new)
 910			return 0; /* Fast normal way without allocation. */
 911		else if (clen > *len)
 912			clen = *len;
 913	}
 914
 915	/* No cluster in cache or we need to allocate cluster in hole. */
 916	sbi = ni->mi.sbi;
 917	cluster_bits = sbi->cluster_bits;
 918
 919	ni_lock(ni);
 920	down_write(&ni->file.run_lock);
 921
 
 
 
 
 
 
 
 
 
 
 
 922	le_b = NULL;
 923	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
 924	if (!attr_b) {
 925		err = -ENOENT;
 926		goto out;
 927	}
 928
 929	if (!attr_b->non_res) {
 930		*lcn = RESIDENT_LCN;
 931		*len = 1;
 932		goto out;
 933	}
 934
 935	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
 936	if (vcn >= asize) {
 937		if (new) {
 938			err = -EINVAL;
 939		} else {
 940			*len = 1;
 941			*lcn = SPARSE_LCN;
 942		}
 943		goto out;
 944	}
 945
 946	svcn = le64_to_cpu(attr_b->nres.svcn);
 947	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
 948
 949	attr = attr_b;
 950	le = le_b;
 951	mi = mi_b;
 952
 953	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
 954		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
 955				    &mi);
 956		if (!attr) {
 957			err = -EINVAL;
 958			goto out;
 959		}
 960		svcn = le64_to_cpu(attr->nres.svcn);
 961		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 962	}
 963
 964	/* Load in cache actual information. */
 965	err = attr_load_runs(attr, ni, run, NULL);
 966	if (err)
 967		goto out;
 968
 969	if (!*len) {
 970		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 971			if (*lcn != SPARSE_LCN || !new)
 972				goto ok; /* Slow normal way without allocation. */
 973
 974			if (clen > *len)
 975				clen = *len;
 976		} else if (!new) {
 977			/* Here we may return -ENOENT.
 978			 * In any case caller gets zero length. */
 979			goto ok;
 980		}
 981	}
 982
 983	if (!is_attr_ext(attr_b)) {
 984		/* The code below only for sparsed or compressed attributes. */
 985		err = -EINVAL;
 986		goto out;
 987	}
 988
 989	vcn0 = vcn;
 990	to_alloc = clen;
 991	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
 992	/* Allocate frame aligned clusters.
 993	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
 994	 * ntfs3 uses 1 cluster per frame for new created sparsed files. */
 995	if (attr_b->nres.c_unit) {
 996		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
 997		CLST cmask = ~(clst_per_frame - 1);
 998
 999		/* Get frame aligned vcn and to_alloc. */
1000		vcn = vcn0 & cmask;
1001		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1002		if (fr < clst_per_frame)
1003			fr = clst_per_frame;
1004		zero = true;
1005
1006		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
1007		if (vcn < svcn || evcn1 <= vcn) {
1008			/* Load attribute for truncated vcn. */
1009			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
1010					    &vcn, &mi);
1011			if (!attr) {
1012				err = -EINVAL;
1013				goto out;
1014			}
1015			svcn = le64_to_cpu(attr->nres.svcn);
1016			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1017			err = attr_load_runs(attr, ni, run, NULL);
1018			if (err)
1019				goto out;
1020		}
1021	}
1022
1023	if (vcn + to_alloc > asize)
1024		to_alloc = asize - vcn;
1025
1026	/* Get the last LCN to allocate from. */
1027	hint = 0;
1028
1029	if (vcn > evcn1) {
1030		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1031				   false)) {
1032			err = -ENOMEM;
1033			goto out;
1034		}
1035	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1036		hint = -1;
1037	}
1038
1039	/* Allocate and zeroout new clusters. */
1040	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1041				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1042				     fr, lcn, len);
1043	if (err)
1044		goto out;
1045	*new = true;
1046	step = 1;
1047
1048	end = vcn + alen;
1049	/* Save 'total_size0' to restore if error. */
1050	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1051	total_size = total_size0 + ((u64)alen << cluster_bits);
1052
1053	if (vcn != vcn0) {
1054		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1055			err = -EINVAL;
1056			goto out;
1057		}
1058		if (*lcn == SPARSE_LCN) {
1059			/* Internal error. Should not happened. */
1060			WARN_ON(1);
1061			err = -EINVAL;
1062			goto out;
1063		}
1064		/* Check case when vcn0 + len overlaps new allocated clusters. */
1065		if (vcn0 + *len > end)
1066			*len = end - vcn0;
1067	}
1068
1069repack:
1070	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1071	if (err)
1072		goto out;
1073
1074	attr_b->nres.total_size = cpu_to_le64(total_size);
1075	inode_set_bytes(&ni->vfs_inode, total_size);
1076	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1077
1078	mi_b->dirty = true;
1079	mark_inode_dirty(&ni->vfs_inode);
1080
1081	/* Stored [vcn : next_svcn) from [vcn : end). */
1082	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1083
1084	if (end <= evcn1) {
1085		if (next_svcn == evcn1) {
1086			/* Normal way. Update attribute and exit. */
1087			goto ok;
1088		}
1089		/* Add new segment [next_svcn : evcn1 - next_svcn). */
1090		if (!ni->attr_list.size) {
1091			err = ni_create_attr_list(ni);
1092			if (err)
1093				goto undo1;
1094			/* Layout of records is changed. */
1095			le_b = NULL;
1096			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1097					      0, NULL, &mi_b);
1098			if (!attr_b) {
1099				err = -ENOENT;
1100				goto out;
1101			}
1102
1103			attr = attr_b;
1104			le = le_b;
1105			mi = mi_b;
1106			goto repack;
1107		}
1108	}
1109
1110	/* 
1111	 * The code below may require additional cluster (to extend attribute list)
1112	 * and / or one MFT record 
1113	 * It is too complex to undo operations if -ENOSPC occurs deep inside 
1114	 * in 'ni_insert_nonresident'.
1115	 * Return in advance -ENOSPC here if there are no free cluster and no free MFT.
1116	 */
1117	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1118		/* Undo step 1. */
1119		err = -ENOSPC;
1120		goto undo1;
1121	}
1122
1123	step = 2;
1124	svcn = evcn1;
1125
1126	/* Estimate next attribute. */
1127	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1128
1129	if (!attr) {
1130		/* Insert new attribute segment. */
1131		goto ins_ext;
1132	}
1133
1134	/* Try to update existed attribute segment. */
1135	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1136	evcn = le64_to_cpu(attr->nres.evcn);
1137
1138	if (end < next_svcn)
1139		end = next_svcn;
1140	while (end > evcn) {
1141		/* Remove segment [svcn : evcn). */
1142		mi_remove_attr(NULL, mi, attr);
1143
1144		if (!al_remove_le(ni, le)) {
1145			err = -EINVAL;
1146			goto out;
1147		}
1148
1149		if (evcn + 1 >= alloc) {
1150			/* Last attribute segment. */
1151			evcn1 = evcn + 1;
1152			goto ins_ext;
1153		}
1154
1155		if (ni_load_mi(ni, le, &mi)) {
1156			attr = NULL;
1157			goto out;
1158		}
1159
1160		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1161		if (!attr) {
1162			err = -EINVAL;
1163			goto out;
1164		}
1165		svcn = le64_to_cpu(attr->nres.svcn);
1166		evcn = le64_to_cpu(attr->nres.evcn);
1167	}
1168
1169	if (end < svcn)
1170		end = svcn;
1171
1172	err = attr_load_runs(attr, ni, run, &end);
1173	if (err)
1174		goto out;
1175
1176	evcn1 = evcn + 1;
1177	attr->nres.svcn = cpu_to_le64(next_svcn);
1178	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1179	if (err)
1180		goto out;
1181
1182	le->vcn = cpu_to_le64(next_svcn);
1183	ni->attr_list.dirty = true;
1184	mi->dirty = true;
1185	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1186
1187ins_ext:
1188	if (evcn1 > next_svcn) {
1189		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1190					    next_svcn, evcn1 - next_svcn,
1191					    attr_b->flags, &attr, &mi, NULL);
1192		if (err)
1193			goto out;
1194	}
1195ok:
1196	run_truncate_around(run, vcn);
1197out:
1198	if (err && step > 1) {
1199		/* Too complex to restore. */
1200		_ntfs_bad_inode(&ni->vfs_inode);
1201	}
1202	up_write(&ni->file.run_lock);
1203	ni_unlock(ni);
1204
1205	return err;
1206
1207undo1:
1208	/* Undo step1. */
1209	attr_b->nres.total_size = cpu_to_le64(total_size0);
1210	inode_set_bytes(&ni->vfs_inode, total_size0);
1211
1212	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1213	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1214	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1215		_ntfs_bad_inode(&ni->vfs_inode);
1216	}
1217	goto out;
1218}
1219
1220int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1221{
1222	u64 vbo;
1223	struct ATTRIB *attr;
1224	u32 data_size;
1225
1226	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1227	if (!attr)
1228		return -EINVAL;
1229
1230	if (attr->non_res)
1231		return E_NTFS_NONRESIDENT;
1232
1233	vbo = page->index << PAGE_SHIFT;
1234	data_size = le32_to_cpu(attr->res.data_size);
1235	if (vbo < data_size) {
1236		const char *data = resident_data(attr);
1237		char *kaddr = kmap_atomic(page);
1238		u32 use = data_size - vbo;
1239
1240		if (use > PAGE_SIZE)
1241			use = PAGE_SIZE;
1242
1243		memcpy(kaddr, data + vbo, use);
1244		memset(kaddr + use, 0, PAGE_SIZE - use);
1245		kunmap_atomic(kaddr);
1246		flush_dcache_page(page);
1247		SetPageUptodate(page);
1248	} else if (!PageUptodate(page)) {
1249		zero_user_segment(page, 0, PAGE_SIZE);
1250		SetPageUptodate(page);
1251	}
1252
1253	return 0;
1254}
1255
1256int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1257{
1258	u64 vbo;
1259	struct mft_inode *mi;
1260	struct ATTRIB *attr;
1261	u32 data_size;
1262
1263	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1264	if (!attr)
1265		return -EINVAL;
1266
1267	if (attr->non_res) {
1268		/* Return special error code to check this case. */
1269		return E_NTFS_NONRESIDENT;
1270	}
1271
1272	vbo = page->index << PAGE_SHIFT;
1273	data_size = le32_to_cpu(attr->res.data_size);
1274	if (vbo < data_size) {
1275		char *data = resident_data(attr);
1276		char *kaddr = kmap_atomic(page);
1277		u32 use = data_size - vbo;
1278
1279		if (use > PAGE_SIZE)
1280			use = PAGE_SIZE;
1281		memcpy(data + vbo, kaddr, use);
1282		kunmap_atomic(kaddr);
1283		mi->dirty = true;
1284	}
1285	ni->i_valid = data_size;
1286
1287	return 0;
1288}
1289
1290/*
1291 * attr_load_runs_vcn - Load runs with VCN.
1292 */
1293int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1294		       const __le16 *name, u8 name_len, struct runs_tree *run,
1295		       CLST vcn)
1296{
1297	struct ATTRIB *attr;
1298	int err;
1299	CLST svcn, evcn;
1300	u16 ro;
1301
1302	if (!ni) {
1303		/* Is record corrupted? */
1304		return -ENOENT;
1305	}
1306
1307	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1308	if (!attr) {
1309		/* Is record corrupted? */
1310		return -ENOENT;
1311	}
1312
1313	svcn = le64_to_cpu(attr->nres.svcn);
1314	evcn = le64_to_cpu(attr->nres.evcn);
1315
1316	if (evcn < vcn || vcn < svcn) {
1317		/* Is record corrupted? */
1318		return -EINVAL;
1319	}
1320
1321	ro = le16_to_cpu(attr->nres.run_off);
1322
1323	if (ro > le32_to_cpu(attr->size))
1324		return -EINVAL;
1325
1326	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1327			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1328	if (err < 0)
1329		return err;
1330	return 0;
1331}
1332
1333/*
1334 * attr_load_runs_range - Load runs for given range [from to).
1335 */
1336int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1337			 const __le16 *name, u8 name_len, struct runs_tree *run,
1338			 u64 from, u64 to)
1339{
1340	struct ntfs_sb_info *sbi = ni->mi.sbi;
1341	u8 cluster_bits = sbi->cluster_bits;
1342	CLST vcn;
1343	CLST vcn_last = (to - 1) >> cluster_bits;
1344	CLST lcn, clen;
1345	int err;
1346
1347	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1348		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1349			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1350						 vcn);
1351			if (err)
1352				return err;
1353			clen = 0; /* Next run_lookup_entry(vcn) must be success. */
1354		}
1355	}
1356
1357	return 0;
1358}
1359
1360#ifdef CONFIG_NTFS3_LZX_XPRESS
1361/*
1362 * attr_wof_frame_info
1363 *
1364 * Read header of Xpress/LZX file to get info about frame.
1365 */
1366int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1367			struct runs_tree *run, u64 frame, u64 frames,
1368			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1369{
1370	struct ntfs_sb_info *sbi = ni->mi.sbi;
1371	u64 vbo[2], off[2], wof_size;
1372	u32 voff;
1373	u8 bytes_per_off;
1374	char *addr;
1375	struct page *page;
1376	int i, err;
1377	__le32 *off32;
1378	__le64 *off64;
1379
1380	if (ni->vfs_inode.i_size < 0x100000000ull) {
1381		/* File starts with array of 32 bit offsets. */
1382		bytes_per_off = sizeof(__le32);
1383		vbo[1] = frame << 2;
1384		*vbo_data = frames << 2;
1385	} else {
1386		/* File starts with array of 64 bit offsets. */
1387		bytes_per_off = sizeof(__le64);
1388		vbo[1] = frame << 3;
1389		*vbo_data = frames << 3;
1390	}
1391
1392	/*
1393	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1394	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1395	 */
1396	if (!attr->non_res) {
1397		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1398			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1399			return -EINVAL;
1400		}
1401		addr = resident_data(attr);
1402
1403		if (bytes_per_off == sizeof(__le32)) {
1404			off32 = Add2Ptr(addr, vbo[1]);
1405			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1406			off[1] = le32_to_cpu(off32[0]);
1407		} else {
1408			off64 = Add2Ptr(addr, vbo[1]);
1409			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1410			off[1] = le64_to_cpu(off64[0]);
1411		}
1412
1413		*vbo_data += off[0];
1414		*ondisk_size = off[1] - off[0];
1415		return 0;
1416	}
1417
1418	wof_size = le64_to_cpu(attr->nres.data_size);
1419	down_write(&ni->file.run_lock);
1420	page = ni->file.offs_page;
1421	if (!page) {
1422		page = alloc_page(GFP_KERNEL);
1423		if (!page) {
1424			err = -ENOMEM;
1425			goto out;
1426		}
1427		page->index = -1;
1428		ni->file.offs_page = page;
1429	}
1430	lock_page(page);
1431	addr = page_address(page);
1432
1433	if (vbo[1]) {
1434		voff = vbo[1] & (PAGE_SIZE - 1);
1435		vbo[0] = vbo[1] - bytes_per_off;
1436		i = 0;
1437	} else {
1438		voff = 0;
1439		vbo[0] = 0;
1440		off[0] = 0;
1441		i = 1;
1442	}
1443
1444	do {
1445		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1446
1447		if (index != page->index) {
1448			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1449			u64 to = min(from + PAGE_SIZE, wof_size);
1450
1451			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1452						   ARRAY_SIZE(WOF_NAME), run,
1453						   from, to);
1454			if (err)
1455				goto out1;
1456
1457			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1458					     to - from, REQ_OP_READ);
1459			if (err) {
1460				page->index = -1;
1461				goto out1;
1462			}
1463			page->index = index;
1464		}
1465
1466		if (i) {
1467			if (bytes_per_off == sizeof(__le32)) {
1468				off32 = Add2Ptr(addr, voff);
1469				off[1] = le32_to_cpu(*off32);
1470			} else {
1471				off64 = Add2Ptr(addr, voff);
1472				off[1] = le64_to_cpu(*off64);
1473			}
1474		} else if (!voff) {
1475			if (bytes_per_off == sizeof(__le32)) {
1476				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1477				off[0] = le32_to_cpu(*off32);
1478			} else {
1479				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1480				off[0] = le64_to_cpu(*off64);
1481			}
1482		} else {
1483			/* Two values in one page. */
1484			if (bytes_per_off == sizeof(__le32)) {
1485				off32 = Add2Ptr(addr, voff);
1486				off[0] = le32_to_cpu(off32[-1]);
1487				off[1] = le32_to_cpu(off32[0]);
1488			} else {
1489				off64 = Add2Ptr(addr, voff);
1490				off[0] = le64_to_cpu(off64[-1]);
1491				off[1] = le64_to_cpu(off64[0]);
1492			}
1493			break;
1494		}
1495	} while (++i < 2);
1496
1497	*vbo_data += off[0];
1498	*ondisk_size = off[1] - off[0];
1499
1500out1:
1501	unlock_page(page);
1502out:
1503	up_write(&ni->file.run_lock);
1504	return err;
1505}
1506#endif
1507
1508/*
1509 * attr_is_frame_compressed - Used to detect compressed frame.
1510 */
1511int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1512			     CLST frame, CLST *clst_data)
1513{
1514	int err;
1515	u32 clst_frame;
1516	CLST clen, lcn, vcn, alen, slen, vcn_next;
1517	size_t idx;
1518	struct runs_tree *run;
1519
1520	*clst_data = 0;
1521
1522	if (!is_attr_compressed(attr))
1523		return 0;
1524
1525	if (!attr->non_res)
1526		return 0;
1527
1528	clst_frame = 1u << attr->nres.c_unit;
1529	vcn = frame * clst_frame;
1530	run = &ni->file.run;
1531
1532	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1533		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1534					 attr->name_len, run, vcn);
1535		if (err)
1536			return err;
1537
1538		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1539			return -EINVAL;
1540	}
1541
1542	if (lcn == SPARSE_LCN) {
1543		/* Sparsed frame. */
1544		return 0;
1545	}
1546
1547	if (clen >= clst_frame) {
1548		/*
1549		 * The frame is not compressed 'cause
1550		 * it does not contain any sparse clusters.
1551		 */
1552		*clst_data = clst_frame;
1553		return 0;
1554	}
1555
1556	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1557	slen = 0;
1558	*clst_data = clen;
1559
1560	/*
1561	 * The frame is compressed if *clst_data + slen >= clst_frame.
1562	 * Check next fragments.
1563	 */
1564	while ((vcn += clen) < alen) {
1565		vcn_next = vcn;
1566
1567		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1568		    vcn_next != vcn) {
1569			err = attr_load_runs_vcn(ni, attr->type,
1570						 attr_name(attr),
1571						 attr->name_len, run, vcn_next);
1572			if (err)
1573				return err;
1574			vcn = vcn_next;
1575
1576			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1577				return -EINVAL;
1578		}
1579
1580		if (lcn == SPARSE_LCN) {
1581			slen += clen;
1582		} else {
1583			if (slen) {
1584				/*
1585				 * Data_clusters + sparse_clusters =
1586				 * not enough for frame.
1587				 */
1588				return -EINVAL;
1589			}
1590			*clst_data += clen;
1591		}
1592
1593		if (*clst_data + slen >= clst_frame) {
1594			if (!slen) {
1595				/*
1596				 * There is no sparsed clusters in this frame
1597				 * so it is not compressed.
1598				 */
1599				*clst_data = clst_frame;
1600			} else {
1601				/* Frame is compressed. */
1602			}
1603			break;
1604		}
1605	}
1606
1607	return 0;
1608}
1609
1610/*
1611 * attr_allocate_frame - Allocate/free clusters for @frame.
1612 *
1613 * Assumed: down_write(&ni->file.run_lock);
1614 */
1615int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1616			u64 new_valid)
1617{
1618	int err = 0;
1619	struct runs_tree *run = &ni->file.run;
1620	struct ntfs_sb_info *sbi = ni->mi.sbi;
1621	struct ATTRIB *attr = NULL, *attr_b;
1622	struct ATTR_LIST_ENTRY *le, *le_b;
1623	struct mft_inode *mi, *mi_b;
1624	CLST svcn, evcn1, next_svcn, len;
1625	CLST vcn, end, clst_data;
1626	u64 total_size, valid_size, data_size;
1627
1628	le_b = NULL;
1629	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1630	if (!attr_b)
1631		return -ENOENT;
1632
1633	if (!is_attr_ext(attr_b))
1634		return -EINVAL;
1635
1636	vcn = frame << NTFS_LZNT_CUNIT;
1637	total_size = le64_to_cpu(attr_b->nres.total_size);
1638
1639	svcn = le64_to_cpu(attr_b->nres.svcn);
1640	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1641	data_size = le64_to_cpu(attr_b->nres.data_size);
1642
1643	if (svcn <= vcn && vcn < evcn1) {
1644		attr = attr_b;
1645		le = le_b;
1646		mi = mi_b;
1647	} else if (!le_b) {
1648		err = -EINVAL;
1649		goto out;
1650	} else {
1651		le = le_b;
1652		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1653				    &mi);
1654		if (!attr) {
1655			err = -EINVAL;
1656			goto out;
1657		}
1658		svcn = le64_to_cpu(attr->nres.svcn);
1659		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1660	}
1661
1662	err = attr_load_runs(attr, ni, run, NULL);
1663	if (err)
1664		goto out;
1665
1666	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1667	if (err)
1668		goto out;
1669
1670	total_size -= (u64)clst_data << sbi->cluster_bits;
1671
1672	len = bytes_to_cluster(sbi, compr_size);
1673
1674	if (len == clst_data)
1675		goto out;
1676
1677	if (len < clst_data) {
1678		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1679					NULL, true);
1680		if (err)
1681			goto out;
1682
1683		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1684				   false)) {
1685			err = -ENOMEM;
1686			goto out;
1687		}
1688		end = vcn + clst_data;
1689		/* Run contains updated range [vcn + len : end). */
1690	} else {
1691		CLST alen, hint = 0;
1692		/* Get the last LCN to allocate from. */
1693		if (vcn + clst_data &&
1694		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1695				      NULL)) {
1696			hint = -1;
1697		}
1698
1699		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1700					     hint + 1, len - clst_data, NULL,
1701					     ALLOCATE_DEF, &alen, 0, NULL,
1702					     NULL);
1703		if (err)
1704			goto out;
1705
1706		end = vcn + len;
1707		/* Run contains updated range [vcn + clst_data : end). */
1708	}
1709
1710	total_size += (u64)len << sbi->cluster_bits;
1711
1712repack:
1713	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1714	if (err)
1715		goto out;
1716
1717	attr_b->nres.total_size = cpu_to_le64(total_size);
1718	inode_set_bytes(&ni->vfs_inode, total_size);
1719
1720	mi_b->dirty = true;
1721	mark_inode_dirty(&ni->vfs_inode);
1722
1723	/* Stored [vcn : next_svcn) from [vcn : end). */
1724	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1725
1726	if (end <= evcn1) {
1727		if (next_svcn == evcn1) {
1728			/* Normal way. Update attribute and exit. */
1729			goto ok;
1730		}
1731		/* Add new segment [next_svcn : evcn1). */
1732		if (!ni->attr_list.size) {
1733			err = ni_create_attr_list(ni);
1734			if (err)
1735				goto out;
1736			/* Layout of records has changed. */
1737			le_b = NULL;
1738			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1739					      0, NULL, &mi_b);
1740			if (!attr_b) {
1741				err = -ENOENT;
1742				goto out;
1743			}
1744
1745			attr = attr_b;
1746			le = le_b;
1747			mi = mi_b;
1748			goto repack;
1749		}
1750	}
1751
1752	svcn = evcn1;
1753
1754	/* Locate the next attribute segment. */
1755	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1756
1757	if (attr) {
1758		CLST alloc = bytes_to_cluster(
1759			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1760		CLST evcn = le64_to_cpu(attr->nres.evcn);
1761
1762		if (end < next_svcn)
1763			end = next_svcn;
1764		while (end > evcn) {
1765			/* Remove segment [svcn : evcn). */
1766			mi_remove_attr(NULL, mi, attr);
1767
1768			if (!al_remove_le(ni, le)) {
1769				err = -EINVAL;
1770				goto out;
1771			}
1772
1773			if (evcn + 1 >= alloc) {
1774				/* Last attribute segment. */
1775				evcn1 = evcn + 1;
1776				goto ins_ext;
1777			}
1778
1779			if (ni_load_mi(ni, le, &mi)) {
1780				attr = NULL;
1781				goto out;
1782			}
1783
1784			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1785					    &le->id);
1786			if (!attr) {
1787				err = -EINVAL;
1788				goto out;
1789			}
1790			svcn = le64_to_cpu(attr->nres.svcn);
1791			evcn = le64_to_cpu(attr->nres.evcn);
1792		}
1793
1794		if (end < svcn)
1795			end = svcn;
1796
1797		err = attr_load_runs(attr, ni, run, &end);
1798		if (err)
1799			goto out;
1800
1801		evcn1 = evcn + 1;
1802		attr->nres.svcn = cpu_to_le64(next_svcn);
1803		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1804		if (err)
1805			goto out;
1806
1807		le->vcn = cpu_to_le64(next_svcn);
1808		ni->attr_list.dirty = true;
1809		mi->dirty = true;
1810
1811		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1812	}
1813ins_ext:
1814	if (evcn1 > next_svcn) {
1815		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1816					    next_svcn, evcn1 - next_svcn,
1817					    attr_b->flags, &attr, &mi, NULL);
1818		if (err)
1819			goto out;
1820	}
1821ok:
1822	run_truncate_around(run, vcn);
1823out:
1824	if (new_valid > data_size)
1825		new_valid = data_size;
1826
1827	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1828	if (new_valid != valid_size) {
1829		attr_b->nres.valid_size = cpu_to_le64(new_valid);
1830		mi_b->dirty = true;
1831	}
1832
1833	return err;
1834}
1835
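/*
 * Editorial sketch, not from the original source: collapsing removes
 * [vbo : vbo + bytes) and shifts the rest of the file down. For example,
 * collapsing 64K at offset 128K of a 1M file yields a 960K file in which
 * the data formerly at 192K now starts at 128K. This is the sort of
 * operation fallocate(FALLOC_FL_COLLAPSE_RANGE) is expected to request.
 */
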
1836/*
1837 * attr_collapse_range - Collapse range in file.
1838 */
1839int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1840{
1841	int err = 0;
1842	struct runs_tree *run = &ni->file.run;
1843	struct ntfs_sb_info *sbi = ni->mi.sbi;
1844	struct ATTRIB *attr = NULL, *attr_b;
1845	struct ATTR_LIST_ENTRY *le, *le_b;
1846	struct mft_inode *mi, *mi_b;
1847	CLST svcn, evcn1, len, dealloc, alen;
1848	CLST vcn, end;
1849	u64 valid_size, data_size, alloc_size, total_size;
1850	u32 mask;
1851	__le16 a_flags;
1852
1853	if (!bytes)
1854		return 0;
1855
1856	le_b = NULL;
1857	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1858	if (!attr_b)
1859		return -ENOENT;
1860
1861	if (!attr_b->non_res) {
1862		/* Attribute is resident. Nothing to do? */
1863		return 0;
1864	}
1865
1866	data_size = le64_to_cpu(attr_b->nres.data_size);
1867	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1868	a_flags = attr_b->flags;
1869
1870	if (is_attr_ext(attr_b)) {
1871		total_size = le64_to_cpu(attr_b->nres.total_size);
1872		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1873	} else {
1874		total_size = alloc_size;
1875		mask = sbi->cluster_mask;
1876	}
1877
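	/*
	 * Worked example (editorial): with 4K clusters and c_unit == 4,
	 * mask == (4096 << 4) - 1 == 0xFFFF, so for compressed attributes
	 * only 64K-aligned (frame-aligned) ranges may be collapsed.
	 */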
1878	if ((vbo & mask) || (bytes & mask)) {
1879		/* Only cluster-aligned ranges may be collapsed. */
1880		return -EINVAL;
1881	}
1882
1883	if (vbo > data_size)
1884		return -EINVAL;
1885
1886	down_write(&ni->file.run_lock);
1887
1888	if (vbo + bytes >= data_size) {
1889		u64 new_valid = min(ni->i_valid, vbo);
1890
1891		/* Simply truncate the file at 'vbo'. */
1892		truncate_setsize(&ni->vfs_inode, vbo);
1893		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1894				    &new_valid, true, NULL);
1895
1896		if (!err && new_valid < ni->i_valid)
1897			ni->i_valid = new_valid;
1898
1899		goto out;
1900	}
1901
1902	/*
1903	 * Enumerate all attribute segments and collapse.
1904	 */
1905	alen = alloc_size >> sbi->cluster_bits;
1906	vcn = vbo >> sbi->cluster_bits;
1907	len = bytes >> sbi->cluster_bits;
1908	end = vcn + len;
1909	dealloc = 0;
1910
1911	svcn = le64_to_cpu(attr_b->nres.svcn);
1912	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1913
1914	if (svcn <= vcn && vcn < evcn1) {
1915		attr = attr_b;
1916		le = le_b;
1917		mi = mi_b;
1918	} else if (!le_b) {
1919		err = -EINVAL;
1920		goto out;
1921	} else {
1922		le = le_b;
1923		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1924				    &mi);
1925		if (!attr) {
1926			err = -EINVAL;
1927			goto out;
1928		}
1929
1930		svcn = le64_to_cpu(attr->nres.svcn);
1931		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1932	}
1933
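	/*
	 * Editorial summary of the loop below: for each attribute segment,
	 * either (a) it lies entirely after the removed range - just shift
	 * its svcn/evcn down by 'len', (b) it partially overlaps - free and
	 * collapse the overlapping clusters and repack, or (c) it lies
	 * entirely inside the removed range - delete the whole segment.
	 */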
1934	for (;;) {
1935		if (svcn >= end) {
1936			/* Shift the VCNs down by 'len'. */
1937			attr->nres.svcn = cpu_to_le64(svcn - len);
1938			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1939			if (le) {
1940				le->vcn = attr->nres.svcn;
1941				ni->attr_list.dirty = true;
1942			}
1943			mi->dirty = true;
1944		} else if (svcn < vcn || end < evcn1) {
1945			CLST vcn1, eat, next_svcn;
1946
1947			/* Collapse a part of this attribute segment. */
1948			err = attr_load_runs(attr, ni, run, &svcn);
1949			if (err)
1950				goto out;
1951			vcn1 = max(vcn, svcn);
1952			eat = min(end, evcn1) - vcn1;
1953
1954			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1955						true);
1956			if (err)
1957				goto out;
1958
1959			if (!run_collapse_range(run, vcn1, eat)) {
1960				err = -ENOMEM;
1961				goto out;
1962			}
1963
1964			if (svcn >= vcn) {
1965				/* Shift VCN */
1966				attr->nres.svcn = cpu_to_le64(vcn);
1967				if (le) {
1968					le->vcn = attr->nres.svcn;
1969					ni->attr_list.dirty = true;
1970				}
1971			}
1972
1973			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1974			if (err)
1975				goto out;
1976
1977			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1978			if (next_svcn + eat < evcn1) {
1979				err = ni_insert_nonresident(
1980					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1981					evcn1 - eat - next_svcn, a_flags, &attr,
1982					&mi, &le);
1983				if (err)
1984					goto out;
1985
1986				/* Layout of records may have changed. */
1987				attr_b = NULL;
1988			}
1989
1990			/* Free all allocated memory. */
1991			run_truncate(run, 0);
1992		} else {
1993			u16 le_sz;
1994			u16 roff = le16_to_cpu(attr->nres.run_off);
1995
1996			if (roff > le32_to_cpu(attr->size)) {
1997				err = -EINVAL;
1998				goto out;
1999			}
2000
2001			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2002				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2003				      le32_to_cpu(attr->size) - roff);
2004
2005			/* Delete this attribute segment. */
2006			mi_remove_attr(NULL, mi, attr);
2007			if (!le)
2008				break;
2009
2010			le_sz = le16_to_cpu(le->size);
2011			if (!al_remove_le(ni, le)) {
2012				err = -EINVAL;
2013				goto out;
2014			}
2015
2016			if (evcn1 >= alen)
2017				break;
2018
2019			if (!svcn) {
2020				/* Load next record that contains this attribute. */
2021				if (ni_load_mi(ni, le, &mi)) {
2022					err = -EINVAL;
2023					goto out;
2024				}
2025
2026				/* Look for the required attribute. */
2027				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2028						    0, &le->id);
2029				if (!attr) {
2030					err = -EINVAL;
2031					goto out;
2032				}
2033				goto next_attr;
2034			}
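			/*
			 * al_remove_le() shifted the remaining list entries
			 * down; step 'le' back so the enumeration below
			 * continues from the correct entry.
			 */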
2035			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2036		}
2037
2038		if (evcn1 >= alen)
2039			break;
2040
2041		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2042		if (!attr) {
2043			err = -EINVAL;
2044			goto out;
2045		}
2046
2047next_attr:
2048		svcn = le64_to_cpu(attr->nres.svcn);
2049		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2050	}
2051
2052	if (!attr_b) {
2053		le_b = NULL;
2054		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2055				      &mi_b);
2056		if (!attr_b) {
2057			err = -ENOENT;
2058			goto out;
2059		}
2060	}
2061
2062	data_size -= bytes;
2063	valid_size = ni->i_valid;
2064	if (vbo + bytes <= valid_size)
2065		valid_size -= bytes;
2066	else if (vbo < valid_size)
2067		valid_size = vbo;
2068
2069	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2070	attr_b->nres.data_size = cpu_to_le64(data_size);
2071	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2072	total_size -= (u64)dealloc << sbi->cluster_bits;
2073	if (is_attr_ext(attr_b))
2074		attr_b->nres.total_size = cpu_to_le64(total_size);
2075	mi_b->dirty = true;
2076
2077	/* Update inode size. */
2078	ni->i_valid = valid_size;
2079	ni->vfs_inode.i_size = data_size;
2080	inode_set_bytes(&ni->vfs_inode, total_size);
2081	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2082	mark_inode_dirty(&ni->vfs_inode);
2083
2084out:
2085	up_write(&ni->file.run_lock);
2086	if (err)
2087		_ntfs_bad_inode(&ni->vfs_inode);
2088
2089	return err;
2090}
2091
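/*
 * Editorial sketch, not from the original source: for compressed/sparse
 * attributes the hole must be frame aligned. Assuming 4K clusters and
 * c_unit == 4 (64K frames), punching 10K at offset 3K fails with
 * E_NTFS_NOTALIGNED and *frame_size == 64K; the caller is then expected
 * to zero the unaligned head/tail itself and retry with aligned bounds.
 */
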
2092/*
2093 * attr_punch_hole
2094 *
2095 * Not for normal files.
2096 */
2097int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2098{
2099	int err = 0;
2100	struct runs_tree *run = &ni->file.run;
2101	struct ntfs_sb_info *sbi = ni->mi.sbi;
2102	struct ATTRIB *attr = NULL, *attr_b;
2103	struct ATTR_LIST_ENTRY *le, *le_b;
2104	struct mft_inode *mi, *mi_b;
2105	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2106	u64 total_size, alloc_size;
2107	u32 mask;
2108	__le16 a_flags;
2109	struct runs_tree run2;
2110
2111	if (!bytes)
2112		return 0;
2113
2114	le_b = NULL;
2115	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2116	if (!attr_b)
2117		return -ENOENT;
2118
2119	if (!attr_b->non_res) {
2120		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2121		u32 from, to;
2122
2123		if (vbo > data_size)
2124			return 0;
2125
2126		from = vbo;
2127		to = min_t(u64, vbo + bytes, data_size);
2128		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2129		return 0;
2130	}
2131
2132	if (!is_attr_ext(attr_b))
2133		return -EOPNOTSUPP;
2134
2135	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2136	total_size = le64_to_cpu(attr_b->nres.total_size);
2137
2138	if (vbo >= alloc_size) {
2139		/* NOTE: Punching beyond the allocated size is allowed (no-op). */
2140		return 0;
2141	}
2142
2143	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2144
2145	bytes += vbo;
2146	if (bytes > alloc_size)
2147		bytes = alloc_size;
2148	bytes -= vbo;
2149
2150	if ((vbo & mask) || (bytes & mask)) {
2151		/* We would have to zero unaligned sub-frame range(s). */
2152		if (frame_size == NULL) {
2153			/* Caller insists range is aligned. */
2154			return -EINVAL;
2155		}
2156		*frame_size = mask + 1;
2157		return E_NTFS_NOTALIGNED;
2158	}
2159
2160	down_write(&ni->file.run_lock);
2161	run_init(&run2);
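	/* 'run2' will receive a clone of the affected runs so undo_punch can restore them. */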
2162	run_truncate(run, 0);
2163
2164	/*
2165	 * Enumerate all attribute segments and punch hole where necessary.
2166	 */
2167	alen = alloc_size >> sbi->cluster_bits;
2168	vcn = vbo >> sbi->cluster_bits;
2169	len = bytes >> sbi->cluster_bits;
2170	end = vcn + len;
2171	hole = 0;
2172
2173	svcn = le64_to_cpu(attr_b->nres.svcn);
2174	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2175	a_flags = attr_b->flags;
2176
2177	if (svcn <= vcn && vcn < evcn1) {
2178		attr = attr_b;
2179		le = le_b;
2180		mi = mi_b;
2181	} else if (!le_b) {
2182		err = -EINVAL;
2183		goto bad_inode;
2184	} else {
2185		le = le_b;
2186		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2187				    &mi);
2188		if (!attr) {
2189			err = -EINVAL;
2190			goto bad_inode;
2191		}
2192
2193		svcn = le64_to_cpu(attr->nres.svcn);
2194		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2195	}
2196
2197	while (svcn < end) {
2198		CLST vcn1, zero, hole2 = hole;
2199
2200		err = attr_load_runs(attr, ni, run, &svcn);
2201		if (err)
2202			goto done;
2203		vcn1 = max(vcn, svcn);
2204		zero = min(end, evcn1) - vcn1;
2205
2206		/*
2207		 * Check range [vcn1 : vcn1 + zero) and count its allocated
2208		 * clusters. Passing a NULL sbi makes run_deallocate_ex a
2209		 * pure counting pass, so nothing destructive happens here.
2210		 */
2211		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2212		if (err)
2213			goto done;
2214
2215		/* Check if the required range is already a hole. */
2216		if (hole2 == hole)
2217			goto next_attr;
2218
2219		/* Make a clone of run to undo. */
2220		err = run_clone(run, &run2);
2221		if (err)
2222			goto done;
2223
2224		/* Make a sparse hole range [vcn1 : vcn1 + zero). */
2225		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2226			err = -ENOMEM;
2227			goto done;
2228		}
2229
2230		/* Update run in attribute segment. */
2231		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2232		if (err)
2233			goto done;
2234		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2235		if (next_svcn < evcn1) {
2236			/* Insert new attribute segment. */
2237			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2238						    next_svcn,
2239						    evcn1 - next_svcn, a_flags,
2240						    &attr, &mi, &le);
2241			if (err)
2242				goto undo_punch;
2243
2244			/* Layout of records may have changed. */
2245			attr_b = NULL;
2246		}
2247
2248		/* Real deallocate. Should not fail. */
2249		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2250
2251next_attr:
2252		/* Free all allocated memory. */
2253		run_truncate(run, 0);
2254
2255		if (evcn1 >= alen)
2256			break;
2257
2258		/* Get next attribute segment. */
2259		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2260		if (!attr) {
2261			err = -EINVAL;
2262			goto bad_inode;
2263		}
2264
2265		svcn = le64_to_cpu(attr->nres.svcn);
2266		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2267	}
2268
2269done:
2270	if (!hole)
2271		goto out;
2272
2273	if (!attr_b) {
2274		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2275				      &mi_b);
2276		if (!attr_b) {
2277			err = -EINVAL;
2278			goto bad_inode;
2279		}
2280	}
2281
2282	total_size -= (u64)hole << sbi->cluster_bits;
2283	attr_b->nres.total_size = cpu_to_le64(total_size);
2284	mi_b->dirty = true;
2285
2286	/* Update inode size. */
2287	inode_set_bytes(&ni->vfs_inode, total_size);
2288	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2289	mark_inode_dirty(&ni->vfs_inode);
2290
2291out:
2292	run_close(&run2);
2293	up_write(&ni->file.run_lock);
2294	return err;
2295
2296bad_inode:
2297	_ntfs_bad_inode(&ni->vfs_inode);
2298	goto out;
2299
2300undo_punch:
2301	/*
2302	 * Restore packed runs.
2303	 * 'mi_pack_runs' should not fail because we restore the original runs.
2304	 */
2305	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2306		goto bad_inode;
2307
2308	goto done;
2309}
2310
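/*
 * Editorial sketch, not from the original source: assuming 4K clusters,
 * inserting a 64K hole at offset 128K gives vcn == 32 and len == 16;
 * every run and attribute segment at or above vcn 32 is shifted up by 16
 * clusters and the inserted gap stays unallocated (sparse).
 */
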
2311/*
2312 * attr_insert_range - Insert range (hole) in file.
2313 * Not for normal files.
2314 */
2315int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2316{
2317	int err = 0;
2318	struct runs_tree *run = &ni->file.run;
2319	struct ntfs_sb_info *sbi = ni->mi.sbi;
2320	struct ATTRIB *attr = NULL, *attr_b;
2321	struct ATTR_LIST_ENTRY *le, *le_b;
2322	struct mft_inode *mi, *mi_b;
2323	CLST vcn, svcn, evcn1, len, next_svcn;
2324	u64 data_size, alloc_size;
2325	u32 mask;
2326	__le16 a_flags;
2327
2328	if (!bytes)
2329		return 0;
2330
2331	le_b = NULL;
2332	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2333	if (!attr_b)
2334		return -ENOENT;
2335
2336	if (!is_attr_ext(attr_b)) {
2337		/* It was checked above. See fallocate. */
2338		return -EOPNOTSUPP;
2339	}
2340
2341	if (!attr_b->non_res) {
2342		data_size = le32_to_cpu(attr_b->res.data_size);
2343		alloc_size = data_size;
2344		mask = sbi->cluster_mask; /* cluster_size - 1 */
2345	} else {
2346		data_size = le64_to_cpu(attr_b->nres.data_size);
2347		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2348		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2349	}
2350
2351	if (vbo > data_size) {
2352		/* Inserting a range beyond the end of the file is not allowed. */
2353		return -EINVAL;
2354	}
2355
2356	if ((vbo & mask) || (bytes & mask)) {
2357		/* Only frame-aligned ranges may be inserted. */
2358		return -EINVAL;
2359	}
2360
2361	/*
2362	 * valid_size <= data_size <= alloc_size. Check the new alloc_size
2363	 * against the maximum; subtracting avoids 'alloc_size + bytes' overflow.
2364	 */
2365	if (bytes > sbi->maxbytes_sparse - alloc_size)
2366		return -EFBIG;
2367
2368	vcn = vbo >> sbi->cluster_bits;
2369	len = bytes >> sbi->cluster_bits;
2370
2371	down_write(&ni->file.run_lock);
2372
2373	if (!attr_b->non_res) {
2374		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2375				    data_size + bytes, NULL, false, NULL);
2376
2377		le_b = NULL;
2378		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2379				      &mi_b);
2380		if (!attr_b) {
2381			err = -EINVAL;
2382			goto bad_inode;
2383		}
2384
2385		if (err)
2386			goto out;
2387
2388		if (!attr_b->non_res) {
2389			/* Still resident. */
2390			char *data = Add2Ptr(attr_b,
2391					     le16_to_cpu(attr_b->res.data_off));
2392
2393			memmove(data + bytes, data, bytes);
2394			memset(data, 0, bytes);
2395			goto done;
2396		}
2397
2398		/* The resident file has become nonresident. */
2399		data_size = le64_to_cpu(attr_b->nres.data_size);
2400		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2401	}
2402
2403	/*
2404	 * Enumerate all attribute segments and shift start vcn.
2405	 */
2406	a_flags = attr_b->flags;
2407	svcn = le64_to_cpu(attr_b->nres.svcn);
2408	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2409
2410	if (svcn <= vcn && vcn < evcn1) {
2411		attr = attr_b;
2412		le = le_b;
2413		mi = mi_b;
2414	} else if (!le_b) {
2415		err = -EINVAL;
2416		goto bad_inode;
2417	} else {
2418		le = le_b;
2419		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2420				    &mi);
2421		if (!attr) {
2422			err = -EINVAL;
2423			goto bad_inode;
2424		}
2425
2426		svcn = le64_to_cpu(attr->nres.svcn);
2427		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2428	}
2429
2430	run_truncate(run, 0); /* clear cached values. */
2431	err = attr_load_runs(attr, ni, run, NULL);
2432	if (err)
2433		goto out;
2434
2435	if (!run_insert_range(run, vcn, len)) {
2436		err = -ENOMEM;
2437		goto out;
2438	}
2439
2440	/* Try to pack in current record as much as possible. */
2441	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2442	if (err)
2443		goto out;
2444
2445	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2446
2447	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2448	       attr->type == ATTR_DATA && !attr->name_len) {
2449		le64_add_cpu(&attr->nres.svcn, len);
2450		le64_add_cpu(&attr->nres.evcn, len);
2451		if (le) {
2452			le->vcn = attr->nres.svcn;
2453			ni->attr_list.dirty = true;
2454		}
2455		mi->dirty = true;
2456	}
2457
2458	if (next_svcn < evcn1 + len) {
2459		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2460					    next_svcn, evcn1 + len - next_svcn,
2461					    a_flags, NULL, NULL, NULL);
2462
2463		le_b = NULL;
2464		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2465				      &mi_b);
2466		if (!attr_b) {
2467			err = -EINVAL;
2468			goto bad_inode;
2469		}
2470
2471		if (err) {
2472			/* ni_insert_nonresident failed. Try to undo. */
2473			goto undo_insert_range;
2474		}
2475	}
2476
2477	/*
2478	 * Update primary attribute segment.
2479	 */
2480	if (vbo <= ni->i_valid)
2481		ni->i_valid += bytes;
2482
2483	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2484	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2485
2486	/* ni->i_valid may temporarily differ from the on-disk valid_size. */
2487	if (ni->i_valid > data_size + bytes)
2488		attr_b->nres.valid_size = attr_b->nres.data_size;
2489	else
2490		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2491	mi_b->dirty = true;
2492
2493done:
2494	ni->vfs_inode.i_size += bytes;
2495	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2496	mark_inode_dirty(&ni->vfs_inode);
2497
2498out:
2499	run_truncate(run, 0); /* clear cached values. */
2500
2501	up_write(&ni->file.run_lock);
2502
2503	return err;
2504
2505bad_inode:
2506	_ntfs_bad_inode(&ni->vfs_inode);
2507	goto out;
2508
2509undo_insert_range:
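	/*
	 * Editorial summary: reload the runs, collapse the range that was
	 * just inserted, repack the primary segment, and shift any later
	 * ATTR_DATA segments back down by 'len', restoring the pre-insert
	 * layout.
	 */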
2510	svcn = le64_to_cpu(attr_b->nres.svcn);
2511	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2512
2513	if (svcn <= vcn && vcn < evcn1) {
2514		attr = attr_b;
2515		le = le_b;
2516		mi = mi_b;
2517	} else if (!le_b) {
2518		goto bad_inode;
2519	} else {
2520		le = le_b;
2521		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2522				    &mi);
2523		if (!attr) {
2524			goto bad_inode;
2525		}
2526
2527		svcn = le64_to_cpu(attr->nres.svcn);
2528		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2529	}
2530
2531	if (attr_load_runs(attr, ni, run, NULL))
2532		goto bad_inode;
2533
2534	if (!run_collapse_range(run, vcn, len))
2535		goto bad_inode;
2536
2537	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2538		goto bad_inode;
2539
2540	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2541	       attr->type == ATTR_DATA && !attr->name_len) {
2542		le64_sub_cpu(&attr->nres.svcn, len);
2543		le64_sub_cpu(&attr->nres.evcn, len);
2544		if (le) {
2545			le->vcn = attr->nres.svcn;
2546			ni->attr_list.dirty = true;
2547		}
2548		mi->dirty = true;
2549	}
2550
2551	goto out;
2552}