   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
   5 *
   6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
   7 */
   8
   9#include <linux/fs.h>
  10#include <linux/slab.h>
  11#include <linux/kernel.h>
  12
  13#include "debug.h"
  14#include "ntfs.h"
  15#include "ntfs_fs.h"
  16
  17/*
   18 * You can define NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP externally
   19 * to tune the preallocation algorithm.
  20 */
  21#ifndef NTFS_MIN_LOG2_OF_CLUMP
  22#define NTFS_MIN_LOG2_OF_CLUMP 16
  23#endif
  24
  25#ifndef NTFS_MAX_LOG2_OF_CLUMP
  26#define NTFS_MAX_LOG2_OF_CLUMP 26
  27#endif
  28
  29// 16M
  30#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
  31// 16G
  32#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
  33
  34static inline u64 get_pre_allocated(u64 size)
  35{
  36	u32 clump;
  37	u8 align_shift;
  38	u64 ret;
  39
  40	if (size <= NTFS_CLUMP_MIN) {
  41		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
  42		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
  43	} else if (size >= NTFS_CLUMP_MAX) {
  44		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
  45		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
  46	} else {
  47		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
  48			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
  49		clump = 1u << align_shift;
  50	}
  51
  52	ret = (((size + clump - 1) >> align_shift)) << align_shift;
  53
  54	return ret;
  55}
  56
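/*
 * A worked example of the clump math above, assuming the default
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP values of 16/26:
 *  - size <= 16M: rounded up to a 64K boundary (clump = 1 << 16);
 *  - size >= 16G: rounded up to a 64M boundary (clump = 1 << 26);
 *  - in between, the unit depends on the lowest set bit of (size >> 24),
 *    e.g. size = 48M gives __ffs(3) == 0, align_shift = 15, a 32K unit.
 */
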
  57/*
  58 * attr_load_runs - Load all runs stored in @attr.
  59 */
  60static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
  61			  struct runs_tree *run, const CLST *vcn)
  62{
  63	int err;
  64	CLST svcn = le64_to_cpu(attr->nres.svcn);
  65	CLST evcn = le64_to_cpu(attr->nres.evcn);
  66	u32 asize;
  67	u16 run_off;
  68
  69	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
  70		return 0;
  71
  72	if (vcn && (evcn < *vcn || *vcn < svcn))
  73		return -EINVAL;
  74
  75	asize = le32_to_cpu(attr->size);
  76	run_off = le16_to_cpu(attr->nres.run_off);
  77
  78	if (run_off > asize)
  79		return -EINVAL;
  80
  81	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
  82			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
  83			    asize - run_off);
  84	if (err < 0)
  85		return err;
  86
  87	return 0;
  88}
  89
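/*
 * Note: attr_load_runs() is a no-op when the segment is empty or its
 * [svcn, evcn] window is already mapped in @run; the optional @vcn only
 * sanity-checks that the requested cluster belongs to this segment.
 */
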
  90/*
  91 * run_deallocate_ex - Deallocate clusters.
  92 */
  93static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
  94			     CLST vcn, CLST len, CLST *done, bool trim)
  95{
  96	int err = 0;
  97	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
  98	size_t idx;
  99
 100	if (!len)
 101		goto out;
 102
 103	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
 104failed:
 105		run_truncate(run, vcn0);
 106		err = -EINVAL;
 107		goto out;
 108	}
 109
 110	for (;;) {
 111		if (clen > len)
 112			clen = len;
 113
 114		if (!clen) {
 115			err = -EINVAL;
 116			goto out;
 117		}
 118
 119		if (lcn != SPARSE_LCN) {
 120			if (sbi) {
  121			/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
 122				mark_as_free_ex(sbi, lcn, clen, trim);
 123			}
 124			dn += clen;
 125		}
 126
 127		len -= clen;
 128		if (!len)
 129			break;
 130
 131		vcn_next = vcn + clen;
 132		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
 133		    vcn != vcn_next) {
 134			/* Save memory - don't load entire run. */
 135			goto failed;
 136		}
 137	}
 138
 139out:
 140	if (done)
 141		*done += dn;
 142
 143	return err;
 144}
 145
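/*
 * Note: when @sbi is NULL, run_deallocate_ex() only counts via *done the
 * clusters that would be freed, without touching the bitmap; see
 * attr_punch_hole(), which uses this to measure a hole before committing.
 */
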
 146/*
 147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 148 */
 149int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 150			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 151			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
 152			   CLST *new_lcn, CLST *new_len)
 153{
 154	int err;
 155	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
 156	size_t cnt = run->count;
 157
 158	for (;;) {
 159		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
 160					       opt);
 161
 162		if (err == -ENOSPC && pre) {
 163			pre = 0;
 164			if (*pre_alloc)
 165				*pre_alloc = 0;
 166			continue;
 167		}
 168
 169		if (err)
 170			goto out;
 171
 172		if (vcn == vcn0) {
 173			/* Return the first fragment. */
 174			if (new_lcn)
 175				*new_lcn = lcn;
 176			if (new_len)
 177				*new_len = flen;
 178		}
 179
 180		/* Add new fragment into run storage. */
 181		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 182			/* Undo last 'ntfs_look_for_free_space' */
 183			mark_as_free_ex(sbi, lcn, len, false);
 184			err = -ENOMEM;
 185			goto out;
 186		}
 187
 188		if (opt & ALLOCATE_ZERO) {
 189			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
 190
 191			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
 192						   (sector_t)lcn << shift,
 193						   (sector_t)flen << shift,
 194						   GFP_NOFS, 0);
 195			if (err)
 196				goto out;
 197		}
 198
 199		vcn += flen;
 200
 201		if (flen >= len || (opt & ALLOCATE_MFT) ||
 202		    (fr && run->count - cnt >= fr)) {
 203			*alen = vcn - vcn0;
 204			return 0;
 205		}
 206
 207		len -= flen;
 208	}
 209
 210out:
 211	/* Undo 'ntfs_look_for_free_space' */
 212	if (vcn - vcn0) {
 213		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
 214		run_truncate(run, vcn0);
 215	}
 216
 217	return err;
 218}
 219
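/*
 * A note on the allocation loop above: the first attempt asks for
 * @len + @pre_alloc clusters; if that fails with -ENOSPC, preallocation
 * is dropped and the request is retried with @len alone. A non-zero @fr
 * bounds how many new fragments may be added before the function returns
 * with a partial allocation (*alen < @len), leaving the caller to repeat.
 */
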
 220/*
 221 * attr_make_nonresident
 222 *
  223 * If @page is not NULL, it already contains resident data
  224 * and is locked (called from ni_write_frame()).
 225 */
 226int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 227			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 228			  u64 new_size, struct runs_tree *run,
 229			  struct ATTRIB **ins_attr, struct page *page)
 230{
 231	struct ntfs_sb_info *sbi;
 232	struct ATTRIB *attr_s;
 233	struct MFT_REC *rec;
 234	u32 used, asize, rsize, aoff, align;
 235	bool is_data;
 236	CLST len, alen;
 237	char *next;
 238	int err;
 239
 240	if (attr->non_res) {
 241		*ins_attr = attr;
 242		return 0;
 243	}
 244
 245	sbi = mi->sbi;
 246	rec = mi->mrec;
 247	attr_s = NULL;
 248	used = le32_to_cpu(rec->used);
 249	asize = le32_to_cpu(attr->size);
 250	next = Add2Ptr(attr, asize);
 251	aoff = PtrOffset(rec, attr);
 252	rsize = le32_to_cpu(attr->res.data_size);
 253	is_data = attr->type == ATTR_DATA && !attr->name_len;
 254
 255	align = sbi->cluster_size;
 256	if (is_attr_compressed(attr))
 257		align <<= COMPRESSION_UNIT;
 258	len = (rsize + align - 1) >> sbi->cluster_bits;
 259
 260	run_init(run);
 261
 262	/* Make a copy of original attribute. */
 263	attr_s = kmemdup(attr, asize, GFP_NOFS);
 264	if (!attr_s) {
 265		err = -ENOMEM;
 266		goto out;
 267	}
 268
 269	if (!len) {
 270		/* Empty resident -> Empty nonresident. */
 271		alen = 0;
 272	} else {
 273		const char *data = resident_data(attr);
 274
 275		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 276					     ALLOCATE_DEF, &alen, 0, NULL,
 277					     NULL);
 278		if (err)
 279			goto out1;
 280
 281		if (!rsize) {
 282			/* Empty resident -> Non empty nonresident. */
 283		} else if (!is_data) {
 284			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
 285			if (err)
 286				goto out2;
 287		} else if (!page) {
 288			char *kaddr;
 289
 290			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
 291			if (!page) {
 292				err = -ENOMEM;
 293				goto out2;
 294			}
 295			kaddr = kmap_atomic(page);
 296			memcpy(kaddr, data, rsize);
 297			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
 298			kunmap_atomic(kaddr);
 299			flush_dcache_page(page);
 300			SetPageUptodate(page);
 301			set_page_dirty(page);
 302			unlock_page(page);
 303			put_page(page);
 304		}
 305	}
 306
 307	/* Remove original attribute. */
 308	used -= asize;
 309	memmove(attr, Add2Ptr(attr, asize), used - aoff);
 310	rec->used = cpu_to_le32(used);
 311	mi->dirty = true;
 312	if (le)
 313		al_remove_le(ni, le);
 314
 315	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
 316				    attr_s->name_len, run, 0, alen,
 317				    attr_s->flags, &attr, NULL, NULL);
 318	if (err)
 319		goto out3;
 320
 321	kfree(attr_s);
 322	attr->nres.data_size = cpu_to_le64(rsize);
 323	attr->nres.valid_size = attr->nres.data_size;
 324
 325	*ins_attr = attr;
 326
 327	if (is_data)
 328		ni->ni_flags &= ~NI_FLAG_RESIDENT;
 329
  330	/* Resident attribute becomes nonresident. */
 331	return 0;
 332
 333out3:
 334	attr = Add2Ptr(rec, aoff);
 335	memmove(next, attr, used - aoff);
 336	memcpy(attr, attr_s, asize);
 337	rec->used = cpu_to_le32(used + asize);
 338	mi->dirty = true;
 339out2:
  340	/* Undo: do not trim newly allocated clusters. */
 341	run_deallocate(sbi, run, false);
 342	run_close(run);
 343out1:
 344	kfree(attr_s);
 345out:
 346	return err;
 347}
 348
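/*
 * Summary of the conversion above: the resident attribute is duplicated
 * (attr_s), clusters are allocated, the payload is written either with
 * ntfs_sb_write_run() or through the page cache for plain $DATA, the
 * resident attribute is removed from the record and a nonresident one is
 * inserted. The out3/out2/out1 labels unwind these steps in reverse order
 * on failure.
 */
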
 349/*
 350 * attr_set_size_res - Helper for attr_set_size().
 351 */
 352static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
 353			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 354			     u64 new_size, struct runs_tree *run,
 355			     struct ATTRIB **ins_attr)
 356{
 357	struct ntfs_sb_info *sbi = mi->sbi;
 358	struct MFT_REC *rec = mi->mrec;
 359	u32 used = le32_to_cpu(rec->used);
 360	u32 asize = le32_to_cpu(attr->size);
 361	u32 aoff = PtrOffset(rec, attr);
 362	u32 rsize = le32_to_cpu(attr->res.data_size);
 363	u32 tail = used - aoff - asize;
 364	char *next = Add2Ptr(attr, asize);
 365	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
 366
 367	if (dsize < 0) {
 368		memmove(next + dsize, next, tail);
 369	} else if (dsize > 0) {
 370		if (used + dsize > sbi->max_bytes_per_attr)
 371			return attr_make_nonresident(ni, attr, le, mi, new_size,
 372						     run, ins_attr, NULL);
 373
 374		memmove(next + dsize, next, tail);
 375		memset(next, 0, dsize);
 376	}
 377
 378	if (new_size > rsize)
 379		memset(Add2Ptr(resident_data(attr), rsize), 0,
 380		       new_size - rsize);
 381
 382	rec->used = cpu_to_le32(used + dsize);
 383	attr->size = cpu_to_le32(asize + dsize);
 384	attr->res.data_size = cpu_to_le32(new_size);
 385	mi->dirty = true;
 386	*ins_attr = attr;
 387
 388	return 0;
 389}
 390
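/*
 * Example for the resident resize above: payloads are 8-byte aligned
 * inside the MFT record, so growing data_size from 10 to 20 bytes gives
 * dsize = ALIGN(20, 8) - ALIGN(10, 8) = 24 - 16 = 8; the tail of the
 * record is shifted by 8 bytes and the newly exposed area is zeroed.
 */
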
 391/*
 392 * attr_set_size - Change the size of attribute.
 393 *
 394 * Extend:
 395 *   - Sparse/compressed: No allocated clusters.
 396 *   - Normal: Append allocated and preallocated new clusters.
 397 * Shrink:
 398 *   - No deallocation if @keep_prealloc is set.
 399 */
 400int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 401		  const __le16 *name, u8 name_len, struct runs_tree *run,
 402		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
 403		  struct ATTRIB **ret)
 404{
 405	int err = 0;
 406	struct ntfs_sb_info *sbi = ni->mi.sbi;
 407	u8 cluster_bits = sbi->cluster_bits;
 408	bool is_mft =
 409		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
 410	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 411	struct ATTRIB *attr = NULL, *attr_b;
 412	struct ATTR_LIST_ENTRY *le, *le_b;
 413	struct mft_inode *mi, *mi_b;
 414	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 415	CLST next_svcn, pre_alloc = -1, done = 0;
 416	bool is_ext, is_bad = false;
 417	bool dirty = false;
 418	u32 align;
 419	struct MFT_REC *rec;
 420
 421again:
 422	alen = 0;
 423	le_b = NULL;
 424	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 425			      &mi_b);
 426	if (!attr_b) {
 427		err = -ENOENT;
 428		goto bad_inode;
 429	}
 430
 431	if (!attr_b->non_res) {
 432		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 433					&attr_b);
 434		if (err)
 435			return err;
 436
 437		/* Return if file is still resident. */
 438		if (!attr_b->non_res) {
 439			dirty = true;
 440			goto ok1;
 441		}
 442
 443		/* Layout of records may be changed, so do a full search. */
 444		goto again;
 445	}
 446
 447	is_ext = is_attr_ext(attr_b);
 448	align = sbi->cluster_size;
 449	if (is_ext)
 450		align <<= attr_b->nres.c_unit;
 451
 452	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 453	old_size = le64_to_cpu(attr_b->nres.data_size);
 454	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 455
 456again_1:
 457	old_alen = old_alloc >> cluster_bits;
 458
 459	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
 460	new_alen = new_alloc >> cluster_bits;
 461
 462	if (keep_prealloc && new_size < old_size) {
 463		attr_b->nres.data_size = cpu_to_le64(new_size);
 464		mi_b->dirty = dirty = true;
 465		goto ok;
 466	}
 467
 468	vcn = old_alen - 1;
 469
 470	svcn = le64_to_cpu(attr_b->nres.svcn);
 471	evcn = le64_to_cpu(attr_b->nres.evcn);
 472
 473	if (svcn <= vcn && vcn <= evcn) {
 474		attr = attr_b;
 475		le = le_b;
 476		mi = mi_b;
 477	} else if (!le_b) {
 478		err = -EINVAL;
 479		goto bad_inode;
 480	} else {
 481		le = le_b;
 482		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 483				    &mi);
 484		if (!attr) {
 485			err = -EINVAL;
 486			goto bad_inode;
 487		}
 488
 489next_le_1:
 490		svcn = le64_to_cpu(attr->nres.svcn);
 491		evcn = le64_to_cpu(attr->nres.evcn);
 492	}
 493	/*
 494	 * Here we have:
 495	 * attr,mi,le - last attribute segment (containing 'vcn').
 496	 * attr_b,mi_b,le_b - base (primary) attribute segment.
 497	 */
 498next_le:
 499	rec = mi->mrec;
 500	err = attr_load_runs(attr, ni, run, NULL);
 501	if (err)
 502		goto out;
 503
 504	if (new_size > old_size) {
 505		CLST to_allocate;
 506		size_t free;
 507
 508		if (new_alloc <= old_alloc) {
 509			attr_b->nres.data_size = cpu_to_le64(new_size);
 510			mi_b->dirty = dirty = true;
 511			goto ok;
 512		}
 513
 514		/*
 515		 * Add clusters. In simple case we have to:
 516		 *  - allocate space (vcn, lcn, len)
 517		 *  - update packed run in 'mi'
 518		 *  - update attr->nres.evcn
 519		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 520		 */
 521		to_allocate = new_alen - old_alen;
 522add_alloc_in_same_attr_seg:
 523		lcn = 0;
 524		if (is_mft) {
 525			/* MFT allocates clusters from MFT zone. */
 526			pre_alloc = 0;
 527		} else if (is_ext) {
  528		/* No preallocation for sparse/compressed. */
 529			pre_alloc = 0;
 530		} else if (pre_alloc == -1) {
 531			pre_alloc = 0;
 532			if (type == ATTR_DATA && !name_len &&
 533			    sbi->options->prealloc) {
 534				pre_alloc =
 535					bytes_to_cluster(
 536						sbi,
 537						get_pre_allocated(new_size)) -
 538					new_alen;
 539			}
 540
 541			/* Get the last LCN to allocate from. */
 542			if (old_alen &&
 543			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
 544				lcn = SPARSE_LCN;
 545			}
 546
 547			if (lcn == SPARSE_LCN)
 548				lcn = 0;
 549			else if (lcn)
 550				lcn += 1;
 551
 552			free = wnd_zeroes(&sbi->used.bitmap);
 553			if (to_allocate > free) {
 554				err = -ENOSPC;
 555				goto out;
 556			}
 557
 558			if (pre_alloc && to_allocate + pre_alloc > free)
 559				pre_alloc = 0;
 560		}
 561
 562		vcn = old_alen;
 563
 564		if (is_ext) {
 565			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
 566					   false)) {
 567				err = -ENOMEM;
 568				goto out;
 569			}
 570			alen = to_allocate;
 571		} else {
 572			/* ~3 bytes per fragment. */
 573			err = attr_allocate_clusters(
 574				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 575				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 576				is_mft ? 0
 577				       : (sbi->record_size -
 578					  le32_to_cpu(rec->used) + 8) /
 579							 3 +
 580						 1,
 581				NULL, NULL);
 582			if (err)
 583				goto out;
 584		}
 585
 586		done += alen;
 587		vcn += alen;
 588		if (to_allocate > alen)
 589			to_allocate -= alen;
 590		else
 591			to_allocate = 0;
 592
 593pack_runs:
 594		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 595		if (err)
 596			goto undo_1;
 597
 598		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 599		new_alloc_tmp = (u64)next_svcn << cluster_bits;
 600		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 601		mi_b->dirty = dirty = true;
 602
 603		if (next_svcn >= vcn && !to_allocate) {
 604			/* Normal way. Update attribute and exit. */
 605			attr_b->nres.data_size = cpu_to_le64(new_size);
 606			goto ok;
 607		}
 608
  609		/* At least two MFT records to avoid a recursive loop. */
 610		if (is_mft && next_svcn == vcn &&
 611		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
 612			new_size = new_alloc_tmp;
 613			attr_b->nres.data_size = attr_b->nres.alloc_size;
 614			goto ok;
 615		}
 616
 617		if (le32_to_cpu(rec->used) < sbi->record_size) {
 618			old_alen = next_svcn;
 619			evcn = old_alen - 1;
 620			goto add_alloc_in_same_attr_seg;
 621		}
 622
 623		attr_b->nres.data_size = attr_b->nres.alloc_size;
 624		if (new_alloc_tmp < old_valid)
 625			attr_b->nres.valid_size = attr_b->nres.data_size;
 626
 627		if (type == ATTR_LIST) {
 628			err = ni_expand_list(ni);
 629			if (err)
 630				goto undo_2;
 631			if (next_svcn < vcn)
 632				goto pack_runs;
 633
 634			/* Layout of records is changed. */
 635			goto again;
 636		}
 637
 638		if (!ni->attr_list.size) {
 639			err = ni_create_attr_list(ni);
 640			/* In case of error layout of records is not changed. */
 641			if (err)
 642				goto undo_2;
 643			/* Layout of records is changed. */
 644		}
 645
 646		if (next_svcn >= vcn) {
 647			/* This is MFT data, repeat. */
 648			goto again;
 649		}
 650
 651		/* Insert new attribute segment. */
 652		err = ni_insert_nonresident(ni, type, name, name_len, run,
 653					    next_svcn, vcn - next_svcn,
 654					    attr_b->flags, &attr, &mi, NULL);
 655
 656		/*
  657		 * Layout of records may be changed.
 658		 * Find base attribute to update.
 659		 */
 660		le_b = NULL;
 661		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 662				      NULL, &mi_b);
 663		if (!attr_b) {
 664			err = -EINVAL;
 665			goto bad_inode;
 666		}
 667
 668		if (err) {
 669			/* ni_insert_nonresident failed. */
 670			attr = NULL;
 671			goto undo_2;
 672		}
 673
 674		if (!is_mft)
 675			run_truncate_head(run, evcn + 1);
 676
 677		svcn = le64_to_cpu(attr->nres.svcn);
 678		evcn = le64_to_cpu(attr->nres.evcn);
 679
 680		/*
  681		 * Attribute is in a consistent state.
 682		 * Save this point to restore to if next steps fail.
 683		 */
 684		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
 685		attr_b->nres.valid_size = attr_b->nres.data_size =
 686			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 687		mi_b->dirty = dirty = true;
 688		goto again_1;
 689	}
 690
 691	if (new_size != old_size ||
 692	    (new_alloc != old_alloc && !keep_prealloc)) {
 693		/*
 694		 * Truncate clusters. In simple case we have to:
 695		 *  - update packed run in 'mi'
 696		 *  - update attr->nres.evcn
 697		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 698		 *  - mark and trim clusters as free (vcn, lcn, len)
 699		 */
 700		CLST dlen = 0;
 701
 702		vcn = max(svcn, new_alen);
 703		new_alloc_tmp = (u64)vcn << cluster_bits;
 704
 705		if (vcn > svcn) {
 706			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 707			if (err)
 708				goto out;
 709		} else if (le && le->vcn) {
 710			u16 le_sz = le16_to_cpu(le->size);
 711
 712			/*
 713			 * NOTE: List entries for one attribute are always
  714			 * the same size. We deal with the last entry (vcn==0),
  715			 * and it is not the first in the entries array
  716			 * (the list entry for the std attribute is always first).
 717			 * So it is safe to step back.
 718			 */
 719			mi_remove_attr(NULL, mi, attr);
 720
 721			if (!al_remove_le(ni, le)) {
 722				err = -EINVAL;
 723				goto bad_inode;
 724			}
 725
 726			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 727		} else {
 728			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
 729			mi->dirty = true;
 730		}
 731
 732		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 733
 734		if (vcn == new_alen) {
 735			attr_b->nres.data_size = cpu_to_le64(new_size);
 736			if (new_size < old_valid)
 737				attr_b->nres.valid_size =
 738					attr_b->nres.data_size;
 739		} else {
 740			if (new_alloc_tmp <=
 741			    le64_to_cpu(attr_b->nres.data_size))
 742				attr_b->nres.data_size =
 743					attr_b->nres.alloc_size;
 744			if (new_alloc_tmp <
 745			    le64_to_cpu(attr_b->nres.valid_size))
 746				attr_b->nres.valid_size =
 747					attr_b->nres.alloc_size;
 748		}
 749		mi_b->dirty = dirty = true;
 750
 751		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
 752					true);
 753		if (err)
 754			goto out;
 755
 756		if (is_ext) {
  757			/* dlen == number of really deallocated clusters. */
 758			le64_sub_cpu(&attr_b->nres.total_size,
 759				     ((u64)dlen << cluster_bits));
 760		}
 761
 762		run_truncate(run, vcn);
 763
 764		if (new_alloc_tmp <= new_alloc)
 765			goto ok;
 766
 767		old_size = new_alloc_tmp;
 768		vcn = svcn - 1;
 769
 770		if (le == le_b) {
 771			attr = attr_b;
 772			mi = mi_b;
 773			evcn = svcn - 1;
 774			svcn = 0;
 775			goto next_le;
 776		}
 777
 778		if (le->type != type || le->name_len != name_len ||
 779		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 780			err = -EINVAL;
 781			goto bad_inode;
 782		}
 783
 784		err = ni_load_mi(ni, le, &mi);
 785		if (err)
 786			goto out;
 787
 788		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
 789		if (!attr) {
 790			err = -EINVAL;
 791			goto bad_inode;
 792		}
 793		goto next_le_1;
 794	}
 795
 796ok:
 797	if (new_valid) {
 798		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
 799
 800		if (attr_b->nres.valid_size != valid) {
 801			attr_b->nres.valid_size = valid;
 802			mi_b->dirty = true;
 803		}
 804	}
 805
 806ok1:
 807	if (ret)
 808		*ret = attr_b;
 809
 810	if (((type == ATTR_DATA && !name_len) ||
 811	     (type == ATTR_ALLOC && name == I30_NAME))) {
 812		/* Update inode_set_bytes. */
 813		if (attr_b->non_res) {
 814			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 815			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 816				inode_set_bytes(&ni->vfs_inode, new_alloc);
 817				dirty = true;
 818			}
 819		}
 820
 821		/* Don't forget to update duplicate information in parent. */
 822		if (dirty) {
 823			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 824			mark_inode_dirty(&ni->vfs_inode);
 825		}
 826	}
 827
 828	return 0;
 829
 830undo_2:
 831	vcn -= alen;
 832	attr_b->nres.data_size = cpu_to_le64(old_size);
 833	attr_b->nres.valid_size = cpu_to_le64(old_valid);
 834	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
 835
 836	/* Restore 'attr' and 'mi'. */
 837	if (attr)
 838		goto restore_run;
 839
 840	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
 841	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
 842		attr = attr_b;
 843		le = le_b;
 844		mi = mi_b;
 845	} else if (!le_b) {
 846		err = -EINVAL;
 847		goto bad_inode;
 848	} else {
 849		le = le_b;
 850		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
 851				    &svcn, &mi);
 852		if (!attr)
 853			goto bad_inode;
 854	}
 855
 856restore_run:
 857	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
 858		is_bad = true;
 859
 860undo_1:
 861	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
 862
 863	run_truncate(run, vcn);
 864out:
 865	if (is_bad) {
 866bad_inode:
 867		_ntfs_bad_inode(&ni->vfs_inode);
 868	}
 869	return err;
 870}
 871
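/*
 * Error handling in attr_set_size() above: 'undo_2' restores the original
 * sizes in the base attribute and re-finds the affected segment,
 * 'restore_run' repacks the old run, and 'undo_1' releases the clusters
 * allocated by this call. If an undo step itself fails (is_bad), the
 * inode is marked bad via _ntfs_bad_inode() instead of being left
 * half-updated.
 */
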
 872/*
 873 * attr_data_get_block - Return 'lcn' and 'len' for the given 'vcn'.
 874 *
 875 * @new == NULL means just get the current mapping for 'vcn'.
 876 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
 877 * @zero - zero out newly allocated clusters.
 878 *
 879 *  NOTE:
 880 *  - @new != NULL is used only for sparse or compressed attributes.
 881 *  - newly allocated clusters are zeroed via blkdev_issue_zeroout.
 882 */
 883int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 884			CLST *len, bool *new, bool zero)
 885{
 886	int err = 0;
 887	struct runs_tree *run = &ni->file.run;
 888	struct ntfs_sb_info *sbi;
 889	u8 cluster_bits;
 890	struct ATTRIB *attr = NULL, *attr_b;
 891	struct ATTR_LIST_ENTRY *le, *le_b;
 892	struct mft_inode *mi, *mi_b;
 893	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 894	CLST alloc, evcn;
 895	unsigned fr;
 896	u64 total_size, total_size0;
 897	int step = 0;
 898
 899	if (new)
 900		*new = false;
 901
 902	/* Try to find in cache. */
 903	down_read(&ni->file.run_lock);
 904	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 905		*len = 0;
 906	up_read(&ni->file.run_lock);
 907
 908	if (*len) {
 909		if (*lcn != SPARSE_LCN || !new)
 910			return 0; /* Fast normal way without allocation. */
 911		else if (clen > *len)
 912			clen = *len;
 913	}
 914
 915	/* No cluster in cache or we need to allocate cluster in hole. */
 916	sbi = ni->mi.sbi;
 917	cluster_bits = sbi->cluster_bits;
 918
 919	ni_lock(ni);
 920	down_write(&ni->file.run_lock);
 921
 922	le_b = NULL;
 923	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
 924	if (!attr_b) {
 925		err = -ENOENT;
 926		goto out;
 927	}
 928
 929	if (!attr_b->non_res) {
 930		*lcn = RESIDENT_LCN;
 931		*len = 1;
 932		goto out;
 933	}
 934
 935	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
 936	if (vcn >= asize) {
 937		if (new) {
 938			err = -EINVAL;
 939		} else {
 940			*len = 1;
 941			*lcn = SPARSE_LCN;
 942		}
 943		goto out;
 944	}
 945
 946	svcn = le64_to_cpu(attr_b->nres.svcn);
 947	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
 948
 949	attr = attr_b;
 950	le = le_b;
 951	mi = mi_b;
 952
 953	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
 954		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
 955				    &mi);
 956		if (!attr) {
 957			err = -EINVAL;
 958			goto out;
 959		}
 960		svcn = le64_to_cpu(attr->nres.svcn);
 961		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 962	}
 963
 964	/* Load the actual runs into the cache. */
 965	err = attr_load_runs(attr, ni, run, NULL);
 966	if (err)
 967		goto out;
 968
 969	if (!*len) {
 970		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 971			if (*lcn != SPARSE_LCN || !new)
 972				goto ok; /* Slow normal way without allocation. */
 973
 974			if (clen > *len)
 975				clen = *len;
 976		} else if (!new) {
 977			/* Here we may return -ENOENT.
 978			 * In any case the caller gets zero length. */
 979			goto ok;
 980		}
 981	}
 982
 983	if (!is_attr_ext(attr_b)) {
 984		/* The code below only for sparsed or compressed attributes. */
 985		err = -EINVAL;
 986		goto out;
 987	}
 988
 989	vcn0 = vcn;
 990	to_alloc = clen;
 991	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
 992	/* Allocate frame-aligned clusters.
 993	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
 994	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
 995	if (attr_b->nres.c_unit) {
 996		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
 997		CLST cmask = ~(clst_per_frame - 1);
 998
 999		/* Get frame aligned vcn and to_alloc. */
1000		vcn = vcn0 & cmask;
1001		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1002		if (fr < clst_per_frame)
1003			fr = clst_per_frame;
1004		zero = true;
1005
1006		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
1007		if (vcn < svcn || evcn1 <= vcn) {
1008			/* Load attribute for truncated vcn. */
1009			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
1010					    &vcn, &mi);
1011			if (!attr) {
1012				err = -EINVAL;
1013				goto out;
1014			}
1015			svcn = le64_to_cpu(attr->nres.svcn);
1016			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1017			err = attr_load_runs(attr, ni, run, NULL);
1018			if (err)
1019				goto out;
1020		}
1021	}
1022
1023	if (vcn + to_alloc > asize)
1024		to_alloc = asize - vcn;
1025
1026	/* Get the last LCN to allocate from. */
1027	hint = 0;
1028
1029	if (vcn > evcn1) {
1030		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1031				   false)) {
1032			err = -ENOMEM;
1033			goto out;
1034		}
1035	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1036		hint = -1;
1037	}
1038
1039	/* Allocate and zeroout new clusters. */
1040	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1041				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1042				     fr, lcn, len);
1043	if (err)
1044		goto out;
1045	*new = true;
1046	step = 1;
1047
1048	end = vcn + alen;
 1049	/* Save 'total_size0' to restore it on error. */
1050	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1051	total_size = total_size0 + ((u64)alen << cluster_bits);
1052
1053	if (vcn != vcn0) {
1054		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1055			err = -EINVAL;
1056			goto out;
1057		}
1058		if (*lcn == SPARSE_LCN) {
 1059			/* Internal error. Should not happen. */
1060			WARN_ON(1);
1061			err = -EINVAL;
1062			goto out;
1063		}
 1064		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1065		if (vcn0 + *len > end)
1066			*len = end - vcn0;
1067	}
1068
1069repack:
1070	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1071	if (err)
1072		goto out;
1073
1074	attr_b->nres.total_size = cpu_to_le64(total_size);
1075	inode_set_bytes(&ni->vfs_inode, total_size);
1076	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1077
1078	mi_b->dirty = true;
1079	mark_inode_dirty(&ni->vfs_inode);
1080
1081	/* Stored [vcn : next_svcn) from [vcn : end). */
1082	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1083
1084	if (end <= evcn1) {
1085		if (next_svcn == evcn1) {
1086			/* Normal way. Update attribute and exit. */
1087			goto ok;
1088		}
 1089		/* Add new segment [next_svcn : evcn1). */
1090		if (!ni->attr_list.size) {
1091			err = ni_create_attr_list(ni);
1092			if (err)
1093				goto undo1;
1094			/* Layout of records is changed. */
1095			le_b = NULL;
1096			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1097					      0, NULL, &mi_b);
1098			if (!attr_b) {
1099				err = -ENOENT;
1100				goto out;
1101			}
1102
1103			attr = attr_b;
1104			le = le_b;
1105			mi = mi_b;
1106			goto repack;
1107		}
1108	}
1109
 1110	/*
 1111	 * The code below may require an additional cluster (to extend the
 1112	 * attribute list) and/or one MFT record.
 1113	 * It is too complex to undo operations if -ENOSPC occurs deep inside
 1114	 * 'ni_insert_nonresident'.
 1115	 * Return -ENOSPC here in advance if there is no free cluster and no free MFT record.
 1116	 */
1117	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1118		/* Undo step 1. */
1119		err = -ENOSPC;
1120		goto undo1;
1121	}
1122
1123	step = 2;
1124	svcn = evcn1;
1125
1126	/* Estimate next attribute. */
1127	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1128
1129	if (!attr) {
1130		/* Insert new attribute segment. */
1131		goto ins_ext;
1132	}
1133
 1134	/* Try to update an existing attribute segment. */
1135	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1136	evcn = le64_to_cpu(attr->nres.evcn);
1137
1138	if (end < next_svcn)
1139		end = next_svcn;
1140	while (end > evcn) {
 1141		/* Remove segment [svcn : evcn]. */
1142		mi_remove_attr(NULL, mi, attr);
1143
1144		if (!al_remove_le(ni, le)) {
1145			err = -EINVAL;
1146			goto out;
1147		}
1148
1149		if (evcn + 1 >= alloc) {
1150			/* Last attribute segment. */
1151			evcn1 = evcn + 1;
1152			goto ins_ext;
1153		}
1154
1155		if (ni_load_mi(ni, le, &mi)) {
1156			attr = NULL;
1157			goto out;
1158		}
1159
1160		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1161		if (!attr) {
1162			err = -EINVAL;
1163			goto out;
1164		}
1165		svcn = le64_to_cpu(attr->nres.svcn);
1166		evcn = le64_to_cpu(attr->nres.evcn);
1167	}
1168
1169	if (end < svcn)
1170		end = svcn;
1171
1172	err = attr_load_runs(attr, ni, run, &end);
1173	if (err)
1174		goto out;
1175
1176	evcn1 = evcn + 1;
1177	attr->nres.svcn = cpu_to_le64(next_svcn);
1178	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1179	if (err)
1180		goto out;
1181
1182	le->vcn = cpu_to_le64(next_svcn);
1183	ni->attr_list.dirty = true;
1184	mi->dirty = true;
1185	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1186
1187ins_ext:
1188	if (evcn1 > next_svcn) {
1189		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1190					    next_svcn, evcn1 - next_svcn,
1191					    attr_b->flags, &attr, &mi, NULL);
1192		if (err)
1193			goto out;
1194	}
1195ok:
1196	run_truncate_around(run, vcn);
1197out:
1198	if (err && step > 1) {
1199		/* Too complex to restore. */
1200		_ntfs_bad_inode(&ni->vfs_inode);
1201	}
1202	up_write(&ni->file.run_lock);
1203	ni_unlock(ni);
1204
1205	return err;
1206
1207undo1:
1208	/* Undo step1. */
1209	attr_b->nres.total_size = cpu_to_le64(total_size0);
1210	inode_set_bytes(&ni->vfs_inode, total_size0);
1211
1212	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1213	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1214	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1215		_ntfs_bad_inode(&ni->vfs_inode);
1216	}
1217	goto out;
1218}
1219
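/*
 * Locking pattern in attr_data_get_block() above: the mapping is first
 * probed under the shared run_lock (fast path); only if a real cluster
 * must be allocated does it retry under ni_lock plus the exclusive
 * run_lock. For compressed attributes (c_unit != 0) the request is
 * widened to whole frames and @zero is forced on, presumably so the
 * untouched part of a frame reads back as zeros.
 */
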
1220int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1221{
1222	u64 vbo;
1223	struct ATTRIB *attr;
1224	u32 data_size;
1225
1226	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1227	if (!attr)
1228		return -EINVAL;
1229
1230	if (attr->non_res)
1231		return E_NTFS_NONRESIDENT;
1232
1233	vbo = page->index << PAGE_SHIFT;
1234	data_size = le32_to_cpu(attr->res.data_size);
1235	if (vbo < data_size) {
1236		const char *data = resident_data(attr);
1237		char *kaddr = kmap_atomic(page);
1238		u32 use = data_size - vbo;
1239
1240		if (use > PAGE_SIZE)
1241			use = PAGE_SIZE;
1242
1243		memcpy(kaddr, data + vbo, use);
1244		memset(kaddr + use, 0, PAGE_SIZE - use);
1245		kunmap_atomic(kaddr);
1246		flush_dcache_page(page);
1247		SetPageUptodate(page);
1248	} else if (!PageUptodate(page)) {
1249		zero_user_segment(page, 0, PAGE_SIZE);
1250		SetPageUptodate(page);
1251	}
1252
1253	return 0;
1254}
1255
1256int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1257{
1258	u64 vbo;
1259	struct mft_inode *mi;
1260	struct ATTRIB *attr;
1261	u32 data_size;
1262
1263	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1264	if (!attr)
1265		return -EINVAL;
1266
1267	if (attr->non_res) {
1268		/* Return special error code to check this case. */
1269		return E_NTFS_NONRESIDENT;
1270	}
1271
1272	vbo = page->index << PAGE_SHIFT;
1273	data_size = le32_to_cpu(attr->res.data_size);
1274	if (vbo < data_size) {
1275		char *data = resident_data(attr);
1276		char *kaddr = kmap_atomic(page);
1277		u32 use = data_size - vbo;
1278
1279		if (use > PAGE_SIZE)
1280			use = PAGE_SIZE;
1281		memcpy(data + vbo, kaddr, use);
1282		kunmap_atomic(kaddr);
1283		mi->dirty = true;
1284	}
1285	ni->i_valid = data_size;
1286
1287	return 0;
1288}
1289
1290/*
1291 * attr_load_runs_vcn - Load runs with VCN.
1292 */
1293int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1294		       const __le16 *name, u8 name_len, struct runs_tree *run,
1295		       CLST vcn)
1296{
1297	struct ATTRIB *attr;
1298	int err;
1299	CLST svcn, evcn;
1300	u16 ro;
1301
1302	if (!ni) {
1303		/* Is record corrupted? */
1304		return -ENOENT;
1305	}
1306
1307	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1308	if (!attr) {
1309		/* Is record corrupted? */
1310		return -ENOENT;
1311	}
1312
1313	svcn = le64_to_cpu(attr->nres.svcn);
1314	evcn = le64_to_cpu(attr->nres.evcn);
1315
1316	if (evcn < vcn || vcn < svcn) {
1317		/* Is record corrupted? */
1318		return -EINVAL;
1319	}
1320
1321	ro = le16_to_cpu(attr->nres.run_off);
1322
1323	if (ro > le32_to_cpu(attr->size))
1324		return -EINVAL;
1325
1326	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1327			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1328	if (err < 0)
1329		return err;
1330	return 0;
1331}
1332
1333/*
1334 * attr_load_runs_range - Load runs for given range [from to).
1335 */
1336int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1337			 const __le16 *name, u8 name_len, struct runs_tree *run,
1338			 u64 from, u64 to)
1339{
1340	struct ntfs_sb_info *sbi = ni->mi.sbi;
1341	u8 cluster_bits = sbi->cluster_bits;
1342	CLST vcn;
1343	CLST vcn_last = (to - 1) >> cluster_bits;
1344	CLST lcn, clen;
1345	int err;
1346
1347	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1348		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1349			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1350						 vcn);
1351			if (err)
1352				return err;
 1353			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1354		}
1355	}
1356
1357	return 0;
1358}
1359
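/*
 * Note: @from/@to above are byte offsets; the loop converts them to a VCN
 * range and pulls in missing fragments one attr_load_runs_vcn() call at a
 * time, so a fully cached range costs only run_lookup_entry() hits.
 */
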
1360#ifdef CONFIG_NTFS3_LZX_XPRESS
1361/*
1362 * attr_wof_frame_info
1363 *
1364 * Read header of Xpress/LZX file to get info about frame.
1365 */
1366int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1367			struct runs_tree *run, u64 frame, u64 frames,
1368			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1369{
1370	struct ntfs_sb_info *sbi = ni->mi.sbi;
1371	u64 vbo[2], off[2], wof_size;
1372	u32 voff;
1373	u8 bytes_per_off;
1374	char *addr;
1375	struct page *page;
1376	int i, err;
1377	__le32 *off32;
1378	__le64 *off64;
1379
1380	if (ni->vfs_inode.i_size < 0x100000000ull) {
1381		/* File starts with array of 32 bit offsets. */
1382		bytes_per_off = sizeof(__le32);
1383		vbo[1] = frame << 2;
1384		*vbo_data = frames << 2;
1385	} else {
1386		/* File starts with array of 64 bit offsets. */
1387		bytes_per_off = sizeof(__le64);
1388		vbo[1] = frame << 3;
1389		*vbo_data = frames << 3;
1390	}
1391
1392	/*
1393	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1394	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1395	 */
1396	if (!attr->non_res) {
1397		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1398			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1399			return -EINVAL;
1400		}
1401		addr = resident_data(attr);
1402
1403		if (bytes_per_off == sizeof(__le32)) {
1404			off32 = Add2Ptr(addr, vbo[1]);
1405			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1406			off[1] = le32_to_cpu(off32[0]);
1407		} else {
1408			off64 = Add2Ptr(addr, vbo[1]);
1409			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1410			off[1] = le64_to_cpu(off64[0]);
1411		}
1412
1413		*vbo_data += off[0];
1414		*ondisk_size = off[1] - off[0];
1415		return 0;
1416	}
1417
1418	wof_size = le64_to_cpu(attr->nres.data_size);
1419	down_write(&ni->file.run_lock);
1420	page = ni->file.offs_page;
1421	if (!page) {
1422		page = alloc_page(GFP_KERNEL);
1423		if (!page) {
1424			err = -ENOMEM;
1425			goto out;
1426		}
1427		page->index = -1;
1428		ni->file.offs_page = page;
1429	}
1430	lock_page(page);
1431	addr = page_address(page);
1432
1433	if (vbo[1]) {
1434		voff = vbo[1] & (PAGE_SIZE - 1);
1435		vbo[0] = vbo[1] - bytes_per_off;
1436		i = 0;
1437	} else {
1438		voff = 0;
1439		vbo[0] = 0;
1440		off[0] = 0;
1441		i = 1;
1442	}
1443
1444	do {
1445		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1446
1447		if (index != page->index) {
1448			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1449			u64 to = min(from + PAGE_SIZE, wof_size);
1450
1451			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1452						   ARRAY_SIZE(WOF_NAME), run,
1453						   from, to);
1454			if (err)
1455				goto out1;
1456
1457			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1458					     to - from, REQ_OP_READ);
1459			if (err) {
1460				page->index = -1;
1461				goto out1;
1462			}
1463			page->index = index;
1464		}
1465
1466		if (i) {
1467			if (bytes_per_off == sizeof(__le32)) {
1468				off32 = Add2Ptr(addr, voff);
1469				off[1] = le32_to_cpu(*off32);
1470			} else {
1471				off64 = Add2Ptr(addr, voff);
1472				off[1] = le64_to_cpu(*off64);
1473			}
1474		} else if (!voff) {
1475			if (bytes_per_off == sizeof(__le32)) {
1476				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1477				off[0] = le32_to_cpu(*off32);
1478			} else {
1479				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1480				off[0] = le64_to_cpu(*off64);
1481			}
1482		} else {
1483			/* Two values in one page. */
1484			if (bytes_per_off == sizeof(__le32)) {
1485				off32 = Add2Ptr(addr, voff);
1486				off[0] = le32_to_cpu(off32[-1]);
1487				off[1] = le32_to_cpu(off32[0]);
1488			} else {
1489				off64 = Add2Ptr(addr, voff);
1490				off[0] = le64_to_cpu(off64[-1]);
1491				off[1] = le64_to_cpu(off64[0]);
1492			}
1493			break;
1494		}
1495	} while (++i < 2);
1496
1497	*vbo_data += off[0];
1498	*ondisk_size = off[1] - off[0];
1499
1500out1:
1501	unlock_page(page);
1502out:
1503	up_write(&ni->file.run_lock);
1504	return err;
1505}
1506#endif
1507
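/*
 * On-disk layout assumed by attr_wof_frame_info() above: the WOF stream
 * begins with an array of frame end offsets (32-bit when the file is
 * smaller than 4G, 64-bit otherwise), so frame N occupies
 * [offset[N-1], offset[N]) with offset[-1] taken as 0. *vbo_data points
 * past the offset array and *ondisk_size is the difference of the two
 * offsets read.
 */
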
1508/*
 1509 * attr_is_frame_compressed - Used to detect a compressed frame.
1510 */
1511int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1512			     CLST frame, CLST *clst_data)
1513{
1514	int err;
1515	u32 clst_frame;
1516	CLST clen, lcn, vcn, alen, slen, vcn_next;
1517	size_t idx;
1518	struct runs_tree *run;
1519
1520	*clst_data = 0;
1521
1522	if (!is_attr_compressed(attr))
1523		return 0;
1524
1525	if (!attr->non_res)
1526		return 0;
1527
1528	clst_frame = 1u << attr->nres.c_unit;
1529	vcn = frame * clst_frame;
1530	run = &ni->file.run;
1531
1532	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1533		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1534					 attr->name_len, run, vcn);
1535		if (err)
1536			return err;
1537
1538		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1539			return -EINVAL;
1540	}
1541
1542	if (lcn == SPARSE_LCN) {
 1543		/* Sparse frame. */
1544		return 0;
1545	}
1546
1547	if (clen >= clst_frame) {
1548		/*
 1549		 * The frame is not compressed because
1550		 * it does not contain any sparse clusters.
1551		 */
1552		*clst_data = clst_frame;
1553		return 0;
1554	}
1555
1556	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1557	slen = 0;
1558	*clst_data = clen;
1559
1560	/*
1561	 * The frame is compressed if *clst_data + slen >= clst_frame.
1562	 * Check next fragments.
1563	 */
1564	while ((vcn += clen) < alen) {
1565		vcn_next = vcn;
1566
1567		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1568		    vcn_next != vcn) {
1569			err = attr_load_runs_vcn(ni, attr->type,
1570						 attr_name(attr),
1571						 attr->name_len, run, vcn_next);
1572			if (err)
1573				return err;
1574			vcn = vcn_next;
1575
1576			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1577				return -EINVAL;
1578		}
1579
1580		if (lcn == SPARSE_LCN) {
1581			slen += clen;
1582		} else {
1583			if (slen) {
1584				/*
 1585				 * Data clusters + sparse clusters are
 1586				 * not enough for the frame.
1587				 */
1588				return -EINVAL;
1589			}
1590			*clst_data += clen;
1591		}
1592
1593		if (*clst_data + slen >= clst_frame) {
1594			if (!slen) {
1595				/*
 1596				 * There are no sparse clusters in this frame,
 1597				 * so it is not compressed.
1598				 */
1599				*clst_data = clst_frame;
1600			} else {
1601				/* Frame is compressed. */
1602			}
1603			break;
1604		}
1605	}
1606
1607	return 0;
1608}
1609
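/*
 * In short, the walk above treats a frame as compressed when its
 * allocated (non-sparse) clusters cover less than a full frame and the
 * remainder is sparse; *clst_data then holds only the allocated part.
 * A fully mapped frame yields *clst_data == clst_frame, a fully sparse
 * frame yields 0.
 */
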
1610/*
1611 * attr_allocate_frame - Allocate/free clusters for @frame.
1612 *
1613 * Assumed: down_write(&ni->file.run_lock);
1614 */
1615int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1616			u64 new_valid)
1617{
1618	int err = 0;
1619	struct runs_tree *run = &ni->file.run;
1620	struct ntfs_sb_info *sbi = ni->mi.sbi;
1621	struct ATTRIB *attr = NULL, *attr_b;
1622	struct ATTR_LIST_ENTRY *le, *le_b;
1623	struct mft_inode *mi, *mi_b;
1624	CLST svcn, evcn1, next_svcn, len;
1625	CLST vcn, end, clst_data;
1626	u64 total_size, valid_size, data_size;
1627
1628	le_b = NULL;
1629	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1630	if (!attr_b)
1631		return -ENOENT;
1632
1633	if (!is_attr_ext(attr_b))
1634		return -EINVAL;
1635
1636	vcn = frame << NTFS_LZNT_CUNIT;
1637	total_size = le64_to_cpu(attr_b->nres.total_size);
1638
1639	svcn = le64_to_cpu(attr_b->nres.svcn);
1640	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1641	data_size = le64_to_cpu(attr_b->nres.data_size);
1642
1643	if (svcn <= vcn && vcn < evcn1) {
1644		attr = attr_b;
1645		le = le_b;
1646		mi = mi_b;
1647	} else if (!le_b) {
1648		err = -EINVAL;
1649		goto out;
1650	} else {
1651		le = le_b;
1652		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1653				    &mi);
1654		if (!attr) {
1655			err = -EINVAL;
1656			goto out;
1657		}
1658		svcn = le64_to_cpu(attr->nres.svcn);
1659		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1660	}
1661
1662	err = attr_load_runs(attr, ni, run, NULL);
1663	if (err)
1664		goto out;
1665
1666	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1667	if (err)
1668		goto out;
1669
1670	total_size -= (u64)clst_data << sbi->cluster_bits;
1671
1672	len = bytes_to_cluster(sbi, compr_size);
1673
1674	if (len == clst_data)
1675		goto out;
1676
1677	if (len < clst_data) {
1678		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1679					NULL, true);
1680		if (err)
1681			goto out;
1682
1683		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1684				   false)) {
1685			err = -ENOMEM;
1686			goto out;
1687		}
1688		end = vcn + clst_data;
1689		/* Run contains updated range [vcn + len : end). */
1690	} else {
1691		CLST alen, hint = 0;
1692		/* Get the last LCN to allocate from. */
1693		if (vcn + clst_data &&
1694		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1695				      NULL)) {
1696			hint = -1;
1697		}
1698
1699		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1700					     hint + 1, len - clst_data, NULL,
1701					     ALLOCATE_DEF, &alen, 0, NULL,
1702					     NULL);
1703		if (err)
1704			goto out;
1705
1706		end = vcn + len;
1707		/* Run contains updated range [vcn + clst_data : end). */
1708	}
1709
1710	total_size += (u64)len << sbi->cluster_bits;
1711
1712repack:
1713	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1714	if (err)
1715		goto out;
1716
1717	attr_b->nres.total_size = cpu_to_le64(total_size);
1718	inode_set_bytes(&ni->vfs_inode, total_size);
1719
1720	mi_b->dirty = true;
1721	mark_inode_dirty(&ni->vfs_inode);
1722
1723	/* Stored [vcn : next_svcn) from [vcn : end). */
1724	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1725
1726	if (end <= evcn1) {
1727		if (next_svcn == evcn1) {
1728			/* Normal way. Update attribute and exit. */
1729			goto ok;
1730		}
 1731		/* Add new segment [next_svcn : evcn1). */
1732		if (!ni->attr_list.size) {
1733			err = ni_create_attr_list(ni);
1734			if (err)
1735				goto out;
1736			/* Layout of records is changed. */
1737			le_b = NULL;
1738			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1739					      0, NULL, &mi_b);
1740			if (!attr_b) {
1741				err = -ENOENT;
1742				goto out;
1743			}
1744
1745			attr = attr_b;
1746			le = le_b;
1747			mi = mi_b;
1748			goto repack;
1749		}
1750	}
1751
1752	svcn = evcn1;
1753
1754	/* Estimate next attribute. */
1755	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1756
1757	if (attr) {
1758		CLST alloc = bytes_to_cluster(
1759			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1760		CLST evcn = le64_to_cpu(attr->nres.evcn);
1761
1762		if (end < next_svcn)
1763			end = next_svcn;
1764		while (end > evcn) {
 1765			/* Remove segment [svcn : evcn]. */
1766			mi_remove_attr(NULL, mi, attr);
1767
1768			if (!al_remove_le(ni, le)) {
1769				err = -EINVAL;
1770				goto out;
1771			}
1772
1773			if (evcn + 1 >= alloc) {
1774				/* Last attribute segment. */
1775				evcn1 = evcn + 1;
1776				goto ins_ext;
1777			}
1778
1779			if (ni_load_mi(ni, le, &mi)) {
1780				attr = NULL;
1781				goto out;
1782			}
1783
1784			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1785					    &le->id);
1786			if (!attr) {
1787				err = -EINVAL;
1788				goto out;
1789			}
1790			svcn = le64_to_cpu(attr->nres.svcn);
1791			evcn = le64_to_cpu(attr->nres.evcn);
1792		}
1793
1794		if (end < svcn)
1795			end = svcn;
1796
1797		err = attr_load_runs(attr, ni, run, &end);
1798		if (err)
1799			goto out;
1800
1801		evcn1 = evcn + 1;
1802		attr->nres.svcn = cpu_to_le64(next_svcn);
1803		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1804		if (err)
1805			goto out;
1806
1807		le->vcn = cpu_to_le64(next_svcn);
1808		ni->attr_list.dirty = true;
1809		mi->dirty = true;
1810
1811		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1812	}
1813ins_ext:
1814	if (evcn1 > next_svcn) {
1815		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1816					    next_svcn, evcn1 - next_svcn,
1817					    attr_b->flags, &attr, &mi, NULL);
1818		if (err)
1819			goto out;
1820	}
1821ok:
1822	run_truncate_around(run, vcn);
1823out:
1824	if (new_valid > data_size)
1825		new_valid = data_size;
1826
1827	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1828	if (new_valid != valid_size) {
 1829		attr_b->nres.valid_size = cpu_to_le64(new_valid);
1830		mi_b->dirty = true;
1831	}
1832
1833	return err;
1834}
1835
1836/*
1837 * attr_collapse_range - Collapse range in file.
1838 */
1839int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1840{
1841	int err = 0;
1842	struct runs_tree *run = &ni->file.run;
1843	struct ntfs_sb_info *sbi = ni->mi.sbi;
1844	struct ATTRIB *attr = NULL, *attr_b;
1845	struct ATTR_LIST_ENTRY *le, *le_b;
1846	struct mft_inode *mi, *mi_b;
1847	CLST svcn, evcn1, len, dealloc, alen;
1848	CLST vcn, end;
1849	u64 valid_size, data_size, alloc_size, total_size;
1850	u32 mask;
1851	__le16 a_flags;
1852
1853	if (!bytes)
1854		return 0;
1855
1856	le_b = NULL;
1857	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1858	if (!attr_b)
1859		return -ENOENT;
1860
1861	if (!attr_b->non_res) {
1862		/* Attribute is resident. Nothing to do? */
1863		return 0;
1864	}
1865
1866	data_size = le64_to_cpu(attr_b->nres.data_size);
1867	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1868	a_flags = attr_b->flags;
1869
1870	if (is_attr_ext(attr_b)) {
1871		total_size = le64_to_cpu(attr_b->nres.total_size);
1872		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1873	} else {
1874		total_size = alloc_size;
1875		mask = sbi->cluster_mask;
1876	}
1877
1878	if ((vbo & mask) || (bytes & mask)) {
 1879		/* Only cluster-aligned ranges can be collapsed. */
1880		return -EINVAL;
1881	}
1882
1883	if (vbo > data_size)
1884		return -EINVAL;
1885
1886	down_write(&ni->file.run_lock);
1887
1888	if (vbo + bytes >= data_size) {
1889		u64 new_valid = min(ni->i_valid, vbo);
1890
1891		/* Simple truncate file at 'vbo'. */
1892		truncate_setsize(&ni->vfs_inode, vbo);
1893		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1894				    &new_valid, true, NULL);
1895
1896		if (!err && new_valid < ni->i_valid)
1897			ni->i_valid = new_valid;
1898
1899		goto out;
1900	}
1901
1902	/*
1903	 * Enumerate all attribute segments and collapse.
1904	 */
1905	alen = alloc_size >> sbi->cluster_bits;
1906	vcn = vbo >> sbi->cluster_bits;
1907	len = bytes >> sbi->cluster_bits;
1908	end = vcn + len;
1909	dealloc = 0;
1910
1911	svcn = le64_to_cpu(attr_b->nres.svcn);
1912	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1913
1914	if (svcn <= vcn && vcn < evcn1) {
1915		attr = attr_b;
1916		le = le_b;
1917		mi = mi_b;
1918	} else if (!le_b) {
1919		err = -EINVAL;
1920		goto out;
1921	} else {
1922		le = le_b;
1923		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1924				    &mi);
1925		if (!attr) {
1926			err = -EINVAL;
1927			goto out;
1928		}
1929
1930		svcn = le64_to_cpu(attr->nres.svcn);
1931		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1932	}
1933
1934	for (;;) {
1935		if (svcn >= end) {
 1936			/* Shift VCN. */
1937			attr->nres.svcn = cpu_to_le64(svcn - len);
1938			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1939			if (le) {
1940				le->vcn = attr->nres.svcn;
1941				ni->attr_list.dirty = true;
1942			}
1943			mi->dirty = true;
1944		} else if (svcn < vcn || end < evcn1) {
1945			CLST vcn1, eat, next_svcn;
1946
1947			/* Collapse a part of this attribute segment. */
1948			err = attr_load_runs(attr, ni, run, &svcn);
1949			if (err)
1950				goto out;
1951			vcn1 = max(vcn, svcn);
1952			eat = min(end, evcn1) - vcn1;
1953
1954			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1955						true);
1956			if (err)
1957				goto out;
1958
1959			if (!run_collapse_range(run, vcn1, eat)) {
1960				err = -ENOMEM;
1961				goto out;
1962			}
1963
1964			if (svcn >= vcn) {
1965				/* Shift VCN */
1966				attr->nres.svcn = cpu_to_le64(vcn);
1967				if (le) {
1968					le->vcn = attr->nres.svcn;
1969					ni->attr_list.dirty = true;
1970				}
1971			}
1972
1973			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1974			if (err)
1975				goto out;
1976
1977			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1978			if (next_svcn + eat < evcn1) {
1979				err = ni_insert_nonresident(
1980					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1981					evcn1 - eat - next_svcn, a_flags, &attr,
1982					&mi, &le);
1983				if (err)
1984					goto out;
1985
 1986				/* Layout of records may be changed. */
1987				attr_b = NULL;
1988			}
1989
1990			/* Free all allocated memory. */
1991			run_truncate(run, 0);
1992		} else {
1993			u16 le_sz;
1994			u16 roff = le16_to_cpu(attr->nres.run_off);
1995
1996			if (roff > le32_to_cpu(attr->size)) {
1997				err = -EINVAL;
1998				goto out;
1999			}
2000
2001			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2002				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2003				      le32_to_cpu(attr->size) - roff);
2004
2005			/* Delete this attribute segment. */
2006			mi_remove_attr(NULL, mi, attr);
2007			if (!le)
2008				break;
2009
2010			le_sz = le16_to_cpu(le->size);
2011			if (!al_remove_le(ni, le)) {
2012				err = -EINVAL;
2013				goto out;
2014			}
2015
2016			if (evcn1 >= alen)
2017				break;
2018
2019			if (!svcn) {
2020				/* Load next record that contains this attribute. */
2021				if (ni_load_mi(ni, le, &mi)) {
2022					err = -EINVAL;
2023					goto out;
2024				}
2025
2026				/* Look for required attribute. */
2027				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2028						    0, &le->id);
2029				if (!attr) {
2030					err = -EINVAL;
2031					goto out;
2032				}
2033				goto next_attr;
2034			}
2035			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2036		}
2037
2038		if (evcn1 >= alen)
2039			break;
2040
2041		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2042		if (!attr) {
2043			err = -EINVAL;
2044			goto out;
2045		}
2046
2047next_attr:
2048		svcn = le64_to_cpu(attr->nres.svcn);
2049		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2050	}
2051
2052	if (!attr_b) {
2053		le_b = NULL;
2054		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2055				      &mi_b);
2056		if (!attr_b) {
2057			err = -ENOENT;
2058			goto out;
2059		}
2060	}
2061
2062	data_size -= bytes;
2063	valid_size = ni->i_valid;
2064	if (vbo + bytes <= valid_size)
2065		valid_size -= bytes;
2066	else if (vbo < valid_size)
2067		valid_size = vbo;
2068
2069	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2070	attr_b->nres.data_size = cpu_to_le64(data_size);
2071	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2072	total_size -= (u64)dealloc << sbi->cluster_bits;
2073	if (is_attr_ext(attr_b))
2074		attr_b->nres.total_size = cpu_to_le64(total_size);
2075	mi_b->dirty = true;
2076
2077	/* Update inode size. */
2078	ni->i_valid = valid_size;
2079	ni->vfs_inode.i_size = data_size;
2080	inode_set_bytes(&ni->vfs_inode, total_size);
2081	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2082	mark_inode_dirty(&ni->vfs_inode);
2083
2084out:
2085	up_write(&ni->file.run_lock);
2086	if (err)
2087		_ntfs_bad_inode(&ni->vfs_inode);
2088
2089	return err;
2090}
2091
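/*
 * Collapse bookkeeping above: segments entirely past the removed range
 * only get their svcn/evcn shifted down by 'len'; segments overlapping
 * the range are loaded, the overlapped clusters freed (run_deallocate_ex)
 * and cut out of the run (run_collapse_range); segments fully inside the
 * range are deleted outright, their runs released via RUN_DEALLOCATE.
 */
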
2092/*
2093 * attr_punch_hole
2094 *
2095 * Not for normal files.
2096 */
2097int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2098{
2099	int err = 0;
2100	struct runs_tree *run = &ni->file.run;
2101	struct ntfs_sb_info *sbi = ni->mi.sbi;
2102	struct ATTRIB *attr = NULL, *attr_b;
2103	struct ATTR_LIST_ENTRY *le, *le_b;
2104	struct mft_inode *mi, *mi_b;
2105	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2106	u64 total_size, alloc_size;
2107	u32 mask;
2108	__le16 a_flags;
2109	struct runs_tree run2;
2110
2111	if (!bytes)
2112		return 0;
2113
2114	le_b = NULL;
2115	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2116	if (!attr_b)
2117		return -ENOENT;
2118
2119	if (!attr_b->non_res) {
2120		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2121		u32 from, to;
2122
2123		if (vbo > data_size)
2124			return 0;
2125
2126		from = vbo;
2127		to = min_t(u64, vbo + bytes, data_size);
2128		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2129		return 0;
2130	}
2131
2132	if (!is_attr_ext(attr_b))
2133		return -EOPNOTSUPP;
2134
2135	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2136	total_size = le64_to_cpu(attr_b->nres.total_size);
2137
2138	if (vbo >= alloc_size) {
2139		/* NOTE: It is allowed. */
2140		return 0;
2141	}
2142
2143	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2144
2145	bytes += vbo;
2146	if (bytes > alloc_size)
2147		bytes = alloc_size;
2148	bytes -= vbo;
2149
2150	if ((vbo & mask) || (bytes & mask)) {
2151		/* We would have to zero unaligned range(s). */
2152		if (frame_size == NULL) {
2153			/* Caller insists the range is aligned. */
2154			return -EINVAL;
2155		}
2156		*frame_size = mask + 1;
2157		return E_NTFS_NOTALIGNED;
2158	}
2159
2160	down_write(&ni->file.run_lock);
2161	run_init(&run2);
2162	run_truncate(run, 0);
2163
2164	/*
2165	 * Enumerate all attribute segments and punch hole where necessary.
2166	 */
2167	alen = alloc_size >> sbi->cluster_bits;
2168	vcn = vbo >> sbi->cluster_bits;
2169	len = bytes >> sbi->cluster_bits;
2170	end = vcn + len;
2171	hole = 0;
2172
2173	svcn = le64_to_cpu(attr_b->nres.svcn);
2174	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2175	a_flags = attr_b->flags;
2176
2177	if (svcn <= vcn && vcn < evcn1) {
2178		attr = attr_b;
2179		le = le_b;
2180		mi = mi_b;
2181	} else if (!le_b) {
2182		err = -EINVAL;
2183		goto bad_inode;
2184	} else {
2185		le = le_b;
2186		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2187				    &mi);
2188		if (!attr) {
2189			err = -EINVAL;
2190			goto bad_inode;
2191		}
2192
2193		svcn = le64_to_cpu(attr->nres.svcn);
2194		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2195	}
2196
2197	while (svcn < end) {
2198		CLST vcn1, zero, hole2 = hole;
2199
2200		err = attr_load_runs(attr, ni, run, &svcn);
2201		if (err)
2202			goto done;
2203		vcn1 = max(vcn, svcn);
2204		zero = min(end, evcn1) - vcn1;
2205
2206		/*
2207		 * Check range [vcn1 + zero).
2208		 * Calculate how many clusters there are.
2209		 * Don't do any destructive actions.
2210		 */
2211		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2212		if (err)
2213			goto done;
2214
2215		/* Check if the required range is already a hole. */
2216		if (hole2 == hole)
2217			goto next_attr;
2218
2219		/* Make a clone of the run to be able to undo. */
2220		err = run_clone(run, &run2);
2221		if (err)
2222			goto done;
2223
2224		/* Make a hole range (sparse) [vcn1 + zero). */
2225		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2226			err = -ENOMEM;
2227			goto done;
2228		}
2229
2230		/* Update run in attribute segment. */
2231		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2232		if (err)
2233			goto done;
2234		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2235		if (next_svcn < evcn1) {
2236			/* Insert new attribute segment. */
2237			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2238						    next_svcn,
2239						    evcn1 - next_svcn, a_flags,
2240						    &attr, &mi, &le);
2241			if (err)
2242				goto undo_punch;
2243
2244			/* Layout of records may be changed. */
2245			attr_b = NULL;
2246		}
2247
2248		/* Real deallocation. Should not fail. */
2249		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2250
2251next_attr:
2252		/* Free all allocated memory. */
2253		run_truncate(run, 0);
2254
2255		if (evcn1 >= alen)
2256			break;
2257
2258		/* Get next attribute segment. */
2259		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2260		if (!attr) {
2261			err = -EINVAL;
2262			goto bad_inode;
2263		}
2264
2265		svcn = le64_to_cpu(attr->nres.svcn);
2266		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2267	}
2268
2269done:
2270	if (!hole)
2271		goto out;
2272
2273	if (!attr_b) {
2274		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2275				      &mi_b);
2276		if (!attr_b) {
2277			err = -EINVAL;
2278			goto bad_inode;
2279		}
2280	}
2281
2282	total_size -= (u64)hole << sbi->cluster_bits;
2283	attr_b->nres.total_size = cpu_to_le64(total_size);
2284	mi_b->dirty = true;
2285
2286	/* Update inode size. */
2287	inode_set_bytes(&ni->vfs_inode, total_size);
2288	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2289	mark_inode_dirty(&ni->vfs_inode);
2290
2291out:
2292	run_close(&run2);
2293	up_write(&ni->file.run_lock);
2294	return err;
2295
2296bad_inode:
2297	_ntfs_bad_inode(&ni->vfs_inode);
2298	goto out;
2299
2300undo_punch:
2301	/*
2302	 * Restore packed runs.
2303	 * 'mi_pack_runs' should not fail, because we restore the original.
2304	 */
2305	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2306		goto bad_inode;
2307
2308	goto done;
2309}
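
/*
 * Editor's sketch (hypothetical helper, not part of ntfs3): the alignment
 * rule attr_punch_hole() enforces above. With a compression unit of 2^c_unit
 * clusters, 'mask' selects the in-frame byte offset; only ranges whose start
 * and length are frame-aligned are punched directly, everything else is
 * bounced back to the caller via E_NTFS_NOTALIGNED so it can zero the
 * unaligned head/tail first.
 */
static inline bool sketch_punch_is_aligned(u64 vbo, u64 bytes,
					   u32 cluster_size, u8 c_unit)
{
	u32 mask = (cluster_size << c_unit) - 1;

	return !(vbo & mask) && !(bytes & mask);
}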
2310
2311/*
2312 * attr_insert_range - Insert a range (hole) into the file.
2313 * Only for sparse/compressed attributes, not for normal files.
2314 */
2315int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2316{
2317	int err = 0;
2318	struct runs_tree *run = &ni->file.run;
2319	struct ntfs_sb_info *sbi = ni->mi.sbi;
2320	struct ATTRIB *attr = NULL, *attr_b;
2321	struct ATTR_LIST_ENTRY *le, *le_b;
2322	struct mft_inode *mi, *mi_b;
2323	CLST vcn, svcn, evcn1, len, next_svcn;
2324	u64 data_size, alloc_size;
2325	u32 mask;
2326	__le16 a_flags;
2327
2328	if (!bytes)
2329		return 0;
2330
2331	le_b = NULL;
2332	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2333	if (!attr_b)
2334		return -ENOENT;
2335
2336	if (!is_attr_ext(attr_b)) {
2337		/* This was checked above. See fallocate. */
2338		return -EOPNOTSUPP;
2339	}
2340
2341	if (!attr_b->non_res) {
2342		data_size = le32_to_cpu(attr_b->res.data_size);
2343		alloc_size = data_size;
2344		mask = sbi->cluster_mask; /* cluster_size - 1 */
2345	} else {
2346		data_size = le64_to_cpu(attr_b->nres.data_size);
2347		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2348		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2349	}
2350
2351	if (vbo > data_size) {
2352		/* Inserting a range beyond the file size is not allowed. */
2353		return -EINVAL;
2354	}
2355
2356	if ((vbo & mask) || (bytes & mask)) {
2357		/* Only frame-aligned ranges may be inserted. */
2358		return -EINVAL;
2359	}
2360
2361	/*
2362	 * valid_size <= data_size <= alloc_size
2363	 * Check that alloc_size does not exceed the maximum possible size.
2364	 */
2365	if (bytes > sbi->maxbytes_sparse - alloc_size)
2366		return -EFBIG;
2367
2368	vcn = vbo >> sbi->cluster_bits;
2369	len = bytes >> sbi->cluster_bits;
2370
2371	down_write(&ni->file.run_lock);
2372
2373	if (!attr_b->non_res) {
2374		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2375				    data_size + bytes, NULL, false, NULL);
2376
2377		le_b = NULL;
2378		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2379				      &mi_b);
2380		if (!attr_b) {
2381			err = -EINVAL;
2382			goto bad_inode;
2383		}
2384
2385		if (err)
2386			goto out;
2387
2388		if (!attr_b->non_res) {
2389			/* Still resident. */
2390			char *data = Add2Ptr(attr_b,
2391					     le16_to_cpu(attr_b->res.data_off));
2392
2393			memmove(data + bytes, data, bytes);
2394			memset(data, 0, bytes);
2395			goto done;
2396		}
2397
2398		/* The resident attribute has become nonresident. */
2399		data_size = le64_to_cpu(attr_b->nres.data_size);
2400		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2401	}
2402
2403	/*
2404	 * Enumerate all attribute segments and shift start vcn.
2405	 */
2406	a_flags = attr_b->flags;
2407	svcn = le64_to_cpu(attr_b->nres.svcn);
2408	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2409
2410	if (svcn <= vcn && vcn < evcn1) {
2411		attr = attr_b;
2412		le = le_b;
2413		mi = mi_b;
2414	} else if (!le_b) {
2415		err = -EINVAL;
2416		goto bad_inode;
2417	} else {
2418		le = le_b;
2419		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2420				    &mi);
2421		if (!attr) {
2422			err = -EINVAL;
2423			goto bad_inode;
2424		}
2425
2426		svcn = le64_to_cpu(attr->nres.svcn);
2427		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2428	}
2429
2430	run_truncate(run, 0); /* clear cached values. */
2431	err = attr_load_runs(attr, ni, run, NULL);
2432	if (err)
2433		goto out;
2434
2435	if (!run_insert_range(run, vcn, len)) {
2436		err = -ENOMEM;
2437		goto out;
2438	}
2439
2440	/* Try to pack in current record as much as possible. */
2441	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2442	if (err)
2443		goto out;
2444
2445	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2446
2447	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2448	       attr->type == ATTR_DATA && !attr->name_len) {
2449		le64_add_cpu(&attr->nres.svcn, len);
2450		le64_add_cpu(&attr->nres.evcn, len);
2451		if (le) {
2452			le->vcn = attr->nres.svcn;
2453			ni->attr_list.dirty = true;
2454		}
2455		mi->dirty = true;
2456	}
2457
2458	if (next_svcn < evcn1 + len) {
2459		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2460					    next_svcn, evcn1 + len - next_svcn,
2461					    a_flags, NULL, NULL, NULL);
2462
2463		le_b = NULL;
2464		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2465				      &mi_b);
2466		if (!attr_b) {
2467			err = -EINVAL;
2468			goto bad_inode;
2469		}
2470
2471		if (err) {
2472			/* ni_insert_nonresident failed. Try to undo. */
2473			goto undo_insert_range;
2474		}
2475	}
2476
2477	/*
2478	 * Update primary attribute segment.
2479	 */
2480	if (vbo <= ni->i_valid)
2481		ni->i_valid += bytes;
2482
2483	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2484	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2485
2486	/* ni->i_valid may temporarily differ from valid_size. */
2487	if (ni->i_valid > data_size + bytes)
2488		attr_b->nres.valid_size = attr_b->nres.data_size;
2489	else
2490		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2491	mi_b->dirty = true;
2492
2493done:
2494	ni->vfs_inode.i_size += bytes;
2495	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2496	mark_inode_dirty(&ni->vfs_inode);
2497
2498out:
2499	run_truncate(run, 0); /* clear cached values. */
2500
2501	up_write(&ni->file.run_lock);
2502
2503	return err;
2504
2505bad_inode:
2506	_ntfs_bad_inode(&ni->vfs_inode);
2507	goto out;
2508
2509undo_insert_range:
2510	svcn = le64_to_cpu(attr_b->nres.svcn);
2511	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2512
2513	if (svcn <= vcn && vcn < evcn1) {
2514		attr = attr_b;
2515		le = le_b;
2516		mi = mi_b;
2517	} else if (!le_b) {
2518		goto bad_inode;
2519	} else {
2520		le = le_b;
2521		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2522				    &mi);
2523		if (!attr) {
2524			goto bad_inode;
2525		}
2526
2527		svcn = le64_to_cpu(attr->nres.svcn);
2528		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2529	}
2530
2531	if (attr_load_runs(attr, ni, run, NULL))
2532		goto bad_inode;
2533
2534	if (!run_collapse_range(run, vcn, len))
2535		goto bad_inode;
2536
2537	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2538		goto bad_inode;
2539
2540	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2541	       attr->type == ATTR_DATA && !attr->name_len) {
2542		le64_sub_cpu(&attr->nres.svcn, len);
2543		le64_sub_cpu(&attr->nres.evcn, len);
2544		if (le) {
2545			le->vcn = attr->nres.svcn;
2546			ni->attr_list.dirty = true;
2547		}
2548		mi->dirty = true;
2549	}
2550
2551	goto out;
2552}
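
/*
 * Editor's sketch (hypothetical, not part of ntfs3): what the shift loop in
 * attr_insert_range() and its undo path do to each later attribute segment.
 * Inserting 'len' clusters moves every following [svcn, evcn] window up;
 * the undo path moves it back down. The real code operates on little-endian
 * on-disk fields with le64_add_cpu()/le64_sub_cpu().
 */
struct sketch_seg {
	u64 svcn; /* first VCN covered by the segment */
	u64 evcn; /* last VCN covered by the segment */
};

static inline void sketch_shift_segment(struct sketch_seg *s, u64 len,
					bool undo)
{
	if (undo) {
		s->svcn -= len;
		s->evcn -= len;
	} else {
		s->svcn += len;
		s->evcn += len;
	}
}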
 145
 146/*
 147 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 148 */
 149int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 150			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
 151			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
 152			   CLST *new_lcn, CLST *new_len)
 153{
 154	int err;
 155	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
 156	size_t cnt = run->count;
 157
 158	for (;;) {
 159		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
 160					       opt);
 161
 162		if (err == -ENOSPC && pre) {
 163			pre = 0;
 164			if (*pre_alloc)
 165				*pre_alloc = 0;
 166			continue;
 167		}
 168
 169		if (err)
 170			goto out;
 171
 172		if (vcn == vcn0) {
 173			/* Return the first fragment. */
 174			if (new_lcn)
 175				*new_lcn = lcn;
 176			if (new_len)
 177				*new_len = flen;
 178		}
 179
 180		/* Add new fragment into run storage. */
 181		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
 182			/* Undo last 'ntfs_look_for_free_space' */
 183			mark_as_free_ex(sbi, lcn, len, false);
 184			err = -ENOMEM;
 185			goto out;
 186		}
 187
 188		if (opt & ALLOCATE_ZERO) {
 189			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
 190
 191			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
 192						   (sector_t)lcn << shift,
 193						   (sector_t)flen << shift,
 194						   GFP_NOFS, 0);
 195			if (err)
 196				goto out;
 197		}
 198
 199		vcn += flen;
 200
 201		if (flen >= len || (opt & ALLOCATE_MFT) ||
 202		    (fr && run->count - cnt >= fr)) {
 203			*alen = vcn - vcn0;
 204			return 0;
 205		}
 206
 207		len -= flen;
 208	}
 209
 210out:
 211	/* Undo 'ntfs_look_for_free_space' */
 212	if (vcn - vcn0) {
 213		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
 214		run_truncate(run, vcn0);
 215	}
 216
 217	return err;
 218}
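
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the request size used by
 * the lookup loop in attr_allocate_clusters(). The first pass asks for the
 * hard requirement plus the optional preallocation; after -ENOSPC the
 * preallocation is dropped and only 'len' clusters are requested before the
 * error is finally propagated.
 */
static inline CLST sketch_alloc_request(CLST len, CLST pre_alloc, bool enospc)
{
	/* Preallocation is best effort: drop it rather than fail. */
	return enospc ? len : len + pre_alloc;
}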
 219
 220/*
 221 * attr_make_nonresident
 222 *
  223 * If page is not NULL, it already contains resident data
  224 * and is locked (called from ni_write_frame()).
 225 */
 226int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 227			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 228			  u64 new_size, struct runs_tree *run,
 229			  struct ATTRIB **ins_attr, struct page *page)
 230{
 231	struct ntfs_sb_info *sbi;
 232	struct ATTRIB *attr_s;
 233	struct MFT_REC *rec;
 234	u32 used, asize, rsize, aoff;
 235	bool is_data;
 236	CLST len, alen;
 237	char *next;
 238	int err;
 239
 240	if (attr->non_res) {
 241		*ins_attr = attr;
 242		return 0;
 243	}
 244
 245	sbi = mi->sbi;
 246	rec = mi->mrec;
 247	attr_s = NULL;
 248	used = le32_to_cpu(rec->used);
 249	asize = le32_to_cpu(attr->size);
 250	next = Add2Ptr(attr, asize);
 251	aoff = PtrOffset(rec, attr);
 252	rsize = le32_to_cpu(attr->res.data_size);
 253	is_data = attr->type == ATTR_DATA && !attr->name_len;
 254
  255	/* len - how many clusters are required to store 'rsize' bytes. */
 256	if (is_attr_compressed(attr)) {
 257		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
 258		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
 259	} else {
 260		len = bytes_to_cluster(sbi, rsize);
 261	}
 262
 263	run_init(run);
 264
 265	/* Make a copy of original attribute. */
 266	attr_s = kmemdup(attr, asize, GFP_NOFS);
 267	if (!attr_s) {
 268		err = -ENOMEM;
 269		goto out;
 270	}
 271
 272	if (!len) {
 273		/* Empty resident -> Empty nonresident. */
 274		alen = 0;
 275	} else {
 276		const char *data = resident_data(attr);
 277
 278		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
 279					     ALLOCATE_DEF, &alen, 0, NULL,
 280					     NULL);
 281		if (err)
 282			goto out1;
 283
 284		if (!rsize) {
  285			/* Empty resident -> Non-empty nonresident. */
 286		} else if (!is_data) {
 287			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
 288			if (err)
 289				goto out2;
 290		} else if (!page) {
 291			struct address_space *mapping = ni->vfs_inode.i_mapping;
 292			struct folio *folio;
 293
 294			folio = __filemap_get_folio(
 295				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 296				mapping_gfp_mask(mapping));
 297			if (IS_ERR(folio)) {
 298				err = PTR_ERR(folio);
 299				goto out2;
 300			}
 301			folio_fill_tail(folio, 0, data, rsize);
 302			folio_mark_uptodate(folio);
 303			folio_mark_dirty(folio);
 304			folio_unlock(folio);
  305			folio_put(folio);
 306		}
 307	}
 308
 309	/* Remove original attribute. */
 310	used -= asize;
 311	memmove(attr, Add2Ptr(attr, asize), used - aoff);
 312	rec->used = cpu_to_le32(used);
 313	mi->dirty = true;
 314	if (le)
 315		al_remove_le(ni, le);
 316
 317	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
 318				    attr_s->name_len, run, 0, alen,
 319				    attr_s->flags, &attr, NULL, NULL);
 320	if (err)
 321		goto out3;
 322
 323	kfree(attr_s);
 324	attr->nres.data_size = cpu_to_le64(rsize);
 325	attr->nres.valid_size = attr->nres.data_size;
 326
 327	*ins_attr = attr;
 328
 329	if (is_data)
 330		ni->ni_flags &= ~NI_FLAG_RESIDENT;
 331
  332	/* Resident attribute becomes nonresident. */
 333	return 0;
 334
 335out3:
 336	attr = Add2Ptr(rec, aoff);
 337	memmove(next, attr, used - aoff);
 338	memcpy(attr, attr_s, asize);
 339	rec->used = cpu_to_le32(used + asize);
 340	mi->dirty = true;
 341out2:
  342	/* Undo: do not trim newly allocated clusters. */
 343	run_deallocate(sbi, run, false);
 344	run_close(run);
 345out1:
 346	kfree(attr_s);
 347out:
 348	return err;
 349}
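
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the cluster count
 * computed above for 'rsize' bytes of resident data. A compressed attribute
 * allocates whole compression units of 2^cunit_bits clusters (cunit_bits is
 * NTFS_LZNT_CUNIT here), so the byte count is rounded up to a unit boundary;
 * uncompressed data is a plain bytes-to-clusters round-up instead.
 */
static inline CLST sketch_clusters_for_resident(u64 rsize, u8 cluster_bits,
						u8 cunit_bits)
{
	u8 shift = cluster_bits + cunit_bits;

	return ((rsize + (1ull << shift) - 1) >> shift) << cunit_bits;
}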
 350
 351/*
 352 * attr_set_size_res - Helper for attr_set_size().
 353 */
 354static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
 355			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
 356			     u64 new_size, struct runs_tree *run,
 357			     struct ATTRIB **ins_attr)
 358{
 359	struct ntfs_sb_info *sbi = mi->sbi;
 360	struct MFT_REC *rec = mi->mrec;
 361	u32 used = le32_to_cpu(rec->used);
 362	u32 asize = le32_to_cpu(attr->size);
 363	u32 aoff = PtrOffset(rec, attr);
 364	u32 rsize = le32_to_cpu(attr->res.data_size);
 365	u32 tail = used - aoff - asize;
 366	char *next = Add2Ptr(attr, asize);
 367	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
 368
 369	if (dsize < 0) {
 370		memmove(next + dsize, next, tail);
 371	} else if (dsize > 0) {
 372		if (used + dsize > sbi->max_bytes_per_attr)
 373			return attr_make_nonresident(ni, attr, le, mi, new_size,
 374						     run, ins_attr, NULL);
 375
 376		memmove(next + dsize, next, tail);
 377		memset(next, 0, dsize);
 378	}
 379
 380	if (new_size > rsize)
 381		memset(Add2Ptr(resident_data(attr), rsize), 0,
 382		       new_size - rsize);
 383
 384	rec->used = cpu_to_le32(used + dsize);
 385	attr->size = cpu_to_le32(asize + dsize);
 386	attr->res.data_size = cpu_to_le32(new_size);
 387	mi->dirty = true;
 388	*ins_attr = attr;
 389
 390	return 0;
 391}
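
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the tail shift used by
 * attr_set_size_res(). Attribute records inside an MFT record are 8-byte
 * aligned, so a resize moves the following attributes by the difference of
 * the aligned old and new sizes, keeping every later header aligned.
 */
static inline s64 sketch_resident_resize_delta(u64 new_size, u64 rsize)
{
	return (s64)ALIGN(new_size, 8) - (s64)ALIGN(rsize, 8);
}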
 392
 393/*
  394 * attr_set_size - Change the size of the attribute.
  395 *
  396 * Extend:
  397 *   - Sparse/compressed: No clusters are allocated.
  398 *   - Normal: Append newly allocated and preallocated clusters.
  399 * Shrink:
  400 *   - Do not deallocate if @keep_prealloc is set.
 401 */
 402int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 403		  const __le16 *name, u8 name_len, struct runs_tree *run,
 404		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
 405		  struct ATTRIB **ret)
 406{
 407	int err = 0;
 408	struct ntfs_sb_info *sbi = ni->mi.sbi;
 409	u8 cluster_bits = sbi->cluster_bits;
 410	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
 411		      !name_len;
 412	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
 413	struct ATTRIB *attr = NULL, *attr_b;
 414	struct ATTR_LIST_ENTRY *le, *le_b;
 415	struct mft_inode *mi, *mi_b;
 416	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 417	CLST next_svcn, pre_alloc = -1, done = 0;
 418	bool is_ext, is_bad = false;
 419	bool dirty = false;
 420	u32 align;
 421	struct MFT_REC *rec;
 422
 423again:
 424	alen = 0;
 425	le_b = NULL;
 426	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 427			      &mi_b);
 428	if (!attr_b) {
 429		err = -ENOENT;
 430		goto bad_inode;
 431	}
 432
 433	if (!attr_b->non_res) {
 434		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 435					&attr_b);
 436		if (err)
 437			return err;
 438
 439		/* Return if file is still resident. */
 440		if (!attr_b->non_res) {
 441			dirty = true;
 442			goto ok1;
 443		}
 444
 445		/* Layout of records may be changed, so do a full search. */
 446		goto again;
 447	}
 448
 449	is_ext = is_attr_ext(attr_b);
 450	align = sbi->cluster_size;
 451	if (is_ext)
 452		align <<= attr_b->nres.c_unit;
 453
 454	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 455	old_size = le64_to_cpu(attr_b->nres.data_size);
 456	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 457
 458again_1:
 459	old_alen = old_alloc >> cluster_bits;
 460
 461	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
 462	new_alen = new_alloc >> cluster_bits;
 463
 464	if (keep_prealloc && new_size < old_size) {
 465		attr_b->nres.data_size = cpu_to_le64(new_size);
 466		mi_b->dirty = dirty = true;
 467		goto ok;
 468	}
 469
 470	vcn = old_alen - 1;
 471
 472	svcn = le64_to_cpu(attr_b->nres.svcn);
 473	evcn = le64_to_cpu(attr_b->nres.evcn);
 474
 475	if (svcn <= vcn && vcn <= evcn) {
 476		attr = attr_b;
 477		le = le_b;
 478		mi = mi_b;
 479	} else if (!le_b) {
 480		err = -EINVAL;
 481		goto bad_inode;
 482	} else {
 483		le = le_b;
 484		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 485				    &mi);
 486		if (!attr) {
 487			err = -EINVAL;
 488			goto bad_inode;
 489		}
 490
 491next_le_1:
 492		svcn = le64_to_cpu(attr->nres.svcn);
 493		evcn = le64_to_cpu(attr->nres.evcn);
 494	}
 495	/*
 496	 * Here we have:
 497	 * attr,mi,le - last attribute segment (containing 'vcn').
 498	 * attr_b,mi_b,le_b - base (primary) attribute segment.
 499	 */
 500next_le:
 501	rec = mi->mrec;
 502	err = attr_load_runs(attr, ni, run, NULL);
 503	if (err)
 504		goto out;
 505
 506	if (new_size > old_size) {
 507		CLST to_allocate;
 508		size_t free;
 509
 510		if (new_alloc <= old_alloc) {
 511			attr_b->nres.data_size = cpu_to_le64(new_size);
 512			mi_b->dirty = dirty = true;
 513			goto ok;
 514		}
 515
 516		/*
 517		 * Add clusters. In simple case we have to:
 518		 *  - allocate space (vcn, lcn, len)
 519		 *  - update packed run in 'mi'
 520		 *  - update attr->nres.evcn
 521		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 522		 */
 523		to_allocate = new_alen - old_alen;
 524add_alloc_in_same_attr_seg:
 525		lcn = 0;
 526		if (is_mft) {
 527			/* MFT allocates clusters from MFT zone. */
 528			pre_alloc = 0;
 529		} else if (is_ext) {
  530			/* No preallocation for sparse/compressed. */
 531			pre_alloc = 0;
 532		} else if (pre_alloc == -1) {
 533			pre_alloc = 0;
 534			if (type == ATTR_DATA && !name_len &&
 535			    sbi->options->prealloc) {
 536				pre_alloc = bytes_to_cluster(
 537						    sbi, get_pre_allocated(
 538								 new_size)) -
  539					    new_alen;
 540			}
 541
 542			/* Get the last LCN to allocate from. */
 543			if (old_alen &&
 544			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
 545				lcn = SPARSE_LCN;
 546			}
 547
 548			if (lcn == SPARSE_LCN)
 549				lcn = 0;
 550			else if (lcn)
 551				lcn += 1;
 552
 553			free = wnd_zeroes(&sbi->used.bitmap);
 554			if (to_allocate > free) {
 555				err = -ENOSPC;
 556				goto out;
 557			}
 558
 559			if (pre_alloc && to_allocate + pre_alloc > free)
 560				pre_alloc = 0;
 561		}
 562
 563		vcn = old_alen;
 564
 565		if (is_ext) {
 566			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
 567					   false)) {
 568				err = -ENOMEM;
 569				goto out;
 570			}
 571			alen = to_allocate;
 572		} else {
 573			/* ~3 bytes per fragment. */
 574			err = attr_allocate_clusters(
 575				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
 576				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
 577				is_mft ? 0 :
 578					 (sbi->record_size -
 579					  le32_to_cpu(rec->used) + 8) /
 580							 3 +
 581						 1,
 582				NULL, NULL);
 583			if (err)
 584				goto out;
 585		}
 586
 587		done += alen;
 588		vcn += alen;
 589		if (to_allocate > alen)
 590			to_allocate -= alen;
 591		else
 592			to_allocate = 0;
 593
 594pack_runs:
 595		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 596		if (err)
 597			goto undo_1;
 598
 599		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 600		new_alloc_tmp = (u64)next_svcn << cluster_bits;
 601		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 602		mi_b->dirty = dirty = true;
 603
 604		if (next_svcn >= vcn && !to_allocate) {
 605			/* Normal way. Update attribute and exit. */
 606			attr_b->nres.data_size = cpu_to_le64(new_size);
 607			goto ok;
 608		}
 609
  610		/* At least two MFT records to avoid a recursive loop. */
 611		if (is_mft && next_svcn == vcn &&
 612		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
 613			new_size = new_alloc_tmp;
 614			attr_b->nres.data_size = attr_b->nres.alloc_size;
 615			goto ok;
 616		}
 617
 618		if (le32_to_cpu(rec->used) < sbi->record_size) {
 619			old_alen = next_svcn;
 620			evcn = old_alen - 1;
 621			goto add_alloc_in_same_attr_seg;
 622		}
 623
 624		attr_b->nres.data_size = attr_b->nres.alloc_size;
 625		if (new_alloc_tmp < old_valid)
 626			attr_b->nres.valid_size = attr_b->nres.data_size;
 627
 628		if (type == ATTR_LIST) {
 629			err = ni_expand_list(ni);
 630			if (err)
 631				goto undo_2;
 632			if (next_svcn < vcn)
 633				goto pack_runs;
 634
 635			/* Layout of records is changed. */
 636			goto again;
 637		}
 638
 639		if (!ni->attr_list.size) {
 640			err = ni_create_attr_list(ni);
  641			/* In case of error the layout of records is not changed. */
 642			if (err)
 643				goto undo_2;
 644			/* Layout of records is changed. */
 645		}
 646
 647		if (next_svcn >= vcn) {
 648			/* This is MFT data, repeat. */
 649			goto again;
 650		}
 651
 652		/* Insert new attribute segment. */
 653		err = ni_insert_nonresident(ni, type, name, name_len, run,
 654					    next_svcn, vcn - next_svcn,
 655					    attr_b->flags, &attr, &mi, NULL);
 656
 657		/*
  658		 * Layout of records may be changed.
 659		 * Find base attribute to update.
 660		 */
 661		le_b = NULL;
 662		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 663				      NULL, &mi_b);
 664		if (!attr_b) {
 665			err = -EINVAL;
 666			goto bad_inode;
 667		}
 668
 669		if (err) {
 670			/* ni_insert_nonresident failed. */
 671			attr = NULL;
 672			goto undo_2;
 673		}
 674
 675		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
 676		if (ni->mi.rno != MFT_REC_MFT)
 677			run_truncate_head(run, evcn + 1);
 678
 679		svcn = le64_to_cpu(attr->nres.svcn);
 680		evcn = le64_to_cpu(attr->nres.evcn);
 681
 682		/*
  683		 * Attribute is in a consistent state.
  684		 * Save this point to restore to if the next steps fail.
 685		 */
 686		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
 687		attr_b->nres.valid_size = attr_b->nres.data_size =
 688			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 689		mi_b->dirty = dirty = true;
 690		goto again_1;
 691	}
 692
 693	if (new_size != old_size ||
 694	    (new_alloc != old_alloc && !keep_prealloc)) {
 695		/*
 696		 * Truncate clusters. In simple case we have to:
 697		 *  - update packed run in 'mi'
 698		 *  - update attr->nres.evcn
 699		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
 700		 *  - mark and trim clusters as free (vcn, lcn, len)
 701		 */
 702		CLST dlen = 0;
 703
 704		vcn = max(svcn, new_alen);
 705		new_alloc_tmp = (u64)vcn << cluster_bits;
 706
 707		if (vcn > svcn) {
 708			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 709			if (err)
 710				goto out;
 711		} else if (le && le->vcn) {
 712			u16 le_sz = le16_to_cpu(le->size);
 713
 714			/*
 715			 * NOTE: List entries for one attribute are always
 716			 * the same size. We deal with last entry (vcn==0)
 717			 * and it is not first in entries array
 718			 * (list entry for std attribute always first).
 719			 * So it is safe to step back.
 720			 */
 721			mi_remove_attr(NULL, mi, attr);
 722
 723			if (!al_remove_le(ni, le)) {
 724				err = -EINVAL;
 725				goto bad_inode;
 726			}
 727
 728			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 729		} else {
 730			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
 731			mi->dirty = true;
 732		}
 733
 734		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
 735
 736		if (vcn == new_alen) {
 737			attr_b->nres.data_size = cpu_to_le64(new_size);
 738			if (new_size < old_valid)
 739				attr_b->nres.valid_size =
 740					attr_b->nres.data_size;
 741		} else {
 742			if (new_alloc_tmp <=
 743			    le64_to_cpu(attr_b->nres.data_size))
 744				attr_b->nres.data_size =
 745					attr_b->nres.alloc_size;
 746			if (new_alloc_tmp <
 747			    le64_to_cpu(attr_b->nres.valid_size))
 748				attr_b->nres.valid_size =
 749					attr_b->nres.alloc_size;
 750		}
 751		mi_b->dirty = dirty = true;
 752
 753		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
 754					true);
 755		if (err)
 756			goto out;
 757
 758		if (is_ext) {
 759			/* dlen - really deallocated clusters. */
 760			le64_sub_cpu(&attr_b->nres.total_size,
 761				     ((u64)dlen << cluster_bits));
 762		}
 763
 764		run_truncate(run, vcn);
 765
 766		if (new_alloc_tmp <= new_alloc)
 767			goto ok;
 768
 769		old_size = new_alloc_tmp;
 770		vcn = svcn - 1;
 771
 772		if (le == le_b) {
 773			attr = attr_b;
 774			mi = mi_b;
 775			evcn = svcn - 1;
 776			svcn = 0;
 777			goto next_le;
 778		}
 779
 780		if (le->type != type || le->name_len != name_len ||
 781		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 782			err = -EINVAL;
 783			goto bad_inode;
 784		}
 785
 786		err = ni_load_mi(ni, le, &mi);
 787		if (err)
 788			goto out;
 789
 790		attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
 791				    &le->id);
 792		if (!attr) {
 793			err = -EINVAL;
 794			goto bad_inode;
 795		}
 796		goto next_le_1;
 797	}
 798
 799ok:
 800	if (new_valid) {
 801		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
 802
 803		if (attr_b->nres.valid_size != valid) {
 804			attr_b->nres.valid_size = valid;
 805			mi_b->dirty = true;
 806		}
 807	}
 808
 809ok1:
 810	if (ret)
 811		*ret = attr_b;
 812
 813	if (((type == ATTR_DATA && !name_len) ||
 814	     (type == ATTR_ALLOC && name == I30_NAME))) {
 815		/* Update inode_set_bytes. */
 816		if (attr_b->non_res) {
 817			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 818			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 819				inode_set_bytes(&ni->vfs_inode, new_alloc);
 820				dirty = true;
 821			}
 822		}
 823
 824		/* Don't forget to update duplicate information in parent. */
 825		if (dirty) {
 826			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 827			mark_inode_dirty(&ni->vfs_inode);
 828		}
 829	}
 830
 831	return 0;
 832
 833undo_2:
 834	vcn -= alen;
 835	attr_b->nres.data_size = cpu_to_le64(old_size);
 836	attr_b->nres.valid_size = cpu_to_le64(old_valid);
 837	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
 838
 839	/* Restore 'attr' and 'mi'. */
 840	if (attr)
 841		goto restore_run;
 842
 843	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
 844	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
 845		attr = attr_b;
 846		le = le_b;
 847		mi = mi_b;
 848	} else if (!le_b) {
 849		err = -EINVAL;
 850		goto bad_inode;
 851	} else {
 852		le = le_b;
 853		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
 854				    &svcn, &mi);
 855		if (!attr)
 856			goto bad_inode;
 857	}
 858
 859restore_run:
 860	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
 861		is_bad = true;
 862
 863undo_1:
 864	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
 865
 866	run_truncate(run, vcn);
 867out:
 868	if (is_bad) {
 869bad_inode:
 870		_ntfs_bad_inode(&ni->vfs_inode);
 871	}
 872	return err;
 873}
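
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the rounding used when
 * attr_set_size() recomputes new_alloc. The allocation unit is one cluster,
 * or one compression frame (cluster_size << c_unit) for sparse/compressed
 * attributes; the invariant valid_size <= data_size <= alloc_size must hold
 * after every update.
 */
static inline u64 sketch_round_alloc_size(u64 new_size, u32 align)
{
	return (new_size + align - 1) & ~(u64)(align - 1);
}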
 874
 875/*
  876 * attr_data_get_block - Returns 'lcn' and 'len' for the given 'vcn'.
  877 *
  878 * @new == NULL means just get the current mapping for 'vcn'.
  879 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
  880 * @zero - zero out newly allocated clusters.
  881 *
  882 *  NOTE:
  883 *  - @new != NULL is used only for sparse or compressed attributes.
  884 *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 885 */
 886int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 887			CLST *len, bool *new, bool zero)
 888{
 889	int err = 0;
 890	struct runs_tree *run = &ni->file.run;
 891	struct ntfs_sb_info *sbi;
 892	u8 cluster_bits;
 893	struct ATTRIB *attr, *attr_b;
 894	struct ATTR_LIST_ENTRY *le, *le_b;
 895	struct mft_inode *mi, *mi_b;
 896	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
 897	CLST alloc, evcn;
 898	unsigned fr;
 899	u64 total_size, total_size0;
 900	int step = 0;
 901
 902	if (new)
 903		*new = false;
 904
 905	/* Try to find in cache. */
 906	down_read(&ni->file.run_lock);
 907	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 908		*len = 0;
 909	up_read(&ni->file.run_lock);
 910
 911	if (*len && (*lcn != SPARSE_LCN || !new))
  912		return 0; /* Fast normal way without allocation. */
 913
 914	/* No cluster in cache or we need to allocate cluster in hole. */
 915	sbi = ni->mi.sbi;
 916	cluster_bits = sbi->cluster_bits;
 917
 918	ni_lock(ni);
 919	down_write(&ni->file.run_lock);
 920
 921	/* Repeat the code above (under write lock). */
 922	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
 923		*len = 0;
 924
 925	if (*len) {
 926		if (*lcn != SPARSE_LCN || !new)
 927			goto out; /* normal way without allocation. */
 928		if (clen > *len)
 929			clen = *len;
 930	}
 931
 932	le_b = NULL;
 933	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
 934	if (!attr_b) {
 935		err = -ENOENT;
 936		goto out;
 937	}
 938
 939	if (!attr_b->non_res) {
 940		*lcn = RESIDENT_LCN;
 941		*len = 1;
 942		goto out;
 943	}
 944
 945	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
 946	if (vcn >= asize) {
 947		if (new) {
 948			err = -EINVAL;
 949		} else {
 950			*len = 1;
 951			*lcn = SPARSE_LCN;
 952		}
 953		goto out;
 954	}
 955
 956	svcn = le64_to_cpu(attr_b->nres.svcn);
 957	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
 958
 959	attr = attr_b;
 960	le = le_b;
 961	mi = mi_b;
 962
 963	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
 964		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
 965				    &mi);
 966		if (!attr) {
 967			err = -EINVAL;
 968			goto out;
 969		}
 970		svcn = le64_to_cpu(attr->nres.svcn);
 971		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 972	}
 973
 974	/* Load in cache actual information. */
 975	err = attr_load_runs(attr, ni, run, NULL);
 976	if (err)
 977		goto out;
 978
 979	/* Check for compressed frame. */
 980	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
 981				       &hint, run);
 982	if (err)
 983		goto out;
 984
 985	if (hint) {
 986		/* if frame is compressed - don't touch it. */
 987		*lcn = COMPRESSED_LCN;
 988		/* length to the end of frame. */
 989		*len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
 990		err = 0;
 991		goto out;
 992	}
 993
 994	if (!*len) {
 995		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 996			if (*lcn != SPARSE_LCN || !new)
 997				goto ok; /* Slow normal way without allocation. */
 998
 999			if (clen > *len)
1000				clen = *len;
1001		} else if (!new) {
1002			/* Here we may return -ENOENT.
 1003		 * In any case the caller gets zero length. */
1004			goto ok;
1005		}
1006	}
1007
1008	if (!is_attr_ext(attr_b)) {
1009		/* The code below only for sparsed or compressed attributes. */
1010		err = -EINVAL;
1011		goto out;
1012	}
1013
1014	vcn0 = vcn;
1015	to_alloc = clen;
1016	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
 1017	/* Allocate frame-aligned clusters.
 1018	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
 1019	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
1020	if (attr_b->nres.c_unit) {
1021		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1022		CLST cmask = ~(clst_per_frame - 1);
1023
1024		/* Get frame aligned vcn and to_alloc. */
1025		vcn = vcn0 & cmask;
1026		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1027		if (fr < clst_per_frame)
1028			fr = clst_per_frame;
1029		zero = true;
1030
1031		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
1032		if (vcn < svcn || evcn1 <= vcn) {
1033			struct ATTRIB *attr2;
1034			/* Load runs for truncated vcn. */
1035			attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
1036					     0, &vcn, &mi);
1037			if (!attr2) {
1038				err = -EINVAL;
1039				goto out;
1040			}
1041			evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
 1042			err = attr_load_runs(attr2, ni, run, NULL);
1043			if (err)
1044				goto out;
1045		}
1046	}
1047
1048	if (vcn + to_alloc > asize)
1049		to_alloc = asize - vcn;
1050
1051	/* Get the last LCN to allocate from. */
1052	hint = 0;
1053
1054	if (vcn > evcn1) {
1055		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1056				   false)) {
1057			err = -ENOMEM;
1058			goto out;
1059		}
1060	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1061		hint = -1;
1062	}
1063
1064	/* Allocate and zeroout new clusters. */
1065	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1066				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1067				     fr, lcn, len);
1068	if (err)
1069		goto out;
1070	*new = true;
1071	step = 1;
1072
1073	end = vcn + alen;
1074	/* Save 'total_size0' to restore if error. */
1075	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1076	total_size = total_size0 + ((u64)alen << cluster_bits);
1077
1078	if (vcn != vcn0) {
1079		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1080			err = -EINVAL;
1081			goto out;
1082		}
1083		if (*lcn == SPARSE_LCN) {
 1084			/* Internal error. Should not happen. */
1085			WARN_ON(1);
1086			err = -EINVAL;
1087			goto out;
1088		}
 1089		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1090		if (vcn0 + *len > end)
1091			*len = end - vcn0;
1092	}
1093
1094repack:
1095	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1096	if (err)
1097		goto out;
1098
1099	attr_b->nres.total_size = cpu_to_le64(total_size);
1100	inode_set_bytes(&ni->vfs_inode, total_size);
1101	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1102
1103	mi_b->dirty = true;
1104	mark_inode_dirty(&ni->vfs_inode);
1105
1106	/* Stored [vcn : next_svcn) from [vcn : end). */
1107	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1108
1109	if (end <= evcn1) {
1110		if (next_svcn == evcn1) {
1111			/* Normal way. Update attribute and exit. */
1112			goto ok;
1113		}
1114		/* Add new segment [next_svcn : evcn1 - next_svcn). */
1115		if (!ni->attr_list.size) {
1116			err = ni_create_attr_list(ni);
1117			if (err)
1118				goto undo1;
1119			/* Layout of records is changed. */
1120			le_b = NULL;
1121			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1122					      0, NULL, &mi_b);
1123			if (!attr_b) {
1124				err = -ENOENT;
1125				goto out;
1126			}
1127
1128			attr = attr_b;
1129			le = le_b;
1130			mi = mi_b;
1131			goto repack;
1132		}
1133	}
1134
1135	/*
 1136	 * The code below may require an additional cluster (to extend the attribute list)
 1137	 * and/or one MFT record.
 1138	 * It is too complex to undo operations if -ENOSPC occurs deep inside
 1139	 * 'ni_insert_nonresident'.
 1140	 * Return -ENOSPC in advance here if there is no free cluster and no free MFT record.
1141	 */
1142	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1143		/* Undo step 1. */
1144		err = -ENOSPC;
1145		goto undo1;
1146	}
1147
1148	step = 2;
1149	svcn = evcn1;
1150
1151	/* Estimate next attribute. */
1152	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1153
1154	if (!attr) {
1155		/* Insert new attribute segment. */
1156		goto ins_ext;
1157	}
1158
 1159	/* Try to update an existing attribute segment. */
1160	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1161	evcn = le64_to_cpu(attr->nres.evcn);
1162
1163	if (end < next_svcn)
1164		end = next_svcn;
1165	while (end > evcn) {
1166		/* Remove segment [svcn : evcn). */
1167		mi_remove_attr(NULL, mi, attr);
1168
1169		if (!al_remove_le(ni, le)) {
1170			err = -EINVAL;
1171			goto out;
1172		}
1173
1174		if (evcn + 1 >= alloc) {
1175			/* Last attribute segment. */
1176			evcn1 = evcn + 1;
1177			goto ins_ext;
1178		}
1179
1180		if (ni_load_mi(ni, le, &mi)) {
1181			attr = NULL;
1182			goto out;
1183		}
1184
1185		attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1186		if (!attr) {
1187			err = -EINVAL;
1188			goto out;
1189		}
1190		svcn = le64_to_cpu(attr->nres.svcn);
1191		evcn = le64_to_cpu(attr->nres.evcn);
1192	}
1193
1194	if (end < svcn)
1195		end = svcn;
1196
1197	err = attr_load_runs(attr, ni, run, &end);
1198	if (err)
1199		goto out;
1200
1201	evcn1 = evcn + 1;
1202	attr->nres.svcn = cpu_to_le64(next_svcn);
1203	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1204	if (err)
1205		goto out;
1206
1207	le->vcn = cpu_to_le64(next_svcn);
1208	ni->attr_list.dirty = true;
1209	mi->dirty = true;
1210	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1211
1212ins_ext:
1213	if (evcn1 > next_svcn) {
1214		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1215					    next_svcn, evcn1 - next_svcn,
1216					    attr_b->flags, &attr, &mi, NULL);
1217		if (err)
1218			goto out;
1219	}
1220ok:
1221	run_truncate_around(run, vcn);
1222out:
1223	if (err && step > 1) {
1224		/* Too complex to restore. */
1225		_ntfs_bad_inode(&ni->vfs_inode);
1226	}
1227	up_write(&ni->file.run_lock);
1228	ni_unlock(ni);
1229
1230	return err;
1231
1232undo1:
1233	/* Undo step1. */
1234	attr_b->nres.total_size = cpu_to_le64(total_size0);
1235	inode_set_bytes(&ni->vfs_inode, total_size0);
1236
1237	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1238	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1239	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1240		_ntfs_bad_inode(&ni->vfs_inode);
1241	}
1242	goto out;
1243}
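
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the frame rounding in
 * attr_data_get_block(). A compressed attribute must allocate whole frames,
 * so the requested [vcn0, vcn0 + clen) is widened to every frame it touches;
 * 'cmask' clears the in-frame bits of a VCN.
 */
static inline void sketch_frame_align(CLST vcn0, CLST clen, u8 c_unit,
				      CLST *vcn, CLST *to_alloc)
{
	CLST clst_per_frame = 1u << c_unit;
	CLST cmask = ~(clst_per_frame - 1);

	*vcn = vcn0 & cmask;
	*to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - *vcn;
}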
1244
1245int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
1246{
1247	u64 vbo;
1248	struct ATTRIB *attr;
1249	u32 data_size;
1250	size_t len;
1251
1252	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1253	if (!attr)
1254		return -EINVAL;
1255
1256	if (attr->non_res)
1257		return E_NTFS_NONRESIDENT;
1258
1259	vbo = folio->index << PAGE_SHIFT;
1260	data_size = le32_to_cpu(attr->res.data_size);
1261	if (vbo > data_size)
1262		len = 0;
1263	else
 1264		len = min(data_size - vbo, folio_size(folio));
1265
1266	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
 1267	folio_mark_uptodate(folio);
1268
1269	return 0;
1270}
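
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the length clamp used
 * when a folio is filled from resident data above. Bytes past res.data_size
 * are never copied; folio_fill_tail() zeroes the remainder of the folio.
 */
static inline size_t sketch_resident_read_len(u64 vbo, u32 data_size,
					      size_t fsize)
{
	return vbo > data_size ? 0 : min_t(u64, data_size - vbo, fsize);
}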
1271
1272int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
1273{
1274	u64 vbo;
1275	struct mft_inode *mi;
1276	struct ATTRIB *attr;
1277	u32 data_size;
1278
1279	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1280	if (!attr)
1281		return -EINVAL;
1282
1283	if (attr->non_res) {
1284		/* Return special error code to check this case. */
1285		return E_NTFS_NONRESIDENT;
1286	}
1287
1288	vbo = folio->index << PAGE_SHIFT;
1289	data_size = le32_to_cpu(attr->res.data_size);
1290	if (vbo < data_size) {
1291		char *data = resident_data(attr);
 1292		size_t len = min(data_size - vbo, folio_size(folio));
1293
 1294		memcpy_from_folio(data + vbo, folio, 0, len);
1295		mi->dirty = true;
1296	}
1297	ni->i_valid = data_size;
1298
1299	return 0;
1300}
1301
1302/*
1303 * attr_load_runs_vcn - Load runs with VCN.
1304 */
1305int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1306		       const __le16 *name, u8 name_len, struct runs_tree *run,
1307		       CLST vcn)
1308{
1309	struct ATTRIB *attr;
1310	int err;
1311	CLST svcn, evcn;
1312	u16 ro;
1313
1314	if (!ni) {
1315		/* Is record corrupted? */
1316		return -ENOENT;
1317	}
1318
1319	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1320	if (!attr) {
1321		/* Is record corrupted? */
1322		return -ENOENT;
1323	}
1324
1325	svcn = le64_to_cpu(attr->nres.svcn);
1326	evcn = le64_to_cpu(attr->nres.evcn);
1327
1328	if (evcn < vcn || vcn < svcn) {
1329		/* Is record corrupted? */
1330		return -EINVAL;
1331	}
1332
1333	ro = le16_to_cpu(attr->nres.run_off);
1334
1335	if (ro > le32_to_cpu(attr->size))
1336		return -EINVAL;
1337
1338	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1339			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1340	if (err < 0)
1341		return err;
1342	return 0;
1343}
1344
1345/*
1346 * attr_load_runs_range - Load runs for given range [from to).
1347 */
1348int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1349			 const __le16 *name, u8 name_len, struct runs_tree *run,
1350			 u64 from, u64 to)
1351{
1352	struct ntfs_sb_info *sbi = ni->mi.sbi;
1353	u8 cluster_bits = sbi->cluster_bits;
1354	CLST vcn;
1355	CLST vcn_last = (to - 1) >> cluster_bits;
1356	CLST lcn, clen;
1357	int err;
1358
1359	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1360		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1361			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1362						 vcn);
1363			if (err)
1364				return err;
 1365			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1366		}
1367	}
1368
1369	return 0;
1370}
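
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the byte-to-cluster
 * conversion behind attr_load_runs_range(). A half-open byte interval
 * [from, to) touches the inclusive VCN range [from >> bits, (to - 1) >> bits],
 * and the loop above loads runs until every VCN in that range resolves.
 */
static inline void sketch_byte_range_vcns(u64 from, u64 to, u8 cluster_bits,
					  CLST *first, CLST *last)
{
	*first = from >> cluster_bits;
	*last = (to - 1) >> cluster_bits;
}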
1371
1372#ifdef CONFIG_NTFS3_LZX_XPRESS
1373/*
1374 * attr_wof_frame_info
1375 *
 1376 * Read the header of an Xpress/LZX file to get info about the frame.
1377 */
1378int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1379			struct runs_tree *run, u64 frame, u64 frames,
1380			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1381{
1382	struct ntfs_sb_info *sbi = ni->mi.sbi;
1383	u64 vbo[2], off[2], wof_size;
1384	u32 voff;
1385	u8 bytes_per_off;
1386	char *addr;
1387	struct folio *folio;
1388	int i, err;
1389	__le32 *off32;
1390	__le64 *off64;
1391
1392	if (ni->vfs_inode.i_size < 0x100000000ull) {
1393		/* File starts with array of 32 bit offsets. */
1394		bytes_per_off = sizeof(__le32);
1395		vbo[1] = frame << 2;
1396		*vbo_data = frames << 2;
1397	} else {
1398		/* File starts with array of 64 bit offsets. */
1399		bytes_per_off = sizeof(__le64);
1400		vbo[1] = frame << 3;
1401		*vbo_data = frames << 3;
1402	}
1403
1404	/*
1405	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1406	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1407	 */
1408	if (!attr->non_res) {
1409		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1410			_ntfs_bad_inode(&ni->vfs_inode);
1411			return -EINVAL;
1412		}
1413		addr = resident_data(attr);
1414
1415		if (bytes_per_off == sizeof(__le32)) {
1416			off32 = Add2Ptr(addr, vbo[1]);
1417			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1418			off[1] = le32_to_cpu(off32[0]);
1419		} else {
1420			off64 = Add2Ptr(addr, vbo[1]);
1421			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1422			off[1] = le64_to_cpu(off64[0]);
1423		}
1424
1425		*vbo_data += off[0];
1426		*ondisk_size = off[1] - off[0];
1427		return 0;
1428	}
1429
1430	wof_size = le64_to_cpu(attr->nres.data_size);
1431	down_write(&ni->file.run_lock);
1432	folio = ni->file.offs_folio;
1433	if (!folio) {
1434		folio = folio_alloc(GFP_KERNEL, 0);
1435		if (!folio) {
1436			err = -ENOMEM;
1437			goto out;
1438		}
1439		folio->index = -1;
1440		ni->file.offs_folio = folio;
1441	}
1442	folio_lock(folio);
1443	addr = folio_address(folio);
1444
1445	if (vbo[1]) {
1446		voff = vbo[1] & (PAGE_SIZE - 1);
1447		vbo[0] = vbo[1] - bytes_per_off;
1448		i = 0;
1449	} else {
1450		voff = 0;
1451		vbo[0] = 0;
1452		off[0] = 0;
1453		i = 1;
1454	}
1455
1456	do {
1457		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1458
1459		if (index != folio->index) {
1460			struct page *page = &folio->page;
1461			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1462			u64 to = min(from + PAGE_SIZE, wof_size);
1463
1464			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1465						   ARRAY_SIZE(WOF_NAME), run,
1466						   from, to);
1467			if (err)
1468				goto out1;
1469
1470			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1471					     to - from, REQ_OP_READ);
1472			if (err) {
1473				folio->index = -1;
1474				goto out1;
1475			}
1476			folio->index = index;
1477		}
1478
1479		if (i) {
1480			if (bytes_per_off == sizeof(__le32)) {
1481				off32 = Add2Ptr(addr, voff);
1482				off[1] = le32_to_cpu(*off32);
1483			} else {
1484				off64 = Add2Ptr(addr, voff);
1485				off[1] = le64_to_cpu(*off64);
1486			}
1487		} else if (!voff) {
1488			if (bytes_per_off == sizeof(__le32)) {
1489				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1490				off[0] = le32_to_cpu(*off32);
1491			} else {
1492				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1493				off[0] = le64_to_cpu(*off64);
1494			}
1495		} else {
1496			/* Two values in one page. */
1497			if (bytes_per_off == sizeof(__le32)) {
1498				off32 = Add2Ptr(addr, voff);
1499				off[0] = le32_to_cpu(off32[-1]);
1500				off[1] = le32_to_cpu(off32[0]);
1501			} else {
1502				off64 = Add2Ptr(addr, voff);
1503				off[0] = le64_to_cpu(off64[-1]);
1504				off[1] = le64_to_cpu(off64[0]);
1505			}
1506			break;
1507		}
1508	} while (++i < 2);
1509
1510	*vbo_data += off[0];
1511	*ondisk_size = off[1] - off[0];
1512
1513out1:
1514	folio_unlock(folio);
1515out:
1516	up_write(&ni->file.run_lock);
1517	return err;
1518}
1519#endif
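
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the WOF offset table
 * decoded by attr_wof_frame_info(). The file begins with one offset per
 * frame (32-bit entries if the file is below 4 GiB, 64-bit otherwise);
 * frame i occupies [off[i-1], off[i]) bytes past the table, with an implied
 * off[-1] == 0. The sketch takes already-decoded native-endian offsets.
 */
static inline u64 sketch_wof_frame_size(const u64 *off, u64 frame)
{
	return off[frame] - (frame ? off[frame - 1] : 0);
}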
1520
1521/*
1522 * attr_is_frame_compressed - Used to detect compressed frame.
1523 *
1524 * attr - base (primary) attribute segment.
1525 * run  - run to use, usually == &ni->file.run.
 1526 * Only base segments contain a valid 'attr->nres.c_unit'.
1527 */
1528int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1529			     CLST frame, CLST *clst_data, struct runs_tree *run)
1530{
1531	int err;
1532	u32 clst_frame;
1533	CLST clen, lcn, vcn, alen, slen, vcn_next;
 1534	size_t idx;
1535
1536	*clst_data = 0;
1537
1538	if (!is_attr_compressed(attr))
1539		return 0;
1540
1541	if (!attr->non_res)
1542		return 0;
1543
1544	clst_frame = 1u << attr->nres.c_unit;
 1545	vcn = frame * clst_frame;
1546
1547	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1548		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1549					 attr->name_len, run, vcn);
1550		if (err)
1551			return err;
1552
1553		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1554			return -EINVAL;
1555	}
1556
1557	if (lcn == SPARSE_LCN) {
 1558		/* Sparse frame. */
1559		return 0;
1560	}
1561
1562	if (clen >= clst_frame) {
1563		/*
 1564		 * The frame is not compressed because
 1565		 * it does not contain any sparse clusters.
1566		 */
1567		*clst_data = clst_frame;
1568		return 0;
1569	}
1570
1571	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1572	slen = 0;
1573	*clst_data = clen;
1574
1575	/*
1576	 * The frame is compressed if *clst_data + slen >= clst_frame.
1577	 * Check next fragments.
1578	 */
1579	while ((vcn += clen) < alen) {
1580		vcn_next = vcn;
1581
1582		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1583		    vcn_next != vcn) {
1584			err = attr_load_runs_vcn(ni, attr->type,
1585						 attr_name(attr),
1586						 attr->name_len, run, vcn_next);
1587			if (err)
1588				return err;
1589			vcn = vcn_next;
1590
1591			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1592				return -EINVAL;
1593		}
1594
1595		if (lcn == SPARSE_LCN) {
1596			slen += clen;
1597		} else {
1598			if (slen) {
1599				/*
 1600				 * Data clusters + sparse clusters are
 1601				 * not enough for the frame.
1602				 */
1603				return -EINVAL;
1604			}
1605			*clst_data += clen;
1606		}
1607
1608		if (*clst_data + slen >= clst_frame) {
1609			if (!slen) {
1610				/*
 1611				 * There are no sparse clusters in this frame,
 1612				 * so it is not compressed.
1613				 */
1614				*clst_data = clst_frame;
1615			} else {
1616				/* Frame is compressed. */
1617			}
1618			break;
1619		}
1620	}
1621
1622	return 0;
1623}
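
/*
 * Editor's sketch (hypothetical, not part of ntfs3): the rule the loop in
 * attr_is_frame_compressed() implements. Within one frame of 'clst_frame'
 * clusters, the frame is stored compressed exactly when data clusters plus
 * trailing sparse clusters fill the frame and at least one sparse cluster
 * was needed; a frame made purely of data clusters is uncompressed.
 */
static inline bool sketch_frame_compressed(CLST clst_data, CLST slen,
					   CLST clst_frame)
{
	return clst_data + slen >= clst_frame && slen != 0;
}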
1624
1625/*
1626 * attr_allocate_frame - Allocate/free clusters for @frame.
1627 *
1628 * Assumed: down_write(&ni->file.run_lock);
1629 */
1630int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1631			u64 new_valid)
1632{
1633	int err = 0;
1634	struct runs_tree *run = &ni->file.run;
1635	struct ntfs_sb_info *sbi = ni->mi.sbi;
1636	struct ATTRIB *attr = NULL, *attr_b;
1637	struct ATTR_LIST_ENTRY *le, *le_b;
1638	struct mft_inode *mi, *mi_b;
1639	CLST svcn, evcn1, next_svcn, len;
1640	CLST vcn, end, clst_data;
1641	u64 total_size, valid_size, data_size;
1642
1643	le_b = NULL;
1644	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1645	if (!attr_b)
1646		return -ENOENT;
1647
1648	if (!is_attr_ext(attr_b))
1649		return -EINVAL;
1650
1651	vcn = frame << NTFS_LZNT_CUNIT;
1652	total_size = le64_to_cpu(attr_b->nres.total_size);
1653
1654	svcn = le64_to_cpu(attr_b->nres.svcn);
1655	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1656	data_size = le64_to_cpu(attr_b->nres.data_size);
1657
1658	if (svcn <= vcn && vcn < evcn1) {
1659		attr = attr_b;
1660		le = le_b;
1661		mi = mi_b;
1662	} else if (!le_b) {
1663		err = -EINVAL;
1664		goto out;
1665	} else {
1666		le = le_b;
1667		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1668				    &mi);
1669		if (!attr) {
1670			err = -EINVAL;
1671			goto out;
1672		}
1673		svcn = le64_to_cpu(attr->nres.svcn);
1674		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1675	}
1676
1677	err = attr_load_runs(attr, ni, run, NULL);
1678	if (err)
1679		goto out;
1680
1681	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
1682	if (err)
1683		goto out;
1684
1685	total_size -= (u64)clst_data << sbi->cluster_bits;
1686
1687	len = bytes_to_cluster(sbi, compr_size);
1688
1689	if (len == clst_data)
1690		goto out;
1691
1692	if (len < clst_data) {
1693		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1694					NULL, true);
1695		if (err)
1696			goto out;
1697
1698		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1699				   false)) {
1700			err = -ENOMEM;
1701			goto out;
1702		}
1703		end = vcn + clst_data;
1704		/* Run contains updated range [vcn + len : end). */
1705	} else {
1706		CLST alen, hint = 0;
1707		/* Get the last LCN to allocate from. */
1708		if (vcn + clst_data &&
1709		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1710				      NULL)) {
1711			hint = -1;
1712		}
1713
1714		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1715					     hint + 1, len - clst_data, NULL,
1716					     ALLOCATE_DEF, &alen, 0, NULL,
1717					     NULL);
1718		if (err)
1719			goto out;
1720
1721		end = vcn + len;
1722		/* Run contains updated range [vcn + clst_data : end). */
1723	}
1724
1725	total_size += (u64)len << sbi->cluster_bits;
1726
1727repack:
1728	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1729	if (err)
1730		goto out;
1731
1732	attr_b->nres.total_size = cpu_to_le64(total_size);
1733	inode_set_bytes(&ni->vfs_inode, total_size);
1734	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1735
1736	mi_b->dirty = true;
1737	mark_inode_dirty(&ni->vfs_inode);
1738
1739	/* Stored [vcn : next_svcn) from [vcn : end). */
1740	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1741
1742	if (end <= evcn1) {
1743		if (next_svcn == evcn1) {
1744			/* Normal way. Update attribute and exit. */
1745			goto ok;
1746		}
1747		/* Add a new segment [next_svcn : evcn1). */
1748		if (!ni->attr_list.size) {
1749			err = ni_create_attr_list(ni);
1750			if (err)
1751				goto out;
1752			/* Layout of records has changed. */
1753			le_b = NULL;
1754			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1755					      0, NULL, &mi_b);
1756			if (!attr_b) {
1757				err = -ENOENT;
1758				goto out;
1759			}
1760
1761			attr = attr_b;
1762			le = le_b;
1763			mi = mi_b;
1764			goto repack;
1765		}
1766	}
1767
1768	svcn = evcn1;
1769
1770	/* Estimate next attribute. */
1771	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1772
1773	if (attr) {
1774		CLST alloc = bytes_to_cluster(
1775			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1776		CLST evcn = le64_to_cpu(attr->nres.evcn);
1777
1778		if (end < next_svcn)
1779			end = next_svcn;
1780		while (end > evcn) {
1781			/* Remove segment [svcn : evcn]. */
1782			mi_remove_attr(NULL, mi, attr);
1783
1784			if (!al_remove_le(ni, le)) {
1785				err = -EINVAL;
1786				goto out;
1787			}
1788
1789			if (evcn + 1 >= alloc) {
1790				/* Last attribute segment. */
1791				evcn1 = evcn + 1;
1792				goto ins_ext;
1793			}
1794
1795			if (ni_load_mi(ni, le, &mi)) {
1796				attr = NULL;
1797				goto out;
1798			}
1799
1800			attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
1801					    &le->id);
1802			if (!attr) {
1803				err = -EINVAL;
1804				goto out;
1805			}
1806			svcn = le64_to_cpu(attr->nres.svcn);
1807			evcn = le64_to_cpu(attr->nres.evcn);
1808		}
1809
1810		if (end < svcn)
1811			end = svcn;
1812
1813		err = attr_load_runs(attr, ni, run, &end);
1814		if (err)
1815			goto out;
1816
1817		evcn1 = evcn + 1;
1818		attr->nres.svcn = cpu_to_le64(next_svcn);
1819		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1820		if (err)
1821			goto out;
1822
1823		le->vcn = cpu_to_le64(next_svcn);
1824		ni->attr_list.dirty = true;
1825		mi->dirty = true;
1826
1827		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1828	}
1829ins_ext:
1830	if (evcn1 > next_svcn) {
1831		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1832					    next_svcn, evcn1 - next_svcn,
1833					    attr_b->flags, &attr, &mi, NULL);
1834		if (err)
1835			goto out;
1836	}
1837ok:
1838	run_truncate_around(run, vcn);
1839out:
1840	if (attr_b) {
1841		if (new_valid > data_size)
1842			new_valid = data_size;
1843
1844		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1845		if (new_valid != valid_size) {
1846			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1847			mi_b->dirty = true;
1848		}
1849	}
1850
1851	return err;
1852}
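
/*
 * Worked example for attr_allocate_frame() under assumed geometry
 * (4K clusters, NTFS_LZNT_CUNIT == 4, i.e. 16-cluster frames): frame 2
 * starts at vcn = 2 << 4 = 32. If the frame currently owns
 * clst_data == 16 clusters and recompression yields compr_size == 10000
 * bytes, then len = bytes_to_cluster(sbi, 10000) == 3, so clusters
 * [35 : 48) are freed and replaced by one sparse run, and total_size
 * shrinks by 13 clusters.
 */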
1853
1854/*
1855 * attr_collapse_range - Collapse range in file.
1856 */
1857int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1858{
1859	int err = 0;
1860	struct runs_tree *run = &ni->file.run;
1861	struct ntfs_sb_info *sbi = ni->mi.sbi;
1862	struct ATTRIB *attr = NULL, *attr_b;
1863	struct ATTR_LIST_ENTRY *le, *le_b;
1864	struct mft_inode *mi, *mi_b;
1865	CLST svcn, evcn1, len, dealloc, alen;
1866	CLST vcn, end;
1867	u64 valid_size, data_size, alloc_size, total_size;
1868	u32 mask;
1869	__le16 a_flags;
1870
1871	if (!bytes)
1872		return 0;
1873
1874	le_b = NULL;
1875	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1876	if (!attr_b)
1877		return -ENOENT;
1878
1879	if (!attr_b->non_res) {
1880		/* Attribute is resident. Nothing to do? */
1881		return 0;
1882	}
1883
1884	data_size = le64_to_cpu(attr_b->nres.data_size);
1885	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1886	a_flags = attr_b->flags;
1887
1888	if (is_attr_ext(attr_b)) {
1889		total_size = le64_to_cpu(attr_b->nres.total_size);
1890		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1891	} else {
1892		total_size = alloc_size;
1893		mask = sbi->cluster_mask;
1894	}
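	/*
	 * Example (assumed geometry): with 4K clusters, cluster_mask is
	 * 0xFFF; for a compressed attribute with c_unit == 4 the frame is
	 * 16 clusters, so mask == (4096 << 4) - 1 == 0xFFFF and only
	 * 64K-aligned ranges pass the check below.
	 */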
1895
1896	if ((vbo & mask) || (bytes & mask)) {
1897		/* Allow to collapse only cluster aligned ranges. */
1898		return -EINVAL;
1899	}
1900
1901	if (vbo > data_size)
1902		return -EINVAL;
1903
1904	down_write(&ni->file.run_lock);
1905
1906	if (vbo + bytes >= data_size) {
1907		u64 new_valid = min(ni->i_valid, vbo);
1908
1909		/* Simple truncate file at 'vbo'. */
1910		truncate_setsize(&ni->vfs_inode, vbo);
1911		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1912				    &new_valid, true, NULL);
1913
1914		if (!err && new_valid < ni->i_valid)
1915			ni->i_valid = new_valid;
1916
1917		goto out;
1918	}
1919
1920	/*
1921	 * Enumerate all attribute segments and collapse.
1922	 */
1923	alen = alloc_size >> sbi->cluster_bits;
1924	vcn = vbo >> sbi->cluster_bits;
1925	len = bytes >> sbi->cluster_bits;
1926	end = vcn + len;
1927	dealloc = 0;
1928
1929	svcn = le64_to_cpu(attr_b->nres.svcn);
1930	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1931
1932	if (svcn <= vcn && vcn < evcn1) {
1933		attr = attr_b;
1934		le = le_b;
1935		mi = mi_b;
1936	} else if (!le_b) {
1937		err = -EINVAL;
1938		goto out;
1939	} else {
1940		le = le_b;
1941		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1942				    &mi);
1943		if (!attr) {
1944			err = -EINVAL;
1945			goto out;
1946		}
1947
1948		svcn = le64_to_cpu(attr->nres.svcn);
1949		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1950	}
1951
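	/*
	 * Loop invariant: [svcn, evcn1) is the VCN range covered by the
	 * attribute segment currently being examined.
	 */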
1952	for (;;) {
1953		if (svcn >= end) {
1954			/* Segment lies past the range: shift VCNs down by 'len'. */
1955			attr->nres.svcn = cpu_to_le64(svcn - len);
1956			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1957			if (le) {
1958				le->vcn = attr->nres.svcn;
1959				ni->attr_list.dirty = true;
1960			}
1961			mi->dirty = true;
1962		} else if (svcn < vcn || end < evcn1) {
1963			CLST vcn1, eat, next_svcn;
1964
1965			/* Collapse a part of this attribute segment. */
1966			err = attr_load_runs(attr, ni, run, &svcn);
1967			if (err)
1968				goto out;
1969			vcn1 = max(vcn, svcn);
1970			eat = min(end, evcn1) - vcn1;
1971
1972			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1973						true);
1974			if (err)
1975				goto out;
1976
1977			if (!run_collapse_range(run, vcn1, eat)) {
1978				err = -ENOMEM;
1979				goto out;
1980			}
1981
1982			if (svcn >= vcn) {
1983				/* Shift the start VCN to 'vcn'. */
1984				attr->nres.svcn = cpu_to_le64(vcn);
1985				if (le) {
1986					le->vcn = attr->nres.svcn;
1987					ni->attr_list.dirty = true;
1988				}
1989			}
1990
1991			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1992			if (err)
1993				goto out;
1994
1995			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1996			if (next_svcn + eat < evcn1) {
1997				err = ni_insert_nonresident(
1998					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1999					evcn1 - eat - next_svcn, a_flags, &attr,
2000					&mi, &le);
2001				if (err)
2002					goto out;
2003
2004				/* Layout of records may have changed. */
2005				attr_b = NULL;
2006			}
2007
2008			/* Free all allocated memory. */
2009			run_truncate(run, 0);
2010		} else {
2011			u16 le_sz;
2012			u16 roff = le16_to_cpu(attr->nres.run_off);
2013
2014			if (roff > le32_to_cpu(attr->size)) {
2015				err = -EINVAL;
2016				goto out;
2017			}
2018
2019			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2020				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2021				      le32_to_cpu(attr->size) - roff);
2022
2023			/* Delete this attribute segment. */
2024			mi_remove_attr(NULL, mi, attr);
2025			if (!le)
2026				break;
2027
2028			le_sz = le16_to_cpu(le->size);
2029			if (!al_remove_le(ni, le)) {
2030				err = -EINVAL;
2031				goto out;
2032			}
2033
2034			if (evcn1 >= alen)
2035				break;
2036
2037			if (!svcn) {
2038				/* Load next record that contains this attribute. */
2039				if (ni_load_mi(ni, le, &mi)) {
2040					err = -EINVAL;
2041					goto out;
2042				}
2043
2044				/* Look for required attribute. */
2045				attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
2046						    NULL, 0, &le->id);
2047				if (!attr) {
2048					err = -EINVAL;
2049					goto out;
2050				}
2051				goto next_attr;
2052			}
2053			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2054		}
2055
2056		if (evcn1 >= alen)
2057			break;
2058
2059		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2060		if (!attr) {
2061			err = -EINVAL;
2062			goto out;
2063		}
2064
2065next_attr:
2066		svcn = le64_to_cpu(attr->nres.svcn);
2067		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2068	}
2069
2070	if (!attr_b) {
2071		le_b = NULL;
2072		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2073				      &mi_b);
2074		if (!attr_b) {
2075			err = -ENOENT;
2076			goto out;
2077		}
2078	}
2079
2080	data_size -= bytes;
2081	valid_size = ni->i_valid;
2082	if (vbo + bytes <= valid_size)
2083		valid_size -= bytes;
2084	else if (vbo < valid_size)
2085		valid_size = vbo;
2086
2087	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2088	attr_b->nres.data_size = cpu_to_le64(data_size);
2089	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2090	total_size -= (u64)dealloc << sbi->cluster_bits;
2091	if (is_attr_ext(attr_b))
2092		attr_b->nres.total_size = cpu_to_le64(total_size);
2093	mi_b->dirty = true;
2094
2095	/* Update inode size. */
2096	ni->i_valid = valid_size;
2097	i_size_write(&ni->vfs_inode, data_size);
2098	inode_set_bytes(&ni->vfs_inode, total_size);
2099	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2100	mark_inode_dirty(&ni->vfs_inode);
2101
2102out:
2103	up_write(&ni->file.run_lock);
2104	if (err)
2105		_ntfs_bad_inode(&ni->vfs_inode);
2106
2107	return err;
2108}
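
/*
 * A minimal sketch (hypothetical helper, not driver code) of the valid_size
 * adjustment performed above: a collapse entirely below the valid tail
 * shifts it down; a collapse overlapping it clamps it to the start of the
 * removed range.
 */
static inline u64 example_collapsed_valid_size(u64 valid_size, u64 vbo,
					       u64 bytes)
{
	if (vbo + bytes <= valid_size)
		return valid_size - bytes;	/* Fully below valid data. */
	if (vbo < valid_size)
		return vbo;			/* Overlaps the valid tail. */
	return valid_size;			/* Entirely beyond it. */
}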
2109
2110/*
2111 * attr_punch_hole
2112 *
2113 * Not for normal files.
2114 */
2115int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2116{
2117	int err = 0;
2118	struct runs_tree *run = &ni->file.run;
2119	struct ntfs_sb_info *sbi = ni->mi.sbi;
2120	struct ATTRIB *attr = NULL, *attr_b;
2121	struct ATTR_LIST_ENTRY *le, *le_b;
2122	struct mft_inode *mi, *mi_b;
2123	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2124	u64 total_size, alloc_size;
2125	u32 mask;
2126	__le16 a_flags;
2127	struct runs_tree run2;
2128
2129	if (!bytes)
2130		return 0;
2131
2132	le_b = NULL;
2133	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2134	if (!attr_b)
2135		return -ENOENT;
2136
2137	if (!attr_b->non_res) {
2138		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2139		u32 from, to;
2140
2141		if (vbo > data_size)
2142			return 0;
2143
2144		from = vbo;
2145		to = min_t(u64, vbo + bytes, data_size);
2146		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2147		return 0;
2148	}
2149
2150	if (!is_attr_ext(attr_b))
2151		return -EOPNOTSUPP;
2152
2153	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2154	total_size = le64_to_cpu(attr_b->nres.total_size);
2155
2156	if (vbo >= alloc_size) {
2157		/* NOTE: Punching beyond the allocated size is allowed; nothing to do. */
2158		return 0;
2159	}
2160
2161	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2162
2163	bytes += vbo;
2164	if (bytes > alloc_size)
2165		bytes = alloc_size;
2166	bytes -= vbo;
2167
2168	if ((vbo & mask) || (bytes & mask)) {
2169		/* One or more unaligned ranges would have to be zeroed. */
2170		if (!frame_size) {
2171			/* Caller requires an aligned range. */
2172			return -EINVAL;
2173		}
2174		*frame_size = mask + 1;
2175		return E_NTFS_NOTALIGNED;
2176	}
2177
2178	down_write(&ni->file.run_lock);
2179	run_init(&run2);
2180	run_truncate(run, 0);
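	/* 'run2' keeps a clone of each range we modify, for the undo path. */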
2181
2182	/*
2183	 * Enumerate all attribute segments and punch hole where necessary.
2184	 */
2185	alen = alloc_size >> sbi->cluster_bits;
2186	vcn = vbo >> sbi->cluster_bits;
2187	len = bytes >> sbi->cluster_bits;
2188	end = vcn + len;
2189	hole = 0;
2190
2191	svcn = le64_to_cpu(attr_b->nres.svcn);
2192	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2193	a_flags = attr_b->flags;
2194
2195	if (svcn <= vcn && vcn < evcn1) {
2196		attr = attr_b;
2197		le = le_b;
2198		mi = mi_b;
2199	} else if (!le_b) {
2200		err = -EINVAL;
2201		goto bad_inode;
2202	} else {
2203		le = le_b;
2204		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2205				    &mi);
2206		if (!attr) {
2207			err = -EINVAL;
2208			goto bad_inode;
2209		}
2210
2211		svcn = le64_to_cpu(attr->nres.svcn);
2212		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2213	}
2214
2215	while (svcn < end) {
2216		CLST vcn1, zero, hole2 = hole;
2217
2218		err = attr_load_runs(attr, ni, run, &svcn);
2219		if (err)
2220			goto done;
2221		vcn1 = max(vcn, svcn);
2222		zero = min(end, evcn1) - vcn1;
2223
2224		/*
2225		 * Check the range [vcn1 : vcn1 + zero).
2226		 * Calculate how many allocated clusters it contains.
2227		 * Don't do any destructive actions.
2228		 */
2229		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2230		if (err)
2231			goto done;
2232
2233		/* Check if the required range is already a hole. */
2234		if (hole2 == hole)
2235			goto next_attr;
2236
2237		/* Make a clone of run to undo. */
2238		err = run_clone(run, &run2);
2239		if (err)
2240			goto done;
2241
2242		/* Make the range [vcn1 : vcn1 + zero) a sparse hole. */
2243		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2244			err = -ENOMEM;
2245			goto done;
2246		}
2247
2248		/* Update run in attribute segment. */
2249		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2250		if (err)
2251			goto done;
2252		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2253		if (next_svcn < evcn1) {
2254			/* Insert new attribute segment. */
2255			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2256						    next_svcn,
2257						    evcn1 - next_svcn, a_flags,
2258						    &attr, &mi, &le);
2259			if (err)
2260				goto undo_punch;
2261
2262			/* Layout of records may have changed. */
2263			attr_b = NULL;
2264		}
2265
2266		/* Real deallocate. Should not fail. */
2267		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2268
2269next_attr:
2270		/* Free all allocated memory. */
2271		run_truncate(run, 0);
2272
2273		if (evcn1 >= alen)
2274			break;
2275
2276		/* Get next attribute segment. */
2277		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2278		if (!attr) {
2279			err = -EINVAL;
2280			goto bad_inode;
2281		}
2282
2283		svcn = le64_to_cpu(attr->nres.svcn);
2284		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2285	}
2286
2287done:
2288	if (!hole)
2289		goto out;
2290
2291	if (!attr_b) {
2292		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2293				      &mi_b);
2294		if (!attr_b) {
2295			err = -EINVAL;
2296			goto bad_inode;
2297		}
2298	}
2299
2300	total_size -= (u64)hole << sbi->cluster_bits;
2301	attr_b->nres.total_size = cpu_to_le64(total_size);
2302	mi_b->dirty = true;
2303
2304	/* Update inode size. */
2305	inode_set_bytes(&ni->vfs_inode, total_size);
2306	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2307	mark_inode_dirty(&ni->vfs_inode);
2308
2309out:
2310	run_close(&run2);
2311	up_write(&ni->file.run_lock);
2312	return err;
2313
2314bad_inode:
2315	_ntfs_bad_inode(&ni->vfs_inode);
2316	goto out;
2317
2318undo_punch:
2319	/*
2320	 * Restore packed runs.
2321	 * 'mi_pack_runs' should not fail because we restore the original runs.
2322	 */
2323	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2324		goto bad_inode;
2325
2326	goto done;
2327}
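
/*
 * A sketch of how a caller might consume E_NTFS_NOTALIGNED (the helper name
 * and the retry policy are assumptions, not driver code): trim the hole to
 * frame boundaries, punch the aligned middle and leave the unaligned
 * head/tail to be zeroed by other means.
 */
static inline int example_punch_aligned(struct ntfs_inode *ni, u64 vbo,
					u64 bytes)
{
	u32 frame_size;
	u64 mask, vbo_a, end_a;
	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);

	if (err != E_NTFS_NOTALIGNED)
		return err;

	mask = (u64)frame_size - 1;
	vbo_a = (vbo + mask) & ~mask;	/* Round the start up to a frame. */
	end_a = (vbo + bytes) & ~mask;	/* Round the end down to a frame. */
	if (vbo_a >= end_a)
		return 0;	/* No whole frame inside the range. */

	/* Caller must zero [vbo : vbo_a) and [end_a : vbo + bytes) itself. */
	return attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
}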
2328
2329/*
2330 * attr_insert_range - Insert range (hole) in file.
2331 * Not for normal files.
2332 */
2333int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2334{
2335	int err = 0;
2336	struct runs_tree *run = &ni->file.run;
2337	struct ntfs_sb_info *sbi = ni->mi.sbi;
2338	struct ATTRIB *attr = NULL, *attr_b;
2339	struct ATTR_LIST_ENTRY *le, *le_b;
2340	struct mft_inode *mi, *mi_b;
2341	CLST vcn, svcn, evcn1, len, next_svcn;
2342	u64 data_size, alloc_size;
2343	u32 mask;
2344	__le16 a_flags;
2345
2346	if (!bytes)
2347		return 0;
2348
2349	le_b = NULL;
2350	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2351	if (!attr_b)
2352		return -ENOENT;
2353
2354	if (!is_attr_ext(attr_b)) {
2355		/* Checked by the caller; see ntfs_fallocate(). */
2356		return -EOPNOTSUPP;
2357	}
2358
2359	if (!attr_b->non_res) {
2360		data_size = le32_to_cpu(attr_b->res.data_size);
2361		alloc_size = data_size;
2362		mask = sbi->cluster_mask; /* cluster_size - 1 */
2363	} else {
2364		data_size = le64_to_cpu(attr_b->nres.data_size);
2365		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2366		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2367	}
2368
2369	if (vbo >= data_size) {
2370		/*
2371		 * Inserting a range beyond the file size is not allowed:
2372		 * if the offset is equal to or greater than the end of
2373		 * file, an error is returned. To grow a file at its end,
2374		 * ftruncate(2) should be used instead.
2375		 */
2376		return -EINVAL;
2377	}
2378
2379	if ((vbo & mask) || (bytes & mask)) {
2380		/* Allow to insert only frame aligned ranges. */
2381		return -EINVAL;
2382	}
2383
2384	/*
2385	 * valid_size <= data_size <= alloc_size
2386	 * Check that the new alloc_size does not exceed the maximum possible.
2387	 */
2388	if (bytes > sbi->maxbytes_sparse - alloc_size)
2389		return -EFBIG;
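	/*
	 * The subtraction form above avoids overflowing 'alloc_size + bytes'
	 * when alloc_size is already close to sbi->maxbytes_sparse.
	 */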
2390
2391	vcn = vbo >> sbi->cluster_bits;
2392	len = bytes >> sbi->cluster_bits;
2393
2394	down_write(&ni->file.run_lock);
2395
2396	if (!attr_b->non_res) {
2397		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2398				    data_size + bytes, NULL, false, NULL);
2399
2400		le_b = NULL;
2401		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2402				      &mi_b);
2403		if (!attr_b) {
2404			err = -EINVAL;
2405			goto bad_inode;
2406		}
2407
2408		if (err)
2409			goto out;
2410
2411		if (!attr_b->non_res) {
2412			/* Still resident: shift the tail up and zero the gap. */
2413			char *data = Add2Ptr(attr_b,
2414					     le16_to_cpu(attr_b->res.data_off));
2415
2416			memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2417			memset(data + vbo, 0, bytes);
2418			goto done;
2419		}
2420
2421		/* The resident attribute has become nonresident. */
2422		data_size = le64_to_cpu(attr_b->nres.data_size);
2423		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2424	}
2425
2426	/*
2427	 * Enumerate all attribute segments and shift start vcn.
2428	 */
2429	a_flags = attr_b->flags;
2430	svcn = le64_to_cpu(attr_b->nres.svcn);
2431	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2432
2433	if (svcn <= vcn && vcn < evcn1) {
2434		attr = attr_b;
2435		le = le_b;
2436		mi = mi_b;
2437	} else if (!le_b) {
2438		err = -EINVAL;
2439		goto bad_inode;
2440	} else {
2441		le = le_b;
2442		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2443				    &mi);
2444		if (!attr) {
2445			err = -EINVAL;
2446			goto bad_inode;
2447		}
2448
2449		svcn = le64_to_cpu(attr->nres.svcn);
2450		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2451	}
2452
2453	run_truncate(run, 0); /* clear cached values. */
2454	err = attr_load_runs(attr, ni, run, NULL);
2455	if (err)
2456		goto out;
2457
2458	if (!run_insert_range(run, vcn, len)) {
2459		err = -ENOMEM;
2460		goto out;
2461	}
2462
2463	/* Try to pack as many runs as possible into the current record. */
2464	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2465	if (err)
2466		goto out;
2467
2468	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2469
2470	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2471	       attr->type == ATTR_DATA && !attr->name_len) {
2472		le64_add_cpu(&attr->nres.svcn, len);
2473		le64_add_cpu(&attr->nres.evcn, len);
2474		if (le) {
2475			le->vcn = attr->nres.svcn;
2476			ni->attr_list.dirty = true;
2477		}
2478		mi->dirty = true;
2479	}
2480
2481	if (next_svcn < evcn1 + len) {
2482		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2483					    next_svcn, evcn1 + len - next_svcn,
2484					    a_flags, NULL, NULL, NULL);
2485
2486		le_b = NULL;
2487		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2488				      &mi_b);
2489		if (!attr_b) {
2490			err = -EINVAL;
2491			goto bad_inode;
2492		}
2493
2494		if (err) {
2495			/* ni_insert_nonresident failed. Try to undo. */
2496			goto undo_insert_range;
2497		}
2498	}
2499
2500	/*
2501	 * Update primary attribute segment.
2502	 */
2503	if (vbo <= ni->i_valid)
2504		ni->i_valid += bytes;
2505
2506	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2507	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2508
2509	/* ni->i_valid may temporarily differ from valid_size. */
2510	if (ni->i_valid > data_size + bytes)
2511		attr_b->nres.valid_size = attr_b->nres.data_size;
2512	else
2513		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2514	mi_b->dirty = true;
2515
2516done:
2517	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2518	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2519	mark_inode_dirty(&ni->vfs_inode);
2520
2521out:
2522	run_truncate(run, 0); /* clear cached values. */
2523
2524	up_write(&ni->file.run_lock);
2525
2526	return err;
2527
2528bad_inode:
2529	_ntfs_bad_inode(&ni->vfs_inode);
2530	goto out;
2531
2532undo_insert_range:
2533	svcn = le64_to_cpu(attr_b->nres.svcn);
2534	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2535
2536	if (svcn <= vcn && vcn < evcn1) {
2537		attr = attr_b;
2538		le = le_b;
2539		mi = mi_b;
2540	} else if (!le_b) {
2541		goto bad_inode;
2542	} else {
2543		le = le_b;
2544		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2545				    &mi);
2546		if (!attr) {
2547			goto bad_inode;
2548		}
2549
2550		svcn = le64_to_cpu(attr->nres.svcn);
2551		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2552	}
2553
2554	if (attr_load_runs(attr, ni, run, NULL))
2555		goto bad_inode;
2556
2557	if (!run_collapse_range(run, vcn, len))
2558		goto bad_inode;
2559
2560	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2561		goto bad_inode;
2562
2563	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2564	       attr->type == ATTR_DATA && !attr->name_len) {
2565		le64_sub_cpu(&attr->nres.svcn, len);
2566		le64_sub_cpu(&attr->nres.evcn, len);
2567		if (le) {
2568			le->vcn = attr->nres.svcn;
2569			ni->attr_list.dirty = true;
2570		}
2571		mi->dirty = true;
2572	}
2573
2574	goto out;
2575}
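
/*
 * Worked example for attr_insert_range() under assumed geometry (4K
 * clusters, sparse non-compressed attribute, so mask == 0xFFF): inserting
 * bytes == 0x4000 at vbo == 0x10000 gives vcn == 16 and len == 4; every
 * mapped cluster from vcn 16 upward shifts by 4, vcn [16 : 20) becomes a
 * sparse hole, and data_size/alloc_size (and i_valid, when
 * vbo <= i_valid) grow by 0x4000.
 */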
2576
2577/*
2578 * attr_force_nonresident
2579 *
2580 * Convert the default data attribute into non-resident form.
2581 */
2582int attr_force_nonresident(struct ntfs_inode *ni)
2583{
2584	int err;
2585	struct ATTRIB *attr;
2586	struct ATTR_LIST_ENTRY *le = NULL;
2587	struct mft_inode *mi;
2588
2589	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
2590	if (!attr) {
2591		_ntfs_bad_inode(&ni->vfs_inode);
2592		return -ENOENT;
2593	}
2594
2595	if (attr->non_res) {
2596		/* Already non-resident. */
2597		return 0;
2598	}
2599
2600	down_write(&ni->file.run_lock);
2601	err = attr_make_nonresident(ni, attr, le, mi,
2602				    le32_to_cpu(attr->res.data_size),
2603				    &ni->file.run, &attr, NULL);
2604	up_write(&ni->file.run_lock);
2605
2606	return err;
2607}
2608
2609/*
2610 * attr_set_compress - Change the compression state of the data attribute.
2611 */
2612int attr_set_compress(struct ntfs_inode *ni, bool compr)
2613{
2614	struct ATTRIB *attr;
2615	struct mft_inode *mi;
2616
2617	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
2618	if (!attr)
2619		return -ENOENT;
2620
2621	if (is_attr_compressed(attr) == !!compr) {
2622		/* Already in the required compression state. */
2623		return 0;
2624	}
2625
2626	if (attr->non_res) {
2627		u16 run_off;
2628		u32 run_size;
2629		char *run;
2630
2631		if (attr->nres.data_size) {
2632			/*
2633			 * There are rare cases when it is possible to change
2634			 * the compression state without big changes.
2635			 * TODO: Process these cases.
2636			 */
2637			return -EOPNOTSUPP;
2638		}
2639
2640		run_off = le16_to_cpu(attr->nres.run_off);
2641		run_size = le32_to_cpu(attr->size) - run_off;
2642		run = Add2Ptr(attr, run_off);
2643
2644		if (!compr) {
2645			/* Remove the 'attr->nres.total_size' field. */
2646			memmove(run - 8, run, run_size);
2647			run_off -= 8;
2648		}
2649
2650		if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) {
2651			/*
2652			 * Rare case: the record has no 8 spare bytes for the attribute.
2653			 * TODO: Split the attribute.
2654			 */
2655			return -EOPNOTSUPP;
2656		}
2657
2658		if (compr) {
2659			/* Make a gap for 'attr->nres.total_size'. */
2660			memmove(run + 8, run, run_size);
2661			run_off += 8;
2662			attr->nres.total_size = attr->nres.alloc_size;
2663		}
2664		attr->nres.run_off = cpu_to_le16(run_off);
2665	}
2666
2667	/* Update data attribute flags. */
2668	if (compr) {
2669		attr->flags |= ATTR_FLAG_COMPRESSED;
2670		attr->nres.c_unit = NTFS_LZNT_CUNIT;
2671	} else {
2672		attr->flags &= ~ATTR_FLAG_COMPRESSED;
2673		attr->nres.c_unit = 0;
2674	}
2675	mi->dirty = true;
2676
2677	return 0;
2678}
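
/*
 * A compile-time sketch of the layout rule handled above: the 8 bytes that
 * attr_set_compress() adds or removes are exactly the nres.total_size
 * field, which only extended (compressed/sparse) attributes carry.
 */
static_assert(sizeof(((struct ATTRIB *)0)->nres.total_size) == 8);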