// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
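/*
 * Well-known NTFS metadata names. In 'struct cpu_str' the first field
 * is the name length in characters and the second is padding, followed
 * by the characters themselves.
 */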
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

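/*
 * NTFS protects multi-sector records with an update sequence: the last
 * two bytes of every 512-byte sector are replaced on disk by the update
 * sequence number, and the original values are saved in the fixup array
 * in the record header. The two helpers below insert the fixups before
 * a write and verify/remove them after a read.
 */
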
/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because of NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

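/*
 * ntfs_loadlog_and_replay - Load $LogFile and replay it.
 *
 * Loads $MFT (falling back to the mirror copy), replays the log and,
 * if the volume is writable and the log was initialized, resets the
 * logfile by filling it with -1.
 */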
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile must not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area
		 * [11-16) == [MFT_REC_RESERVED, MFT_REC_FREE).
		 * The MFT bitmap always marks this area as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create an internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purposes. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is already non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN of the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	/* Avoid recursion if bad inode is $Volume. */
	if (inode->i_ino != MFT_REC_VOL &&
	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
	}
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* Write MFT record to disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculates a hash of a security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}

/*
 * ntfs_bread - Simple wrapper for sb_bread_unmovable().
 */
struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct buffer_head *bh;

	if (unlikely(block >= sbi->volume.blocks)) {
		/* Prevent the generic message "attempt to access beyond end of device". */
		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
			 (u64)block << sb->s_blocksize_bits);
		return NULL;
	}

	bh = sb_bread_unmovable(sb, block);
	if (bh)
		return bh;

	ntfs_err(sb, "failed to read volume at offset 0x%llx",
		 (u64)block << sb->s_blocksize_bits);
	return NULL;
}

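/*
 * ntfs_sb_read - Read 'bytes' bytes at device byte offset 'lbo' into 'buffer'.
 *
 * 'lbo' does not need to be block aligned.
 */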
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

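/*
 * ntfs_sb_write - Write 'bytes' bytes at device byte offset 'lbo'.
 *
 * If 'buf' is NULL the range is filled with 0xFF bytes. Partially
 * covered blocks are read first; 'wait' makes each write synchronous.
 */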
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

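/*
 * ntfs_sb_write_run - Write bytes at a virtual byte offset of a run.
 *
 * Returns -ENOENT on a hole in the run list and -EINVAL on a sparse
 * cluster, since neither can be written through this helper.
 */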
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

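/*
 * ntfs_bread_run - Read the block containing the virtual byte offset 'vbo'.
 *
 * Returns the buffer head, ERR_PTR(-ENOENT) if 'vbo' is not mapped by
 * 'run', or NULL on read error.
 */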
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

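/*
 * ntfs_read_run_nb - Read data from a run into 'buf' and/or 'nb'.
 *
 * With run == NULL the boot-relative MFT location is used (early mount).
 * If 'nb' is given, the buffer heads stay referenced there for later
 * use; otherwise each one is released as soon as it is copied.
 */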
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

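/*
 * ntfs_get_bh - Collect buffer heads covering a range of a run.
 *
 * Blocks that are fully covered are grabbed with sb_getblk() and not
 * read, since the caller is going to overwrite them completely.
 */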
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1;
 * this means an empty logfile.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

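/*
 * ntfs_vbo_to_lbo - Translate a virtual byte offset into a logical one.
 *
 * *lbo receives the logical byte offset (-1 for a sparse cluster) and
 * *bytes the number of contiguously mapped bytes from that point.
 */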
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

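/*
 * ntfs_new_inode - Allocate an in-memory inode and format its MFT record.
 *
 * On failure the inode is marked bad and released, and an ERR_PTR is
 * returned.
 */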
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

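/*
 * is_sd_valid - Sanity check a self-relative security descriptor.
 *
 * Verifies that the header, the owner/group SIDs and the SACL/DACL
 * all lie within 'len' bytes and look well formed.
 */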
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security descriptors at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary limit. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes,
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such security already exists.
	 * Use "SDH" and hash -> to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				/* Such security already exists. */
				*security_id = d_security->key.sec_id;
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero tail of previous security. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *  u32 tozero = next - used;
	 *  zero "tozero" bytes from sbi->security.next_off - tozero
	 */

	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);

	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.de.flags = 0;
	sii_e.de.res = 0;
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.de.flags = 0;
	sdh_e.de.res = 0;
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	fnd_clear(fnd_sdh);
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update Id and offset for next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}

2284/*
2285 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2286 */
2287int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2288{
2289	int err;
2290	struct ntfs_inode *ni = sbi->reparse.ni;
2291	struct ntfs_index *indx = &sbi->reparse.index_r;
2292	struct ATTRIB *attr;
2293	struct ATTR_LIST_ENTRY *le;
2294	const struct INDEX_ROOT *root_r;
2295
2296	if (!ni)
2297		return 0;
2298
2299	le = NULL;
2300	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2301			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2302	if (!attr) {
2303		err = -EINVAL;
2304		goto out;
2305	}
2306
2307	root_r = resident_data(attr);
2308	if (root_r->type != ATTR_ZERO ||
2309	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2310		err = -EINVAL;
2311		goto out;
2312	}
2313
2314	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2315	if (err)
2316		goto out;
2317
2318out:
2319	return err;
2320}
2321
2322/*
2323 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2324 */
2325int ntfs_objid_init(struct ntfs_sb_info *sbi)
2326{
2327	int err;
2328	struct ntfs_inode *ni = sbi->objid.ni;
2329	struct ntfs_index *indx = &sbi->objid.index_o;
2330	struct ATTRIB *attr;
2331	struct ATTR_LIST_ENTRY *le;
2332	const struct INDEX_ROOT *root;
2333
2334	if (!ni)
2335		return 0;
2336
2337	le = NULL;
2338	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2339			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2340	if (!attr) {
2341		err = -EINVAL;
2342		goto out;
2343	}
2344
2345	root = resident_data(attr);
2346	if (root->type != ATTR_ZERO ||
2347	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2348		err = -EINVAL;
2349		goto out;
2350	}
2351
2352	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2353	if (err)
2354		goto out;
2355
2356out:
2357	return err;
2358}
2359
2360int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2361{
2362	int err;
2363	struct ntfs_inode *ni = sbi->objid.ni;
2364	struct ntfs_index *indx = &sbi->objid.index_o;
2365
2366	if (!ni)
2367		return -EINVAL;
2368
2369	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2370
2371	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2372
2373	mark_inode_dirty(&ni->vfs_inode);
2374	ni_unlock(ni);
2375
2376	return err;
2377}
2378
2379int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2380			const struct MFT_REF *ref)
2381{
2382	int err;
2383	struct ntfs_inode *ni = sbi->reparse.ni;
2384	struct ntfs_index *indx = &sbi->reparse.index_r;
2385	struct NTFS_DE_R re;
2386
2387	if (!ni)
2388		return -EINVAL;
2389
2390	memset(&re, 0, sizeof(re));
2391
2392	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2393	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2394	re.de.key_size = cpu_to_le16(sizeof(re.key));
2395
2396	re.key.ReparseTag = rtag;
2397	memcpy(&re.key.ref, ref, sizeof(*ref));
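	/*
	 * The $R index key is the (ReparseTag, MFT reference) pair;
	 * entries are collated as a sequence of u32 values
	 * (NTFS_COLLATION_TYPE_UINTS, see ntfs_reparse_init()).
	 */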
2398
2399	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2400
2401	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2402
2403	mark_inode_dirty(&ni->vfs_inode);
2404	ni_unlock(ni);
2405
2406	return err;
2407}
2408
2409int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2410			const struct MFT_REF *ref)
2411{
2412	int err, diff;
2413	struct ntfs_inode *ni = sbi->reparse.ni;
2414	struct ntfs_index *indx = &sbi->reparse.index_r;
2415	struct ntfs_fnd *fnd = NULL;
2416	struct REPARSE_KEY rkey;
2417	struct NTFS_DE_R *re;
2418	struct INDEX_ROOT *root_r;
2419
2420	if (!ni)
2421		return -EINVAL;
2422
2423	rkey.ReparseTag = rtag;
2424	rkey.ref = *ref;
2425
2426	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2427
2428	if (rtag) {
2429		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2430		goto out1;
2431	}
2432
2433	fnd = fnd_get();
2434	if (!fnd) {
2435		err = -ENOMEM;
2436		goto out1;
2437	}
2438
2439	root_r = indx_get_root(indx, ni, NULL, NULL);
2440	if (!root_r) {
2441		err = -EINVAL;
2442		goto out;
2443	}
2444
2445	/* 1 - forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
2446	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2447			(struct NTFS_DE **)&re, fnd);
2448	if (err)
2449		goto out;
2450
2451	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2452		/* Should be impossible; the volume is likely corrupt. */
2453		goto out;
2454	}
2455
2456	memcpy(&rkey, &re->key, sizeof(rkey));
2457
2458	fnd_put(fnd);
2459	fnd = NULL;
2460
2461	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2462	if (err)
2463		goto out;
2464
2465out:
2466	fnd_put(fnd);
2467
2468out1:
2469	mark_inode_dirty(&ni->vfs_inode);
2470	ni_unlock(ni);
2471
2472	return err;
2473}
2474
2475static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2476					  CLST len)
2477{
2478	ntfs_unmap_meta(sbi->sb, lcn, len);
2479	ntfs_discard(sbi, lcn, len);
2480}
2481
2482void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2483{
2484	CLST end, i, zone_len, zlen;
2485	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2486	bool dirty = false;
2487
2488	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2489	if (!wnd_is_used(wnd, lcn, len)) {
2490		/* Mark the volume as dirty after wnd->rw_lock is released. */
2491		dirty = true;
2492
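		/*
		 * Part of the range is already free, which should never
		 * happen: rescan [lcn, end) and free only the sub-runs
		 * that are still marked as used.
		 */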
2493		end = lcn + len;
2494		len = 0;
2495		for (i = lcn; i < end; i++) {
2496			if (wnd_is_used(wnd, i, 1)) {
2497				if (!len)
2498					lcn = i;
2499				len += 1;
2500				continue;
2501			}
2502
2503			if (!len)
2504				continue;
2505
2506			if (trim)
2507				ntfs_unmap_and_discard(sbi, lcn, len);
2508
2509			wnd_set_free(wnd, lcn, len);
2510			len = 0;
2511		}
2512
2513		if (!len)
2514			goto out;
2515	}
2516
2517	if (trim)
2518		ntfs_unmap_and_discard(sbi, lcn, len);
2519	wnd_set_free(wnd, lcn, len);
2520
2521	/* Append to the MFT zone, if possible. */
2522	zone_len = wnd_zone_len(wnd);
2523	zlen = min(zone_len + len, sbi->zone_max);
2524
2525	if (zlen == zone_len) {
2526		/* MFT zone already has maximum size. */
2527	} else if (!zone_len) {
2528		/* Create MFT zone only if 'zlen' is large enough. */
2529		if (zlen == sbi->zone_max)
2530			wnd_zone_set(wnd, lcn, zlen);
2531	} else {
2532		CLST zone_lcn = wnd_zone_bit(wnd);
2533
2534		if (lcn + len == zone_lcn) {
2535			/* Append into head MFT zone. */
2536			wnd_zone_set(wnd, lcn, zlen);
2537		} else if (zone_lcn + zone_len == lcn) {
2538			/* Append into tail MFT zone. */
2539			wnd_zone_set(wnd, zone_lcn, zlen);
2540		}
2541	}
2542
2543out:
2544	up_write(&wnd->rw_lock);
2545	if (dirty)
2546		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2547}
2548
2549/*
2550 * run_deallocate - Deallocate clusters.
2551 */
2552int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2553		   bool trim)
2554{
2555	CLST lcn, len;
2556	size_t idx = 0;
2557
2558	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2559		if (lcn == SPARSE_LCN)
2560			continue;
2561
2562		mark_as_free_ex(sbi, lcn, len, trim);
2563	}
2564
2565	return 0;
2566}
2567
2568static inline bool name_has_forbidden_chars(const struct le_str *fname)
2569{
2570	int i, ch;
2571
2572	/* check for forbidden chars */
2573	for (i = 0; i < fname->len; ++i) {
2574		ch = le16_to_cpu(fname->name[i]);
2575
2576		/* control chars */
2577		if (ch < 0x20)
2578			return true;
2579
2580		switch (ch) {
2581		/* disallowed by Windows */
2582		case '\\':
2583		case '/':
2584		case ':':
2585		case '*':
2586		case '?':
2587		case '<':
2588		case '>':
2589		case '|':
2590		case '\"':
2591			return true;
2592
2593		default:
2594			/* allowed char */
2595			break;
2596		}
2597	}
2598
2599	/* File names must not end with a space or a dot. */
2600	if (fname->len > 0) {
2601		ch = le16_to_cpu(fname->name[fname->len - 1]);
2602		if (ch == ' ' || ch == '.')
2603			return true;
2604	}
2605
2606	return false;
2607}
2608
2609static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2610				    const struct le_str *fname)
2611{
2612	int port_digit;
2613	const __le16 *name = fname->name;
2614	int len = fname->len;
2615	const u16 *upcase = sbi->upcase;
2616
2617	/* Check for 3-char reserved names (device names). */
2618	/* The name by itself or with any extension is forbidden. */
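	/*
	 * E.g. "NUL" and "nul.txt" are both rejected; the comparison is
	 * case-insensitive via the upcase table.
	 */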
2619	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2620		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2621		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2622		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2623		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2624			return true;
2625
2626	/* Check for 4-char reserved names (a port name followed by a digit 1-9). */
2627	/* The name by itself or with any extension is forbidden. */
2628	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2629		port_digit = le16_to_cpu(name[3]);
2630		if (port_digit >= '1' && port_digit <= '9')
2631			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2632					    false) ||
2633			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2634					    false))
2635				return true;
2636	}
2637
2638	return false;
2639}
2640
2641/*
2642 * valid_windows_name - Check if a file name is valid in Windows.
2643 */
2644bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2645{
2646	return !name_has_forbidden_chars(fname) &&
2647	       !is_reserved_name(sbi, fname);
2648}
2649
2650/*
2651 * ntfs_set_label - Update the current NTFS label.
2652 */
2653int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2654{
2655	int err;
2656	struct ATTRIB *attr;
2657	u32 uni_bytes;
2658	struct ntfs_inode *ni = sbi->volume.ni;
2659	/* Allocate PATH_MAX bytes. */
2660	struct cpu_str *uni = __getname();
2661
2662	if (!uni)
2663		return -ENOMEM;
2664
2665	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2666				UTF16_LITTLE_ENDIAN);
2667	if (err < 0)
2668		goto out;
2669
2670	uni_bytes = uni->len * sizeof(u16);
2671	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
2672		ntfs_warn(sbi->sb, "new label is too long");
2673		err = -EFBIG;
2674		goto out;
2675	}
2676
2677	ni_lock(ni);
2678
2679	/* Ignore any errors. */
2680	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2681
2682	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
2683				 NULL, NULL);
2684	if (err < 0)
2685		goto unlock_out;
2686
2687	/* write new label in on-disk struct. */
2688	memcpy(resident_data(attr), uni->name, uni_bytes);
2689
2690	/* update cached value of current label. */
2691	if (len >= ARRAY_SIZE(sbi->volume.label))
2692		len = ARRAY_SIZE(sbi->volume.label) - 1;
2693	memcpy(sbi->volume.label, label, len);
2694	sbi->volume.label[len] = 0;
2695	mark_inode_dirty_sync(&ni->vfs_inode);
2696
2697unlock_out:
2698	ni_unlock(ni);
2699
2700	if (!err)
2701		err = _ni_write_inode(&ni->vfs_inode, 0);
2702
2703out:
2704	__putname(uni);
2705	return err;
2706}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
   5 *
   6 */
   7
   8#include <linux/blkdev.h>
   9#include <linux/buffer_head.h>
  10#include <linux/fs.h>
  11#include <linux/kernel.h>
  12#include <linux/nls.h>
  13
  14#include "debug.h"
  15#include "ntfs.h"
  16#include "ntfs_fs.h"
  17
  18// clang-format off
  19const struct cpu_str NAME_MFT = {
  20	4, 0, { '$', 'M', 'F', 'T' },
  21};
  22const struct cpu_str NAME_MIRROR = {
  23	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
  24};
  25const struct cpu_str NAME_LOGFILE = {
  26	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
  27};
  28const struct cpu_str NAME_VOLUME = {
  29	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
  30};
  31const struct cpu_str NAME_ATTRDEF = {
  32	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
  33};
  34const struct cpu_str NAME_ROOT = {
  35	1, 0, { '.' },
  36};
  37const struct cpu_str NAME_BITMAP = {
  38	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
  39};
  40const struct cpu_str NAME_BOOT = {
  41	5, 0, { '$', 'B', 'o', 'o', 't' },
  42};
  43const struct cpu_str NAME_BADCLUS = {
  44	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
  45};
  46const struct cpu_str NAME_QUOTA = {
  47	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
  48};
  49const struct cpu_str NAME_SECURE = {
  50	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
  51};
  52const struct cpu_str NAME_UPCASE = {
  53	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
  54};
  55const struct cpu_str NAME_EXTEND = {
  56	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
  57};
  58const struct cpu_str NAME_OBJID = {
  59	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
  60};
  61const struct cpu_str NAME_REPARSE = {
  62	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
  63};
  64const struct cpu_str NAME_USNJRNL = {
  65	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
  66};
  67const __le16 BAD_NAME[4] = {
  68	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
  69};
  70const __le16 I30_NAME[4] = {
  71	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
  72};
  73const __le16 SII_NAME[4] = {
  74	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
  75};
  76const __le16 SDH_NAME[4] = {
  77	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
  78};
  79const __le16 SDS_NAME[4] = {
  80	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
  81};
  82const __le16 SO_NAME[2] = {
  83	cpu_to_le16('$'), cpu_to_le16('O'),
  84};
  85const __le16 SQ_NAME[2] = {
  86	cpu_to_le16('$'), cpu_to_le16('Q'),
  87};
  88const __le16 SR_NAME[2] = {
  89	cpu_to_le16('$'), cpu_to_le16('R'),
  90};
  91
  92#ifdef CONFIG_NTFS3_LZX_XPRESS
  93const __le16 WOF_NAME[17] = {
  94	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
  95	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
  96	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
  97	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
  98	cpu_to_le16('a'),
  99};
 100#endif
 101
 102static const __le16 CON_NAME[3] = {
 103	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
 104};
 105
 106static const __le16 NUL_NAME[3] = {
 107	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
 108};
 109
 110static const __le16 AUX_NAME[3] = {
 111	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
 112};
 113
 114static const __le16 PRN_NAME[3] = {
 115	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
 116};
 117
 118static const __le16 COM_NAME[3] = {
 119	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
 120};
 121
 122static const __le16 LPT_NAME[3] = {
 123	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
 124};
 125
 126// clang-format on
 127
 128/*
 129 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
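 *
 * Each record carries an update sequence: the USN at @fix_off is
 * incremented, the last u16 of every SECTOR_SIZE-byte sector is saved
 * into the fixup array, and that u16 is overwritten with the new USN,
 * so a torn multi-sector write can be detected on the next read.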
 130 */
 131bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
 132{
 133	u16 *fixup, *ptr;
 134	u16 sample;
 135	u16 fo = le16_to_cpu(rhdr->fix_off);
 136	u16 fn = le16_to_cpu(rhdr->fix_num);
 137
 138	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
 139	    fn * SECTOR_SIZE > bytes) {
 140		return false;
 141	}
 142
 143	/* Get fixup pointer. */
 144	fixup = Add2Ptr(rhdr, fo);
 145
 146	if (*fixup >= 0x7FFF)
 147		*fixup = 1;
 148	else
 149		*fixup += 1;
 150
 151	sample = *fixup;
 152
 153	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
 154
 155	while (fn--) {
 156		*++fixup = *ptr;
 157		*ptr = sample;
 158		ptr += SECTOR_SIZE / sizeof(short);
 159	}
 160	return true;
 161}
 162
 163/*
 164 * ntfs_fix_post_read - Remove fixups after reading from disk.
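 *
 * Verify that the last u16 of every sector still equals the update
 * sequence number (a mismatch means a torn write) and restore the
 * original values from the fixup array.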
 165 *
 166 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 167 */
 168int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
 169		       bool simple)
 170{
 171	int ret;
 172	u16 *fixup, *ptr;
 173	u16 sample, fo, fn;
 174
 175	fo = le16_to_cpu(rhdr->fix_off);
 176	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
 177		      le16_to_cpu(rhdr->fix_num);
 178
 179	/* Check errors. */
 180	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
 181	    fn * SECTOR_SIZE > bytes) {
 182		return -E_NTFS_CORRUPT;
 183	}
 184
 185	/* Get fixup pointer. */
 186	fixup = Add2Ptr(rhdr, fo);
 187	sample = *fixup;
 188	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
 189	ret = 0;
 190
 191	while (fn--) {
 192		/* Test current word. */
 193		if (*ptr != sample) {
 194			/* Fixup does not match! Is it serious error? */
 195			ret = -E_NTFS_FIXUP;
 196		}
 197
 198		/* Replace fixup. */
 199		*ptr = *++fixup;
 200		ptr += SECTOR_SIZE / sizeof(short);
 201	}
 202
 203	return ret;
 204}
 205
 206/*
 207 * ntfs_extend_init - Load $Extend file.
 208 */
 209int ntfs_extend_init(struct ntfs_sb_info *sbi)
 210{
 211	int err;
 212	struct super_block *sb = sbi->sb;
 213	struct inode *inode, *inode2;
 214	struct MFT_REF ref;
 215
 216	if (sbi->volume.major_ver < 3) {
 217		ntfs_notice(sb, "Skip $Extend because of NTFS version");
 218		return 0;
 219	}
 220
 221	ref.low = cpu_to_le32(MFT_REC_EXTEND);
 222	ref.high = 0;
 223	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
 224	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
 225	if (IS_ERR(inode)) {
 226		err = PTR_ERR(inode);
 227		ntfs_err(sb, "Failed to load $Extend (%d).", err);
 228		inode = NULL;
 229		goto out;
 230	}
 231
 232	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
 233	if (!S_ISDIR(inode->i_mode)) {
 234		err = -EINVAL;
 235		goto out;
 236	}
 237
 238	/* Try to find $ObjId */
 239	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
 240	if (inode2 && !IS_ERR(inode2)) {
 241		if (is_bad_inode(inode2)) {
 242			iput(inode2);
 243		} else {
 244			sbi->objid.ni = ntfs_i(inode2);
 245			sbi->objid_no = inode2->i_ino;
 246		}
 247	}
 248
 249	/* Try to find $Quota */
 250	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
 251	if (inode2 && !IS_ERR(inode2)) {
 252		sbi->quota_no = inode2->i_ino;
 253		iput(inode2);
 254	}
 255
 256	/* Try to find $Reparse */
 257	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
 258	if (inode2 && !IS_ERR(inode2)) {
 259		sbi->reparse.ni = ntfs_i(inode2);
 260		sbi->reparse_no = inode2->i_ino;
 261	}
 262
 263	/* Try to find $UsnJrnl */
 264	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
 265	if (inode2 && !IS_ERR(inode2)) {
 266		sbi->usn_jrnl_no = inode2->i_ino;
 267		iput(inode2);
 268	}
 269
 270	err = 0;
 271out:
 272	iput(inode);
 273	return err;
 274}
 275
 276int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
 277{
 278	int err = 0;
 279	struct super_block *sb = sbi->sb;
 280	bool initialized = false;
 281	struct MFT_REF ref;
 282	struct inode *inode;
 283
 284	/* Check for 4GB. */
 285	if (ni->vfs_inode.i_size >= 0x100000000ull) {
 286		ntfs_err(sb, "\x24LogFile is larger than 4G.");
 287		err = -EINVAL;
 288		goto out;
 289	}
 290
 291	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
 292
 293	ref.low = cpu_to_le32(MFT_REC_MFT);
 294	ref.high = 0;
 295	ref.seq = cpu_to_le16(1);
 296
 297	inode = ntfs_iget5(sb, &ref, NULL);
 298
 299	if (IS_ERR(inode))
 300		inode = NULL;
 301
 302	if (!inode) {
 303		/* Try to use MFT copy. */
 304		u64 t64 = sbi->mft.lbo;
 305
 306		sbi->mft.lbo = sbi->mft.lbo2;
 307		inode = ntfs_iget5(sb, &ref, NULL);
 308		sbi->mft.lbo = t64;
 309		if (IS_ERR(inode))
 310			inode = NULL;
 311	}
 312
 313	if (!inode) {
 314		err = -EINVAL;
 315		ntfs_err(sb, "Failed to load $MFT.");
 316		goto out;
 317	}
 318
 319	sbi->mft.ni = ntfs_i(inode);
 320
 321	/* LogFile should not contain an attribute list. */
 322	err = ni_load_all_mi(sbi->mft.ni);
 323	if (!err)
 324		err = log_replay(ni, &initialized);
 325
 326	iput(inode);
 327	sbi->mft.ni = NULL;
 328
 329	sync_blockdev(sb->s_bdev);
 330	invalidate_bdev(sb->s_bdev);
 331
 332	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
 333		err = 0;
 334		goto out;
 335	}
 336
 337	if (sb_rdonly(sb) || !initialized)
 338		goto out;
 339
 340	/* Fill LogFile with -1 if it is initialized. */
 341	err = ntfs_bio_fill_1(sbi, &ni->file.run);
 342
 343out:
 344	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
 345
 346	return err;
 347}
 348
 349/*
 350 * ntfs_look_for_free_space - Look for a free space in bitmap.
 351 */
 352int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
 353			     CLST *new_lcn, CLST *new_len,
 354			     enum ALLOCATE_OPT opt)
 355{
 356	int err;
 357	CLST alen;
 358	struct super_block *sb = sbi->sb;
 359	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
 360	struct wnd_bitmap *wnd = &sbi->used.bitmap;
 361
 362	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
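	/* MFT allocations are carved from the reserved MFT zone first. */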
 363	if (opt & ALLOCATE_MFT) {
 364		zlen = wnd_zone_len(wnd);
 365
 366		if (!zlen) {
 367			err = ntfs_refresh_zone(sbi);
 368			if (err)
 369				goto up_write;
 370
 371			zlen = wnd_zone_len(wnd);
 372		}
 373
 374		if (!zlen) {
 375			ntfs_err(sbi->sb, "no free space to extend mft");
 376			err = -ENOSPC;
 377			goto up_write;
 378		}
 379
 380		lcn = wnd_zone_bit(wnd);
 381		alen = min_t(CLST, len, zlen);
 382
 383		wnd_zone_set(wnd, lcn + alen, zlen - alen);
 384
 385		err = wnd_set_used(wnd, lcn, alen);
 386		if (err)
 387			goto up_write;
 388
 389		alcn = lcn;
 390		goto space_found;
 391	}
 392	/*
 393	 * Because cluster 0 is always in use, lcn == 0 means that we should
 394	 * use the cached value of 'next_free_lcn' to improve performance.
 395	 */
 396	if (!lcn)
 397		lcn = sbi->used.next_free_lcn;
 398
 399	if (lcn >= wnd->nbits)
 400		lcn = 0;
 401
 402	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
 403	if (alen)
 404		goto space_found;
 405
 406	/* Try to use clusters from MftZone. */
 407	zlen = wnd_zone_len(wnd);
 408	zeroes = wnd_zeroes(wnd);
 409
 410	/* Reject a request that is too big. */
 411	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
 412		err = -ENOSPC;
 413		goto up_write;
 414	}
 415
 416	/* How many clusters to cut from the zone. */
 417	zlcn = wnd_zone_bit(wnd);
 418	zlen2 = zlen >> 1;
 419	ztrim = clamp_val(len, zlen2, zlen);
 420	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
 421
 422	wnd_zone_set(wnd, zlcn, new_zlen);
 423
 424	/* Allocate contiguous clusters. */
 425	alen = wnd_find(wnd, len, 0,
 426			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
 427	if (!alen) {
 428		err = -ENOSPC;
 429		goto up_write;
 430	}
 431
 432space_found:
 433	err = 0;
 434	*new_len = alen;
 435	*new_lcn = alcn;
 436
 437	ntfs_unmap_meta(sb, alcn, alen);
 438
 439	/* Set hint for next requests. */
 440	if (!(opt & ALLOCATE_MFT))
 441		sbi->used.next_free_lcn = alcn + alen;
 442up_write:
 443	up_write(&wnd->rw_lock);
 444	return err;
 445}
 446
 447/*
 448 * ntfs_check_for_free_space
 449 *
 450 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 451 */
 452bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
 453{
 454	size_t free, zlen, avail;
 455	struct wnd_bitmap *wnd;
 456
 457	wnd = &sbi->used.bitmap;
 458	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
 459	free = wnd_zeroes(wnd);
 460	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
 461	up_read(&wnd->rw_lock);
 462
 463	if (free < zlen + clen)
 464		return false;
 465
 466	avail = free - (zlen + clen);
 467
 468	wnd = &sbi->mft.bitmap;
 469	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
 470	free = wnd_zeroes(wnd);
 471	zlen = wnd_zone_len(wnd);
 472	up_read(&wnd->rw_lock);
 473
 474	if (free >= zlen + mlen)
 475		return true;
 476
 477	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
 478}
 479
 480/*
 481 * ntfs_extend_mft - Allocate additional MFT records.
 482 *
 483 * sbi->mft.bitmap is locked for write.
 484 *
 485 * NOTE: recursive:
 486 *	ntfs_look_free_mft ->
 487 *	ntfs_extend_mft ->
 488 *	attr_set_size ->
 489 *	ni_insert_nonresident ->
 490 *	ni_insert_attr ->
 491 *	ni_ins_attr_ext ->
 492 *	ntfs_look_free_mft ->
 493 *	ntfs_extend_mft
 494 *
 495 * To avoid recursion, always allocate space for two new MFT records;
 496 * see attrib.c: "at least two MFT to avoid recursive loop".
 497 */
 498static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
 499{
 500	int err;
 501	struct ntfs_inode *ni = sbi->mft.ni;
 502	size_t new_mft_total;
 503	u64 new_mft_bytes, new_bitmap_bytes;
 504	struct ATTRIB *attr;
 505	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
 506
 507	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
 508	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
 509
 510	/* Step 1: Resize $MFT::DATA. */
 511	down_write(&ni->file.run_lock);
 512	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 513			    new_mft_bytes, NULL, false, &attr);
 514
 515	if (err) {
 516		up_write(&ni->file.run_lock);
 517		goto out;
 518	}
 519
 520	attr->nres.valid_size = attr->nres.data_size;
 521	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
 522	ni->mi.dirty = true;
 523
 524	/* Step 2: Resize $MFT::BITMAP. */
 525	new_bitmap_bytes = bitmap_size(new_mft_total);
 526
 527	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
 528			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
 529
 530	/* Refresh MFT Zone if necessary. */
 531	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
 532
 533	ntfs_refresh_zone(sbi);
 534
 535	up_write(&sbi->used.bitmap.rw_lock);
 536	up_write(&ni->file.run_lock);
 537
 538	if (err)
 539		goto out;
 540
 541	err = wnd_extend(wnd, new_mft_total);
 542
 543	if (err)
 544		goto out;
 545
 546	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
 547
 548	err = _ni_write_inode(&ni->vfs_inode, 0);
 549out:
 550	return err;
 551}
 552
 553/*
 554 * ntfs_look_free_mft - Look for a free MFT record.
 555 */
 556int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
 557		       struct ntfs_inode *ni, struct mft_inode **mi)
 558{
 559	int err = 0;
 560	size_t zbit, zlen, from, to, fr;
 561	size_t mft_total;
 562	struct MFT_REF ref;
 563	struct super_block *sb = sbi->sb;
 564	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
 565	u32 ir;
 566
 567	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
 568		      MFT_REC_FREE - MFT_REC_RESERVED);
 569
 570	if (!mft)
 571		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
 572
 573	zlen = wnd_zone_len(wnd);
 574
 575	/* Always reserve space for MFT. */
 576	if (zlen) {
 577		if (mft) {
 578			zbit = wnd_zone_bit(wnd);
 579			*rno = zbit;
 580			wnd_zone_set(wnd, zbit + 1, zlen - 1);
 581		}
 582		goto found;
 583	}
 584
 585	/* No MFT zone. Find the free MFT record nearest to 0. */
 586	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
 587		/* Resize MFT */
 588		mft_total = wnd->nbits;
 589
 590		err = ntfs_extend_mft(sbi);
 591		if (!err) {
 592			zbit = mft_total;
 593			goto reserve_mft;
 594		}
 595
 596		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
 597			goto out;
 598
 599		err = 0;
 600
 601		/*
 602		 * Look for a free record in the reserved area [11-16) ==
 603		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
 604		 * marks this area as used.
 605		 */
 606		if (!sbi->mft.reserved_bitmap) {
 607			/* Once per session, create an internal bitmap for 5 bits. */
 608			sbi->mft.reserved_bitmap = 0xFF;
 609
 610			ref.high = 0;
 611			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
 612				struct inode *i;
 613				struct ntfs_inode *ni;
 614				struct MFT_REC *mrec;
 615
 616				ref.low = cpu_to_le32(ir);
 617				ref.seq = cpu_to_le16(ir);
 618
 619				i = ntfs_iget5(sb, &ref, NULL);
 620				if (IS_ERR(i)) {
 621next:
 622					ntfs_notice(
 623						sb,
 624						"Invalid reserved record %x",
 625						ref.low);
 626					continue;
 627				}
 628				if (is_bad_inode(i)) {
 629					iput(i);
 630					goto next;
 631				}
 632
 633				ni = ntfs_i(i);
 634
 635				mrec = ni->mi.mrec;
 636
 637				if (!is_rec_base(mrec))
 638					goto next;
 639
 640				if (mrec->hard_links)
 641					goto next;
 642
 643				if (!ni_std(ni))
 644					goto next;
 645
 646				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
 647						 NULL, 0, NULL, NULL))
 648					goto next;
 649
 650				__clear_bit(ir - MFT_REC_RESERVED,
 651					    &sbi->mft.reserved_bitmap);
 652			}
 653		}
 654
 655		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
 656		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
 657					  MFT_REC_FREE, MFT_REC_RESERVED);
 658		if (zbit >= MFT_REC_FREE) {
 659			sbi->mft.next_reserved = MFT_REC_FREE;
 660			goto out;
 661		}
 662
 663		zlen = 1;
 664		sbi->mft.next_reserved = zbit;
 665	} else {
 666reserve_mft:
 667		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
 668		if (zbit + zlen > wnd->nbits)
 669			zlen = wnd->nbits - zbit;
 670
 671		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
 672			zlen -= 1;
 673
 674		/* [zbit, zbit + zlen) will be used for MFT itself. */
 675		from = sbi->mft.used;
 676		if (from < zbit)
 677			from = zbit;
 678		to = zbit + zlen;
 679		if (from < to) {
 680			ntfs_clear_mft_tail(sbi, from, to);
 681			sbi->mft.used = to;
 682		}
 683	}
 684
 685	if (mft) {
 686		*rno = zbit;
 687		zbit += 1;
 688		zlen -= 1;
 689	}
 690
 691	wnd_zone_set(wnd, zbit, zlen);
 692
 693found:
 694	if (!mft) {
 695		/* The request is to get a record for general purposes. */
 696		if (sbi->mft.next_free < MFT_REC_USER)
 697			sbi->mft.next_free = MFT_REC_USER;
 698
 699		for (;;) {
 700			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
 701			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
 702				sbi->mft.next_free = sbi->mft.bitmap.nbits;
 703			} else {
 704				*rno = fr;
 705				sbi->mft.next_free = *rno + 1;
 706				break;
 707			}
 708
 709			err = ntfs_extend_mft(sbi);
 710			if (err)
 711				goto out;
 712		}
 713	}
 714
 715	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
 716		err = -ENOMEM;
 717		goto out;
 718	}
 719
 720	/* We have found a record that is not reserved for the next MFT. */
 721	if (*rno >= MFT_REC_FREE)
 722		wnd_set_used(wnd, *rno, 1);
 723	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
 724		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
 725
 726out:
 727	if (!mft)
 728		up_write(&wnd->rw_lock);
 729
 730	return err;
 731}
 732
 733/*
 734 * ntfs_mark_rec_free - Mark record as free.
 735 * is_mft - true if we are changing MFT
 736 */
 737void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
 738{
 739	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
 740
 741	if (!is_mft)
 742		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
 743	if (rno >= wnd->nbits)
 744		goto out;
 745
 746	if (rno >= MFT_REC_FREE) {
 747		if (!wnd_is_used(wnd, rno, 1))
 748			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 749		else
 750			wnd_set_free(wnd, rno, 1);
 751	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
 752		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
 753	}
 754
 755	if (rno < wnd_zone_bit(wnd))
 756		wnd_zone_set(wnd, rno, 1);
 757	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
 758		sbi->mft.next_free = rno;
 759
 760out:
 761	if (!is_mft)
 762		up_write(&wnd->rw_lock);
 763}
 764
 765/*
 766 * ntfs_clear_mft_tail - Format empty records [from, to).
 767 *
 768 * sbi->mft.bitmap is locked for write.
 769 */
 770int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
 771{
 772	int err;
 773	u32 rs;
 774	u64 vbo;
 775	struct runs_tree *run;
 776	struct ntfs_inode *ni;
 777
 778	if (from >= to)
 779		return 0;
 780
 781	rs = sbi->record_size;
 782	ni = sbi->mft.ni;
 783	run = &ni->file.run;
 784
 785	down_read(&ni->file.run_lock);
 786	vbo = (u64)from * rs;
 787	for (; from < to; from++, vbo += rs) {
 788		struct ntfs_buffers nb;
 789
 790		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
 791		if (err)
 792			goto out;
 793
 794		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
 795		nb_put(&nb);
 796		if (err)
 797			goto out;
 798	}
 799
 800out:
 801	sbi->mft.used = from;
 802	up_read(&ni->file.run_lock);
 803	return err;
 804}
 805
 806/*
 807 * ntfs_refresh_zone - Refresh MFT zone.
 808 *
 809 * sbi->used.bitmap is locked for rw.
 810 * sbi->mft.bitmap is locked for write.
 811 * sbi->mft.ni->file.run_lock for write.
 812 */
 813int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
 814{
 815	CLST lcn, vcn, len;
 816	size_t lcn_s, zlen;
 817	struct wnd_bitmap *wnd = &sbi->used.bitmap;
 818	struct ntfs_inode *ni = sbi->mft.ni;
 819
 820	/* Nothing to do if the MFT zone is already non-empty. */
 821	if (wnd_zone_len(wnd))
 822		return 0;
 823
 824	vcn = bytes_to_cluster(sbi,
 825			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
 826
 827	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
 828		lcn = SPARSE_LCN;
 829
 830	/* We should always find the last LCN of the MFT. */
 831	if (lcn == SPARSE_LCN)
 832		return -EINVAL;
 833
 834	lcn_s = lcn + 1;
 835
 836	/* Try to allocate clusters after last MFT run. */
 837	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
 838	wnd_zone_set(wnd, lcn_s, zlen);
 839
 840	return 0;
 841}
 842
 843/*
 844 * ntfs_update_mftmirr - Update $MFTMirr data.
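 *
 * Copy the first sbi->mft.recs_mirr MFT records from $MFT to its
 * $MFTMirr copy, block by block, through the buffer cache.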
 845 */
 846void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
 847{
 848	int err;
 849	struct super_block *sb = sbi->sb;
 850	u32 blocksize, bytes;
 851	sector_t block1, block2;
 852
 853	/*
 854	 * sb can be NULL here. In this case sbi->flags should be 0 too.
 855	 */
 856	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
 857	    unlikely(ntfs3_forced_shutdown(sb)))
 858		return;
 859
 860	blocksize = sb->s_blocksize;
 861	bytes = sbi->mft.recs_mirr << sbi->record_bits;
 862	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
 863	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
 864
 865	for (; bytes >= blocksize; bytes -= blocksize) {
 866		struct buffer_head *bh1, *bh2;
 867
 868		bh1 = sb_bread(sb, block1++);
 869		if (!bh1)
 870			return;
 871
 872		bh2 = sb_getblk(sb, block2++);
 873		if (!bh2) {
 874			put_bh(bh1);
 875			return;
 876		}
 877
 878		if (buffer_locked(bh2))
 879			__wait_on_buffer(bh2);
 880
 881		lock_buffer(bh2);
 882		memcpy(bh2->b_data, bh1->b_data, blocksize);
 883		set_buffer_uptodate(bh2);
 884		mark_buffer_dirty(bh2);
 885		unlock_buffer(bh2);
 886
 887		put_bh(bh1);
 888		bh1 = NULL;
 889
 890		err = wait ? sync_dirty_buffer(bh2) : 0;
 891
 892		put_bh(bh2);
 893		if (err)
 894			return;
 895	}
 896
 897	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
 898}
 899
 900/*
 901 * ntfs_bad_inode
 902 *
 903 * Mark the inode as bad and the filesystem as 'dirty'.
 904 */
 905void ntfs_bad_inode(struct inode *inode, const char *hint)
 906{
 907	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
 908
 909	ntfs_inode_err(inode, "%s", hint);
 910	make_bad_inode(inode);
 911	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 912}
 913
 914/*
 915 * ntfs_set_state
 916 *
 917 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 918 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 919 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 920 */
 921int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
 922{
 923	int err;
 924	struct ATTRIB *attr;
 925	struct VOLUME_INFO *info;
 926	struct mft_inode *mi;
 927	struct ntfs_inode *ni;
 928	__le16 info_flags;
 929
 930	/*
 931	 * Do not change state if fs was real_dirty.
 932	 * Do not change state if fs is already dirty (or clear).
 933	 * Do not change anything if mounted read-only.
 934	 */
 935	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
 936		return 0;
 937
 938	/* Check cached value. */
 939	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
 940	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
 941		return 0;
 942
 943	ni = sbi->volume.ni;
 944	if (!ni)
 945		return -EINVAL;
 946
 947	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
 948
 949	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
 950	if (!attr) {
 951		err = -EINVAL;
 952		goto out;
 953	}
 954
 955	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
 956	if (!info) {
 957		err = -EINVAL;
 958		goto out;
 959	}
 960
 961	info_flags = info->flags;
 962
 963	switch (dirty) {
 964	case NTFS_DIRTY_ERROR:
 965		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
 966		sbi->volume.real_dirty = true;
 967		fallthrough;
 968	case NTFS_DIRTY_DIRTY:
 969		info->flags |= VOLUME_FLAG_DIRTY;
 970		break;
 971	case NTFS_DIRTY_CLEAR:
 972		info->flags &= ~VOLUME_FLAG_DIRTY;
 973		break;
 974	}
 975	/* Cache current volume flags. */
 976	if (info_flags != info->flags) {
 977		sbi->volume.flags = info->flags;
 978		mi->dirty = true;
 979	}
 980	err = 0;
 981
 982out:
 983	ni_unlock(ni);
 984	if (err)
 985		return err;
 986
 987	mark_inode_dirty_sync(&ni->vfs_inode);
 988	/* verify(!ntfs_update_mftmirr()); */
 989
 990	/* Write the MFT record to disk. */
 991	err = _ni_write_inode(&ni->vfs_inode, 1);
 992
 993	return err;
 994}
 995
 996/*
 997 * security_hash - Calculates a hash of security descriptor.
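 *
 * This is the hash stored with each descriptor in $Secure: rotate the
 * running value left by 3 bits and add the next little-endian dword;
 * trailing bytes that do not fill a whole dword are ignored.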
 998 */
 999static inline __le32 security_hash(const void *sd, size_t bytes)
1000{
1001	u32 hash = 0;
1002	const __le32 *ptr = sd;
1003
1004	bytes >>= 2;
1005	while (bytes--)
1006		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1007	return cpu_to_le32(hash);
1008}
1009
1010/*
1011 * Simple wrapper around sb_bread_unmovable().
1012 */
1013struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
1014{
1015	struct ntfs_sb_info *sbi = sb->s_fs_info;
1016	struct buffer_head *bh;
1017
1018	if (unlikely(block >= sbi->volume.blocks)) {
1019		/* prevent generic message "attempt to access beyond end of device" */
1020		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
1021			 (u64)block << sb->s_blocksize_bits);
1022		return NULL;
1023	}
1024
1025	bh = sb_bread_unmovable(sb, block);
1026	if (bh)
1027		return bh;
1028
1029	ntfs_err(sb, "failed to read volume at offset 0x%llx",
1030		 (u64)block << sb->s_blocksize_bits);
1031	return NULL;
1032}
1033
1034int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1035{
1036	struct block_device *bdev = sb->s_bdev;
1037	u32 blocksize = sb->s_blocksize;
1038	u64 block = lbo >> sb->s_blocksize_bits;
1039	u32 off = lbo & (blocksize - 1);
1040	u32 op = blocksize - off;
1041
1042	for (; bytes; block += 1, off = 0, op = blocksize) {
1043		struct buffer_head *bh = __bread(bdev, block, blocksize);
1044
1045		if (!bh)
1046			return -EIO;
1047
1048		if (op > bytes)
1049			op = bytes;
1050
1051		memcpy(buffer, bh->b_data + off, op);
1052
1053		put_bh(bh);
1054
1055		bytes -= op;
1056		buffer = Add2Ptr(buffer, op);
1057	}
1058
1059	return 0;
1060}
1061
1062int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1063		  const void *buf, int wait)
1064{
1065	u32 blocksize = sb->s_blocksize;
1066	struct block_device *bdev = sb->s_bdev;
1067	sector_t block = lbo >> sb->s_blocksize_bits;
1068	u32 off = lbo & (blocksize - 1);
1069	u32 op = blocksize - off;
1070	struct buffer_head *bh;
1071
1072	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1073		wait = 1;
1074
1075	for (; bytes; block += 1, off = 0, op = blocksize) {
1076		if (op > bytes)
1077			op = bytes;
1078
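		/*
		 * A partial block must be read first so that the bytes
		 * outside [off, off + op) survive (read-modify-write);
		 * a full block can simply be grabbed without reading.
		 */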
1079		if (op < blocksize) {
1080			bh = __bread(bdev, block, blocksize);
1081			if (!bh) {
1082				ntfs_err(sb, "failed to read block %llx",
1083					 (u64)block);
1084				return -EIO;
1085			}
1086		} else {
1087			bh = __getblk(bdev, block, blocksize);
1088			if (!bh)
1089				return -ENOMEM;
1090		}
1091
1092		if (buffer_locked(bh))
1093			__wait_on_buffer(bh);
1094
1095		lock_buffer(bh);
1096		if (buf) {
1097			memcpy(bh->b_data + off, buf, op);
1098			buf = Add2Ptr(buf, op);
1099		} else {
1100			memset(bh->b_data + off, -1, op);
1101		}
1102
1103		set_buffer_uptodate(bh);
1104		mark_buffer_dirty(bh);
1105		unlock_buffer(bh);
1106
1107		if (wait) {
1108			int err = sync_dirty_buffer(bh);
1109
1110			if (err) {
1111				ntfs_err(
1112					sb,
1113					"failed to sync buffer at block %llx, error %d",
1114					(u64)block, err);
1115				put_bh(bh);
1116				return err;
1117			}
1118		}
1119
1120		put_bh(bh);
1121
1122		bytes -= op;
1123	}
1124	return 0;
1125}
1126
1127int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1128		      u64 vbo, const void *buf, size_t bytes, int sync)
1129{
1130	struct super_block *sb = sbi->sb;
1131	u8 cluster_bits = sbi->cluster_bits;
1132	u32 off = vbo & sbi->cluster_mask;
1133	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1134	u64 lbo, len;
1135	size_t idx;
1136
1137	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1138		return -ENOENT;
1139
1140	if (lcn == SPARSE_LCN)
1141		return -EINVAL;
1142
1143	lbo = ((u64)lcn << cluster_bits) + off;
1144	len = ((u64)clen << cluster_bits) - off;
1145
1146	for (;;) {
1147		u32 op = min_t(u64, len, bytes);
1148		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1149
1150		if (err)
1151			return err;
1152
1153		bytes -= op;
1154		if (!bytes)
1155			break;
1156
1157		vcn_next = vcn + clen;
1158		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1159		    vcn != vcn_next)
1160			return -ENOENT;
1161
1162		if (lcn == SPARSE_LCN)
1163			return -EINVAL;
1164
1165		if (buf)
1166			buf = Add2Ptr(buf, op);
1167
1168		lbo = ((u64)lcn << cluster_bits);
1169		len = ((u64)clen << cluster_bits);
1170	}
1171
1172	return 0;
1173}
1174
1175struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1176				   const struct runs_tree *run, u64 vbo)
1177{
1178	struct super_block *sb = sbi->sb;
1179	u8 cluster_bits = sbi->cluster_bits;
1180	CLST lcn;
1181	u64 lbo;
1182
1183	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1184		return ERR_PTR(-ENOENT);
1185
1186	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1187
1188	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1189}
1190
1191int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1192		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1193{
1194	int err;
1195	struct super_block *sb = sbi->sb;
1196	u32 blocksize = sb->s_blocksize;
1197	u8 cluster_bits = sbi->cluster_bits;
1198	u32 off = vbo & sbi->cluster_mask;
1199	u32 nbh = 0;
1200	CLST vcn_next, vcn = vbo >> cluster_bits;
1201	CLST lcn, clen;
1202	u64 lbo, len;
1203	size_t idx;
1204	struct buffer_head *bh;
1205
1206	if (!run) {
1207		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1208		if (vbo > MFT_REC_VOL * sbi->record_size) {
1209			err = -ENOENT;
1210			goto out;
1211		}
1212
1213		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
1214		lbo = vbo + sbi->mft.lbo;
1215		len = sbi->record_size;
1216	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1217		err = -ENOENT;
1218		goto out;
1219	} else {
1220		if (lcn == SPARSE_LCN) {
1221			err = -EINVAL;
1222			goto out;
1223		}
1224
1225		lbo = ((u64)lcn << cluster_bits) + off;
1226		len = ((u64)clen << cluster_bits) - off;
1227	}
1228
1229	off = lbo & (blocksize - 1);
1230	if (nb) {
1231		nb->off = off;
1232		nb->bytes = bytes;
1233	}
1234
1235	for (;;) {
1236		u32 len32 = len >= bytes ? bytes : len;
1237		sector_t block = lbo >> sb->s_blocksize_bits;
1238
1239		do {
1240			u32 op = blocksize - off;
1241
1242			if (op > len32)
1243				op = len32;
1244
1245			bh = ntfs_bread(sb, block);
1246			if (!bh) {
1247				err = -EIO;
1248				goto out;
1249			}
1250
1251			if (buf) {
1252				memcpy(buf, bh->b_data + off, op);
1253				buf = Add2Ptr(buf, op);
1254			}
1255
1256			if (!nb) {
1257				put_bh(bh);
1258			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
1259				err = -EINVAL;
1260				goto out;
1261			} else {
1262				nb->bh[nbh++] = bh;
1263				nb->nbufs = nbh;
1264			}
1265
1266			bytes -= op;
1267			if (!bytes)
1268				return 0;
1269			len32 -= op;
1270			block += 1;
1271			off = 0;
1272
1273		} while (len32);
1274
1275		vcn_next = vcn + clen;
1276		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1277		    vcn != vcn_next) {
1278			err = -ENOENT;
1279			goto out;
1280		}
1281
1282		if (lcn == SPARSE_LCN) {
1283			err = -EINVAL;
1284			goto out;
1285		}
1286
1287		lbo = ((u64)lcn << cluster_bits);
1288		len = ((u64)clen << cluster_bits);
1289	}
1290
1291out:
1292	if (!nbh)
1293		return err;
1294
1295	while (nbh) {
1296		put_bh(nb->bh[--nbh]);
1297		nb->bh[nbh] = NULL;
1298	}
1299
1300	nb->nbufs = 0;
1301	return err;
1302}
1303
1304/*
1305 * ntfs_read_bh
1306 *
1307 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1308 */
1309int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1310		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1311		 struct ntfs_buffers *nb)
1312{
1313	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1314
1315	if (err)
1316		return err;
1317	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1318}
1319
1320int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1321		u32 bytes, struct ntfs_buffers *nb)
1322{
1323	int err = 0;
1324	struct super_block *sb = sbi->sb;
1325	u32 blocksize = sb->s_blocksize;
1326	u8 cluster_bits = sbi->cluster_bits;
1327	CLST vcn_next, vcn = vbo >> cluster_bits;
1328	u32 off;
1329	u32 nbh = 0;
1330	CLST lcn, clen;
1331	u64 lbo, len;
1332	size_t idx;
1333
1334	nb->bytes = bytes;
1335
1336	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1337		err = -ENOENT;
1338		goto out;
1339	}
1340
1341	off = vbo & sbi->cluster_mask;
1342	lbo = ((u64)lcn << cluster_bits) + off;
1343	len = ((u64)clen << cluster_bits) - off;
1344
1345	nb->off = off = lbo & (blocksize - 1);
1346
1347	for (;;) {
1348		u32 len32 = min_t(u64, len, bytes);
1349		sector_t block = lbo >> sb->s_blocksize_bits;
1350
1351		do {
1352			u32 op;
1353			struct buffer_head *bh;
1354
1355			if (nbh >= ARRAY_SIZE(nb->bh)) {
1356				err = -EINVAL;
1357				goto out;
1358			}
1359
1360			op = blocksize - off;
1361			if (op > len32)
1362				op = len32;
1363
1364			if (op == blocksize) {
1365				bh = sb_getblk(sb, block);
1366				if (!bh) {
1367					err = -ENOMEM;
1368					goto out;
1369				}
1370				if (buffer_locked(bh))
1371					__wait_on_buffer(bh);
1372				set_buffer_uptodate(bh);
1373			} else {
1374				bh = ntfs_bread(sb, block);
1375				if (!bh) {
1376					err = -EIO;
1377					goto out;
1378				}
1379			}
1380
1381			nb->bh[nbh++] = bh;
1382			bytes -= op;
1383			if (!bytes) {
1384				nb->nbufs = nbh;
1385				return 0;
1386			}
1387
1388			block += 1;
1389			len32 -= op;
1390			off = 0;
1391		} while (len32);
1392
1393		vcn_next = vcn + clen;
1394		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1395		    vcn != vcn_next) {
1396			err = -ENOENT;
1397			goto out;
1398		}
1399
1400		lbo = ((u64)lcn << cluster_bits);
1401		len = ((u64)clen << cluster_bits);
1402	}
1403
1404out:
1405	while (nbh) {
1406		put_bh(nb->bh[--nbh]);
1407		nb->bh[nbh] = NULL;
1408	}
1409
1410	nb->nbufs = 0;
1411
1412	return err;
1413}
1414
1415int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1416		  struct ntfs_buffers *nb, int sync)
1417{
1418	int err = 0;
1419	struct super_block *sb = sbi->sb;
1420	u32 block_size = sb->s_blocksize;
1421	u32 bytes = nb->bytes;
1422	u32 off = nb->off;
1423	u16 fo = le16_to_cpu(rhdr->fix_off);
1424	u16 fn = le16_to_cpu(rhdr->fix_num);
1425	u32 idx;
1426	__le16 *fixup;
1427	__le16 sample;
1428
1429	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1430	    fn * SECTOR_SIZE > bytes) {
1431		return -EINVAL;
1432	}
1433
1434	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1435		u32 op = block_size - off;
1436		char *bh_data;
1437		struct buffer_head *bh = nb->bh[idx];
1438		__le16 *ptr, *end_data;
1439
1440		if (op > bytes)
1441			op = bytes;
1442
1443		if (buffer_locked(bh))
1444			__wait_on_buffer(bh);
1445
1446		lock_buffer(bh);
1447
1448		bh_data = bh->b_data + off;
1449		end_data = Add2Ptr(bh_data, op);
1450		memcpy(bh_data, rhdr, op);
1451
1452		if (!idx) {
1453			u16 t16;
1454
1455			fixup = Add2Ptr(bh_data, fo);
1456			sample = *fixup;
1457			t16 = le16_to_cpu(sample);
1458			if (t16 >= 0x7FFF) {
1459				sample = *fixup = cpu_to_le16(1);
1460			} else {
1461				sample = cpu_to_le16(t16 + 1);
1462				*fixup = sample;
1463			}
1464
1465			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
1466		}
1467
1468		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1469
1470		do {
1471			*++fixup = *ptr;
1472			*ptr = sample;
1473			ptr += SECTOR_SIZE / sizeof(short);
1474		} while (ptr < end_data);
1475
1476		set_buffer_uptodate(bh);
1477		mark_buffer_dirty(bh);
1478		unlock_buffer(bh);
1479
1480		if (sync) {
1481			int err2 = sync_dirty_buffer(bh);
1482
1483			if (!err && err2)
1484				err = err2;
1485		}
1486
1487		bytes -= op;
1488		rhdr = Add2Ptr(rhdr, op);
1489	}
1490
1491	return err;
1492}
1493
1494/*
1495 * ntfs_bio_pages - Read/write pages from/to disk.
1496 */
1497int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1498		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1499		   enum req_op op)
1500{
1501	int err = 0;
1502	struct bio *new, *bio = NULL;
1503	struct super_block *sb = sbi->sb;
1504	struct block_device *bdev = sb->s_bdev;
1505	struct page *page;
1506	u8 cluster_bits = sbi->cluster_bits;
1507	CLST lcn, clen, vcn, vcn_next;
1508	u32 add, off, page_idx;
1509	u64 lbo, len;
1510	size_t run_idx;
1511	struct blk_plug plug;
1512
1513	if (!bytes)
1514		return 0;
1515
1516	blk_start_plug(&plug);
1517
1518	/* Round vbo down and vbo + bytes up to a 512-byte boundary. */
1519	lbo = (vbo + bytes + 511) & ~511ull;
1520	vbo = vbo & ~511ull;
1521	bytes = lbo - vbo;
1522
1523	vcn = vbo >> cluster_bits;
1524	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1525		err = -ENOENT;
1526		goto out;
1527	}
1528	off = vbo & sbi->cluster_mask;
1529	page_idx = 0;
1530	page = pages[0];
1531
1532	for (;;) {
1533		lbo = ((u64)lcn << cluster_bits) + off;
1534		len = ((u64)clen << cluster_bits) - off;
1535new_bio:
1536		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1537		if (bio) {
1538			bio_chain(bio, new);
1539			submit_bio(bio);
1540		}
1541		bio = new;
1542		bio->bi_iter.bi_sector = lbo >> 9;
1543
1544		while (len) {
1545			off = vbo & (PAGE_SIZE - 1);
1546			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1547
1548			if (bio_add_page(bio, page, add, off) < add)
1549				goto new_bio;
1550
1551			if (bytes <= add)
1552				goto out;
1553			bytes -= add;
1554			vbo += add;
1555
1556			if (add + off == PAGE_SIZE) {
1557				page_idx += 1;
1558				if (WARN_ON(page_idx >= nr_pages)) {
1559					err = -EINVAL;
1560					goto out;
1561				}
1562				page = pages[page_idx];
1563			}
1564
1565			if (len <= add)
1566				break;
1567			len -= add;
1568			lbo += add;
1569		}
1570
1571		vcn_next = vcn + clen;
1572		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1573		    vcn != vcn_next) {
1574			err = -ENOENT;
1575			goto out;
1576		}
1577		off = 0;
1578	}
1579out:
1580	if (bio) {
1581		if (!err)
1582			err = submit_bio_wait(bio);
1583		bio_put(bio);
1584	}
1585	blk_finish_plug(&plug);
1586
1587	return err;
1588}
1589
1590/*
1591 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1592 *
1593 * Fill the on-disk logfile range with -1,
1594 * which means an empty logfile.
1595 */
1596int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1597{
1598	int err = 0;
1599	struct super_block *sb = sbi->sb;
1600	struct block_device *bdev = sb->s_bdev;
1601	u8 cluster_bits = sbi->cluster_bits;
1602	struct bio *new, *bio = NULL;
1603	CLST lcn, clen;
1604	u64 lbo, len;
1605	size_t run_idx;
1606	struct page *fill;
1607	void *kaddr;
1608	struct blk_plug plug;
1609
1610	fill = alloc_page(GFP_KERNEL);
1611	if (!fill)
1612		return -ENOMEM;
1613
1614	kaddr = kmap_atomic(fill);
1615	memset(kaddr, -1, PAGE_SIZE);
1616	kunmap_atomic(kaddr);
1617	flush_dcache_page(fill);
1618	lock_page(fill);
1619
1620	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1621		err = -ENOENT;
1622		goto out;
1623	}
1624
1625	/*
1626	 * TODO: Try blkdev_issue_write_same.
1627	 */
1628	blk_start_plug(&plug);
1629	do {
1630		lbo = (u64)lcn << cluster_bits;
1631		len = (u64)clen << cluster_bits;
1632new_bio:
1633		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1634		if (bio) {
1635			bio_chain(bio, new);
1636			submit_bio(bio);
1637		}
1638		bio = new;
1639		bio->bi_iter.bi_sector = lbo >> 9;
1640
1641		for (;;) {
1642			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1643
1644			if (bio_add_page(bio, fill, add, 0) < add)
1645				goto new_bio;
1646
1647			lbo += add;
1648			if (len <= add)
1649				break;
1650			len -= add;
1651		}
1652	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1653
1654	if (!err)
1655		err = submit_bio_wait(bio);
1656	bio_put(bio);
1657
1658	blk_finish_plug(&plug);
1659out:
1660	unlock_page(fill);
1661	put_page(fill);
1662
1663	return err;
1664}
1665
1666int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1667		    u64 vbo, u64 *lbo, u64 *bytes)
1668{
1669	u32 off;
1670	CLST lcn, len;
1671	u8 cluster_bits = sbi->cluster_bits;
1672
1673	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1674		return -ENOENT;
1675
1676	off = vbo & sbi->cluster_mask;
1677	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1678	*bytes = ((u64)len << cluster_bits) - off;
1679
1680	return 0;
1681}
1682
1683struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
1684				  enum RECORD_FLAG flag)
1685{
1686	int err = 0;
1687	struct super_block *sb = sbi->sb;
1688	struct inode *inode = new_inode(sb);
1689	struct ntfs_inode *ni;
1690
1691	if (!inode)
1692		return ERR_PTR(-ENOMEM);
1693
1694	ni = ntfs_i(inode);
1695
1696	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
1697	if (err)
1698		goto out;
1699
1700	inode->i_ino = rno;
1701	if (insert_inode_locked(inode) < 0) {
1702		err = -EIO;
1703		goto out;
1704	}
1705
1706out:
1707	if (err) {
1708		make_bad_inode(inode);
1709		iput(inode);
1710		ni = ERR_PTR(err);
1711	}
1712	return ni;
1713}
1714
1715/*
1716 * O:BAG:BAD:(A;OICI;FA;;;WD)
1717 * Owner S-1-5-32-544 (Administrators)
1718 * Group S-1-5-32-544 (Administrators)
1719 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1720 */
1721const u8 s_default_security[] __aligned(8) = {
1722	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1723	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1724	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1725	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1726	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1727	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1728	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1729};
1730
1731static_assert(sizeof(s_default_security) == 0x50);
1732
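/* Return the on-disk size of a SID, including all of its sub-authorities. */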
1733static inline u32 sid_length(const struct SID *sid)
1734{
1735	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1736}
1737
1738/*
1739 * is_acl_valid
1740 *
1741 * Thanks to Mark Harmstone for the idea.
1742 */
1743static bool is_acl_valid(const struct ACL *acl, u32 len)
1744{
1745	const struct ACE_HEADER *ace;
1746	u32 i;
1747	u16 ace_count, ace_size;
1748
1749	if (acl->AclRevision != ACL_REVISION &&
1750	    acl->AclRevision != ACL_REVISION_DS) {
1751		/*
1752		 * This value should be ACL_REVISION, unless the ACL contains an
1753		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1754		 * All ACEs in an ACL must be at the same revision level.
1755		 */
1756		return false;
1757	}
1758
1759	if (acl->Sbz1)
1760		return false;
1761
1762	if (le16_to_cpu(acl->AclSize) > len)
1763		return false;
1764
1765	if (acl->Sbz2)
1766		return false;
1767
1768	len -= sizeof(struct ACL);
1769	ace = (struct ACE_HEADER *)&acl[1];
1770	ace_count = le16_to_cpu(acl->AceCount);
1771
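	/* Walk each ACE header and check that it fits in the remaining length. */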
1772	for (i = 0; i < ace_count; i++) {
1773		if (len < sizeof(struct ACE_HEADER))
1774			return false;
1775
1776		ace_size = le16_to_cpu(ace->AceSize);
1777		if (len < ace_size)
1778			return false;
1779
1780		len -= ace_size;
1781		ace = Add2Ptr(ace, ace_size);
1782	}
1783
1784	return true;
1785}
1786
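/*
 * is_sd_valid - Validate a self-relative security descriptor: the revision,
 * control flags, and every embedded offset (owner, group, SACL, DACL) must
 * lie within 'len' bytes.
 */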
1787bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1788{
1789	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1790
1791	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1792		return false;
1793
1794	if (sd->Revision != 1)
1795		return false;
1796
1797	if (sd->Sbz1)
1798		return false;
1799
1800	if (!(sd->Control & SE_SELF_RELATIVE))
1801		return false;
1802
1803	sd_owner = le32_to_cpu(sd->Owner);
1804	if (sd_owner) {
1805		const struct SID *owner = Add2Ptr(sd, sd_owner);
1806
1807		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1808			return false;
1809
1810		if (owner->Revision != 1)
1811			return false;
1812
1813		if (sd_owner + sid_length(owner) > len)
1814			return false;
1815	}
1816
1817	sd_group = le32_to_cpu(sd->Group);
1818	if (sd_group) {
1819		const struct SID *group = Add2Ptr(sd, sd_group);
1820
1821		if (sd_group + offsetof(struct SID, SubAuthority) > len)
1822			return false;
1823
1824		if (group->Revision != 1)
1825			return false;
1826
1827		if (sd_group + sid_length(group) > len)
1828			return false;
1829	}
1830
1831	sd_sacl = le32_to_cpu(sd->Sacl);
1832	if (sd_sacl) {
1833		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1834
1835		if (sd_sacl + sizeof(struct ACL) > len)
1836			return false;
1837
1838		if (!is_acl_valid(sacl, len - sd_sacl))
1839			return false;
1840	}
1841
1842	sd_dacl = le32_to_cpu(sd->Dacl);
1843	if (sd_dacl) {
1844		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1845
1846		if (sd_dacl + sizeof(struct ACL) > len)
1847			return false;
1848
1849		if (!is_acl_valid(dacl, len - sd_dacl))
1850			return false;
1851	}
1852
1853	return true;
1854}
1855
1856/*
1857 * ntfs_security_init - Load and parse $Secure.
1858 */
1859int ntfs_security_init(struct ntfs_sb_info *sbi)
1860{
1861	int err;
1862	struct super_block *sb = sbi->sb;
1863	struct inode *inode;
1864	struct ntfs_inode *ni;
1865	struct MFT_REF ref;
1866	struct ATTRIB *attr;
1867	struct ATTR_LIST_ENTRY *le;
1868	u64 sds_size;
1869	size_t off;
1870	struct NTFS_DE *ne;
1871	struct NTFS_DE_SII *sii_e;
1872	struct ntfs_fnd *fnd_sii = NULL;
1873	const struct INDEX_ROOT *root_sii;
1874	const struct INDEX_ROOT *root_sdh;
1875	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1876	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1877
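	/* Build the MFT reference for the $Secure system file. */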
1878	ref.low = cpu_to_le32(MFT_REC_SECURE);
1879	ref.high = 0;
1880	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1881
1882	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1883	if (IS_ERR(inode)) {
1884		err = PTR_ERR(inode);
1885		ntfs_err(sb, "Failed to load $Secure (%d).", err);
1886		inode = NULL;
1887		goto out;
1888	}
1889
1890	ni = ntfs_i(inode);
1891
1892	le = NULL;
1893
1894	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1895			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1896	if (!attr ||
1897	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1898	    root_sdh->type != ATTR_ZERO ||
1899	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1900	    offsetof(struct INDEX_ROOT, ihdr) +
1901			    le32_to_cpu(root_sdh->ihdr.used) >
1902		    le32_to_cpu(attr->res.data_size)) {
1903		ntfs_err(sb, "$Secure::$SDH is corrupted.");
1904		err = -EINVAL;
1905		goto out;
1906	}
1907
1908	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1909	if (err) {
1910		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
1911		goto out;
1912	}
1913
1914	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1915			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1916	if (!attr ||
1917	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1918	    root_sii->type != ATTR_ZERO ||
1919	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1920	    offsetof(struct INDEX_ROOT, ihdr) +
1921			    le32_to_cpu(root_sii->ihdr.used) >
1922		    le32_to_cpu(attr->res.data_size)) {
1923		ntfs_err(sb, "$Secure::$SII is corrupted.");
1924		err = -EINVAL;
1925		goto out;
1926	}
1927
1928	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1929	if (err) {
1930		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
1931		goto out;
1932	}
1933
1934	fnd_sii = fnd_get();
1935	if (!fnd_sii) {
1936		err = -ENOMEM;
1937		goto out;
1938	}
1939
1940	sds_size = inode->i_size;
1941
1942	/* Find the last valid Id. */
1943	sbi->security.next_id = SECURITY_ID_FIRST;
1944	/* Always write new security at the end of the bucket. */
1945	sbi->security.next_off =
1946		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1947
1948	off = 0;
1949	ne = NULL;
1950
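	/*
	 * Enumerate the $SII entries to find the largest security id in use;
	 * the next free id is one past it.
	 */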
1951	for (;;) {
1952		u32 next_id;
1953
1954		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1955		if (err || !ne)
1956			break;
1957
1958		sii_e = (struct NTFS_DE_SII *)ne;
1959		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
1960			continue;
1961
1962		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1963		if (next_id >= sbi->security.next_id)
1964			sbi->security.next_id = next_id;
1965	}
1966
1967	sbi->security.ni = ni;
1968	inode = NULL;
1969out:
1970	iput(inode);
1971	fnd_put(fnd_sii);
1972
1973	return err;
1974}
1975
1976/*
1977 * ntfs_get_security_by_id - Read security descriptor by id.
1978 */
1979int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1980			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1981			    size_t *size)
1982{
1983	int err;
1984	int diff;
1985	struct ntfs_inode *ni = sbi->security.ni;
1986	struct ntfs_index *indx = &sbi->security.index_sii;
1987	void *p = NULL;
1988	struct NTFS_DE_SII *sii_e;
1989	struct ntfs_fnd *fnd_sii;
1990	struct SECURITY_HDR d_security;
1991	const struct INDEX_ROOT *root_sii;
1992	u32 t32;
1993
1994	*sd = NULL;
1995
1996	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1997
1998	fnd_sii = fnd_get();
1999	if (!fnd_sii) {
2000		err = -ENOMEM;
2001		goto out;
2002	}
2003
2004	root_sii = indx_get_root(indx, ni, NULL, NULL);
2005	if (!root_sii) {
2006		err = -EINVAL;
2007		goto out;
2008	}
2009
2010	/* Try to find this SECURITY descriptor in SII indexes. */
2011	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
2012			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
2013	if (err)
2014		goto out;
2015
2016	if (diff)
2017		goto out;
2018
2019	t32 = le32_to_cpu(sii_e->sec_hdr.size);
2020	if (t32 < sizeof(struct SECURITY_HDR)) {
2021		err = -EINVAL;
2022		goto out;
2023	}
2024
2025	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
2026		/* Security descriptor is too large; 0x10000 is an arbitrary upper bound. */
2027		err = -EFBIG;
2028		goto out;
2029	}
2030
2031	*size = t32 - sizeof(struct SECURITY_HDR);
2032
2033	p = kmalloc(*size, GFP_NOFS);
2034	if (!p) {
2035		err = -ENOMEM;
2036		goto out;
2037	}
2038
2039	err = ntfs_read_run_nb(sbi, &ni->file.run,
2040			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2041			       sizeof(d_security), NULL);
2042	if (err)
2043		goto out;
2044
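	/*
	 * The header cached in the $SII entry must match the header stored
	 * at the start of the descriptor in $SDS.
	 */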
2045	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
2046		err = -EINVAL;
2047		goto out;
2048	}
2049
2050	err = ntfs_read_run_nb(sbi, &ni->file.run,
2051			       le64_to_cpu(sii_e->sec_hdr.off) +
2052				       sizeof(struct SECURITY_HDR),
2053			       p, *size, NULL);
2054	if (err)
2055		goto out;
2056
2057	*sd = p;
2058	p = NULL;
2059
2060out:
2061	kfree(p);
2062	fnd_put(fnd_sii);
2063	ni_unlock(ni);
2064
2065	return err;
2066}
2067
2068/*
2069 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2070 *
2071 * Security Descriptor Stream data is organized into chunks of 256K bytes,
2072 * and it contains a mirror copy of each security descriptor. When writing
2073 * a security descriptor at location X, another copy is written at
2074 * location (X+256K).
2075 * When a security descriptor would cross the 256K boundary, the write
2076 * pointer is advanced by 256K to skip over the mirror portion of that
2077 * block.
2078 */
2079int ntfs_insert_security(struct ntfs_sb_info *sbi,
2080			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2081			 u32 size_sd, __le32 *security_id, bool *inserted)
2082{
2083	int err, diff;
2084	struct ntfs_inode *ni = sbi->security.ni;
2085	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2086	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2087	struct NTFS_DE_SDH *e;
2088	struct NTFS_DE_SDH sdh_e;
2089	struct NTFS_DE_SII sii_e;
2090	struct SECURITY_HDR *d_security;
2091	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2092	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2093	struct SECURITY_KEY hash_key;
2094	struct ntfs_fnd *fnd_sdh = NULL;
2095	const struct INDEX_ROOT *root_sdh;
2096	const struct INDEX_ROOT *root_sii;
2097	u64 mirr_off, new_sds_size;
2098	u32 next, left;
2099
2100	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2101		      SecurityDescriptorsBlockSize);
2102
2103	hash_key.hash = security_hash(sd, size_sd);
2104	hash_key.sec_id = SECURITY_ID_INVALID;
2105
2106	if (inserted)
2107		*inserted = false;
2108	*security_id = SECURITY_ID_INVALID;
2109
2110	/* Allocate a temporary buffer. */
2111	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2112	if (!d_security)
2113		return -ENOMEM;
2114
2115	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2116
2117	fnd_sdh = fnd_get();
2118	if (!fnd_sdh) {
2119		err = -ENOMEM;
2120		goto out;
2121	}
2122
2123	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2124	if (!root_sdh) {
2125		err = -EINVAL;
2126		goto out;
2127	}
2128
2129	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2130	if (!root_sii) {
2131		err = -EINVAL;
2132		goto out;
2133	}
2134
2135	/*
2136	 * Check if an identical security descriptor already exists:
2137	 * look up the hash via "$SDH" to get its offset in "$SDS".
2138	 */
2139	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2140			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2141			fnd_sdh);
2142	if (err)
2143		goto out;
2144
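	/*
	 * Walk all entries whose hash matches; a true duplicate must also
	 * match in size and in raw descriptor contents.
	 */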
2145	while (e) {
2146		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2147			err = ntfs_read_run_nb(sbi, &ni->file.run,
2148					       le64_to_cpu(e->sec_hdr.off),
2149					       d_security, new_sec_size, NULL);
2150			if (err)
2151				goto out;
2152
2153			if (le32_to_cpu(d_security->size) == new_sec_size &&
2154			    d_security->key.hash == hash_key.hash &&
2155			    !memcmp(d_security + 1, sd, size_sd)) {
2156				/* Such security already exists. */
2157				*security_id = d_security->key.sec_id;
2158				err = 0;
2159				goto out;
2160			}
2161		}
2162
2163		err = indx_find_sort(indx_sdh, ni, root_sdh,
2164				     (struct NTFS_DE **)&e, fnd_sdh);
2165		if (err)
2166			goto out;
2167
2168		if (!e || e->key.hash != hash_key.hash)
2169			break;
2170	}
2171
2172	/* Compute the space remaining in the current 256K block. */
2173	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2174	left = SecurityDescriptorsBlockSize - next;
2175
2176	/* Skip ahead if the descriptor would cross the 256K block boundary. */
2177	if (left < new_sec_size) {
2178		/* Skip the remaining "left" bytes plus the mirror block. */
2179		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2180	}
2181
2182	/* TODO: Zero the tail of the previous security descriptor. */
2183	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2184
2185	/*
2186	 * Example: if ni->vfs_inode.i_size == 0x40438 and
2187	 * sbi->security.next_off == 0x00440, then bytes [0x438-0x440)
2188	 * need to be zeroed:
2189	 * if (next > used) {
2190	 *	u32 tozero = next - used;
2191	 *	zero "tozero" bytes from sbi->security.next_off - tozero;
2192	 * }
2193	 */
2194
2195	/* Format new security descriptor. */
2196	d_security->key.hash = hash_key.hash;
2197	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2198	d_security->off = cpu_to_le64(sbi->security.next_off);
2199	d_security->size = cpu_to_le32(new_sec_size);
2200	memcpy(d_security + 1, sd, size_sd);
2201
2202	/* Write main SDS bucket. */
2203	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2204				d_security, aligned_sec_size, 0);
2205
2206	if (err)
2207		goto out;
2208
2209	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2210	new_sds_size = mirr_off + aligned_sec_size;
2211
2212	if (new_sds_size > ni->vfs_inode.i_size) {
2213		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2214				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2215				    new_sds_size, &new_sds_size, false, NULL);
2216		if (err)
2217			goto out;
2218	}
2219
2220	/* Write copy SDS bucket. */
2221	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2222				aligned_sec_size, 0);
2223	if (err)
2224		goto out;
2225
2226	/* Fill SII entry. */
2227	sii_e.de.view.data_off =
2228		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2229	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2230	sii_e.de.view.res = 0;
2231	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2232	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2233	sii_e.de.flags = 0;
2234	sii_e.de.res = 0;
2235	sii_e.sec_id = d_security->key.sec_id;
2236	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2237
2238	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2239	if (err)
2240		goto out;
2241
2242	/* Fill SDH entry. */
2243	sdh_e.de.view.data_off =
2244		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2245	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2246	sdh_e.de.view.res = 0;
2247	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2248	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2249	sdh_e.de.flags = 0;
2250	sdh_e.de.res = 0;
2251	sdh_e.key.hash = d_security->key.hash;
2252	sdh_e.key.sec_id = d_security->key.sec_id;
2253	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2254	sdh_e.magic[0] = cpu_to_le16('I');
2255	sdh_e.magic[1] = cpu_to_le16('I');
2256
2257	fnd_clear(fnd_sdh);
2258	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2259				fnd_sdh, 0);
2260	if (err)
2261		goto out;
2262
2263	*security_id = d_security->key.sec_id;
2264	if (inserted)
2265		*inserted = true;
2266
2267	/* Update Id and offset for next descriptor. */
2268	sbi->security.next_id += 1;
2269	sbi->security.next_off += aligned_sec_size;
2270
2271out:
2272	fnd_put(fnd_sdh);
2273	mark_inode_dirty(&ni->vfs_inode);
2274	ni_unlock(ni);
2275	kfree(d_security);
2276
2277	return err;
2278}
2279
2280/*
2281 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2282 */
2283int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2284{
2285	int err;
2286	struct ntfs_inode *ni = sbi->reparse.ni;
2287	struct ntfs_index *indx = &sbi->reparse.index_r;
2288	struct ATTRIB *attr;
2289	struct ATTR_LIST_ENTRY *le;
2290	const struct INDEX_ROOT *root_r;
2291
2292	if (!ni)
2293		return 0;
2294
2295	le = NULL;
2296	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2297			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2298	if (!attr) {
2299		err = -EINVAL;
2300		goto out;
2301	}
2302
2303	root_r = resident_data(attr);
2304	if (root_r->type != ATTR_ZERO ||
2305	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2306		err = -EINVAL;
2307		goto out;
2308	}
2309
2310	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2311	if (err)
2312		goto out;
2313
2314out:
2315	return err;
2316}
2317
2318/*
2319 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2320 */
2321int ntfs_objid_init(struct ntfs_sb_info *sbi)
2322{
2323	int err;
2324	struct ntfs_inode *ni = sbi->objid.ni;
2325	struct ntfs_index *indx = &sbi->objid.index_o;
2326	struct ATTRIB *attr;
2327	struct ATTR_LIST_ENTRY *le;
2328	const struct INDEX_ROOT *root;
2329
2330	if (!ni)
2331		return 0;
2332
2333	le = NULL;
2334	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2335			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2336	if (!attr) {
2337		err = -EINVAL;
2338		goto out;
2339	}
2340
2341	root = resident_data(attr);
2342	if (root->type != ATTR_ZERO ||
2343	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2344		err = -EINVAL;
2345		goto out;
2346	}
2347
2348	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2349	if (err)
2350		goto out;
2351
2352out:
2353	return err;
2354}
2355
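/*
 * ntfs_objid_remove - Remove an object id entry from $Extend/$ObjId::$O.
 */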
2356int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2357{
2358	int err;
2359	struct ntfs_inode *ni = sbi->objid.ni;
2360	struct ntfs_index *indx = &sbi->objid.index_o;
2361
2362	if (!ni)
2363		return -EINVAL;
2364
2365	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2366
2367	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2368
2369	mark_inode_dirty(&ni->vfs_inode);
2370	ni_unlock(ni);
2371
2372	return err;
2373}
2374
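/*
 * ntfs_insert_reparse - Insert an entry into $Extend/$Reparse::$R for the
 * given reparse tag and MFT reference.
 */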
2375int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2376			const struct MFT_REF *ref)
2377{
2378	int err;
2379	struct ntfs_inode *ni = sbi->reparse.ni;
2380	struct ntfs_index *indx = &sbi->reparse.index_r;
2381	struct NTFS_DE_R re;
2382
2383	if (!ni)
2384		return -EINVAL;
2385
2386	memset(&re, 0, sizeof(re));
2387
2388	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2389	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2390	re.de.key_size = cpu_to_le16(sizeof(re.key));
2391
2392	re.key.ReparseTag = rtag;
2393	memcpy(&re.key.ref, ref, sizeof(*ref));
2394
2395	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2396
2397	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2398
2399	mark_inode_dirty(&ni->vfs_inode);
2400	ni_unlock(ni);
2401
2402	return err;
2403}
2404
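/*
 * ntfs_remove_reparse - Remove an entry from $Extend/$Reparse::$R.
 *
 * When 'rtag' is zero the tag is unknown, so the entry is first located
 * by MFT reference alone and then deleted by its full key.
 */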
2405int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2406			const struct MFT_REF *ref)
2407{
2408	int err, diff;
2409	struct ntfs_inode *ni = sbi->reparse.ni;
2410	struct ntfs_index *indx = &sbi->reparse.index_r;
2411	struct ntfs_fnd *fnd = NULL;
2412	struct REPARSE_KEY rkey;
2413	struct NTFS_DE_R *re;
2414	struct INDEX_ROOT *root_r;
2415
2416	if (!ni)
2417		return -EINVAL;
2418
2419	rkey.ReparseTag = rtag;
2420	rkey.ref = *ref;
2421
2422	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2423
2424	if (rtag) {
2425		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2426		goto out1;
2427	}
2428
2429	fnd = fnd_get();
2430	if (!fnd) {
2431		err = -ENOMEM;
2432		goto out1;
2433	}
2434
2435	root_r = indx_get_root(indx, ni, NULL, NULL);
2436	if (!root_r) {
2437		err = -EINVAL;
2438		goto out;
2439	}
2440
2441	/* Passing 1 forces the compare to ignore rkey.ReparseTag when matching keys. */
2442	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2443			(struct NTFS_DE **)&re, fnd);
2444	if (err)
2445		goto out;
2446
2447	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2448		/* Should be impossible; the volume is likely corrupt. */
2449		goto out;
2450	}
2451
2452	memcpy(&rkey, &re->key, sizeof(rkey));
2453
2454	fnd_put(fnd);
2455	fnd = NULL;
2456
2457	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2458	if (err)
2459		goto out;
2460
2461out:
2462	fnd_put(fnd);
2463
2464out1:
2465	mark_inode_dirty(&ni->vfs_inode);
2466	ni_unlock(ni);
2467
2468	return err;
2469}
2470
2471static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2472					  CLST len)
2473{
2474	ntfs_unmap_meta(sbi->sb, lcn, len);
2475	ntfs_discard(sbi, lcn, len);
2476}
2477
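/*
 * mark_as_free_ex - Mark clusters [lcn, lcn + len) as free in the bitmap.
 *
 * If part of the range is already free, the volume is flagged as dirty and
 * only the still-used sub-ranges are released. Freed clusters adjacent to
 * the MFT zone are merged back into it, up to sbi->zone_max clusters.
 */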
2478void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2479{
2480	CLST end, i, zone_len, zlen;
2481	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2482	bool dirty = false;
2483
2484	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2485	if (!wnd_is_used(wnd, lcn, len)) {
2486		/* Mark the volume as dirty after wnd->rw_lock is dropped. */
2487		dirty = true;
2488
2489		end = lcn + len;
2490		len = 0;
2491		for (i = lcn; i < end; i++) {
2492			if (wnd_is_used(wnd, i, 1)) {
2493				if (!len)
2494					lcn = i;
2495				len += 1;
2496				continue;
2497			}
2498
2499			if (!len)
2500				continue;
2501
2502			if (trim)
2503				ntfs_unmap_and_discard(sbi, lcn, len);
2504
2505			wnd_set_free(wnd, lcn, len);
2506			len = 0;
2507		}
2508
2509		if (!len)
2510			goto out;
2511	}
2512
2513	if (trim)
2514		ntfs_unmap_and_discard(sbi, lcn, len);
2515	wnd_set_free(wnd, lcn, len);
2516
2517	/* Append to the MFT zone, if possible. */
2518	zone_len = wnd_zone_len(wnd);
2519	zlen = min(zone_len + len, sbi->zone_max);
2520
2521	if (zlen == zone_len) {
2522		/* MFT zone already has maximum size. */
2523	} else if (!zone_len) {
2524		/* Create MFT zone only if 'zlen' is large enough. */
2525		if (zlen == sbi->zone_max)
2526			wnd_zone_set(wnd, lcn, zlen);
2527	} else {
2528		CLST zone_lcn = wnd_zone_bit(wnd);
2529
2530		if (lcn + len == zone_lcn) {
2531			/* Append into head MFT zone. */
2532			wnd_zone_set(wnd, lcn, zlen);
2533		} else if (zone_lcn + zone_len == lcn) {
2534			/* Append into tail MFT zone. */
2535			wnd_zone_set(wnd, zone_lcn, zlen);
2536		}
2537	}
2538
2539out:
2540	up_write(&wnd->rw_lock);
2541	if (dirty)
2542		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2543}
2544
2545/*
2546 * run_deallocate - Deallocate clusters.
2547 */
2548int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2549		   bool trim)
2550{
2551	CLST lcn, len;
2552	size_t idx = 0;
2553
2554	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2555		if (lcn == SPARSE_LCN)
2556			continue;
2557
2558		mark_as_free_ex(sbi, lcn, len, trim);
2559	}
2560
2561	return 0;
2562}
2563
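/*
 * name_has_forbidden_chars - Check for characters Windows disallows in file
 * names: control characters, reserved punctuation, and a trailing space/dot.
 */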
2564static inline bool name_has_forbidden_chars(const struct le_str *fname)
2565{
2566	int i, ch;
2567
2568	/* check for forbidden chars */
2569	for (i = 0; i < fname->len; ++i) {
2570		ch = le16_to_cpu(fname->name[i]);
2571
2572		/* control chars */
2573		if (ch < 0x20)
2574			return true;
2575
2576		switch (ch) {
2577		/* disallowed by Windows */
2578		case '\\':
2579		case '/':
2580		case ':':
2581		case '*':
2582		case '?':
2583		case '<':
2584		case '>':
2585		case '|':
2586		case '\"':
2587			return true;
2588
2589		default:
2590			/* allowed char */
2591			break;
2592		}
2593	}
2594
2595	/* File names cannot end with a space or a dot. */
2596	if (fname->len > 0) {
2597		ch = le16_to_cpu(fname->name[fname->len - 1]);
2598		if (ch == ' ' || ch == '.')
2599			return true;
2600	}
2601
2602	return false;
2603}
2604
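/*
 * is_reserved_name - Check for legacy DOS device names (CON, NUL, AUX, PRN,
 * COM1-COM9, LPT1-LPT9), which are reserved alone or with any extension.
 */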
2605static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2606				    const struct le_str *fname)
2607{
2608	int port_digit;
2609	const __le16 *name = fname->name;
2610	int len = fname->len;
2611	const u16 *upcase = sbi->upcase;
2612
2613	/* Check for 3-char reserved names (device names). */
2614	/* The name alone or with any extension is forbidden. */
2615	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2616		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2617		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2618		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2619		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2620			return true;
2621
2622	/* Check for 4-char reserved names (port name followed by 1..9). */
2623	/* The name alone or with any extension is forbidden. */
2624	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2625		port_digit = le16_to_cpu(name[3]);
2626		if (port_digit >= '1' && port_digit <= '9')
2627			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2628					    false) ||
2629			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2630					    false))
2631				return true;
2632	}
2633
2634	return false;
2635}
2636
2637/*
2638 * valid_windows_name - Check if a file name is valid in Windows.
2639 */
2640bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2641{
2642	return !name_has_forbidden_chars(fname) &&
2643	       !is_reserved_name(sbi, fname);
2644}
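
/*
 * Minimal usage sketch (hypothetical caller, not from this file): with the
 * "windows_names" mount option enabled, a newly created name would be
 * rejected like so:
 *
 *	if (!valid_windows_name(sbi, fname))
 *		return -EINVAL;
 */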
2645
2646/*
2647 * ntfs_set_label - Update the current NTFS volume label.
2648 */
2649int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2650{
2651	int err;
2652	struct ATTRIB *attr;
2653	struct ntfs_inode *ni = sbi->volume.ni;
2654	const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
2655	/* Allocate PATH_MAX bytes. */
2656	struct cpu_str *uni = __getname();
2657
2658	if (!uni)
2659		return -ENOMEM;
2660
2661	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2662				UTF16_LITTLE_ENDIAN);
2663	if (err < 0)
2664		goto out;
2665
2666	if (uni->len > max_ulen) {
2667		ntfs_warn(sbi->sb, "new label is too long");
2668		err = -EFBIG;
2669		goto out;
2670	}
2671
2672	ni_lock(ni);
2673
2674	/* Remove any existing label; ignore errors. */
2675	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2676
2677	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
2678				 0, &attr, NULL, NULL);
2679	if (err < 0)
2680		goto unlock_out;
2681
2682	/* Write the new label into the on-disk attribute. */
2683	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
2684
2685	/* Update the cached copy of the current label. */
2686	if (len >= ARRAY_SIZE(sbi->volume.label))
2687		len = ARRAY_SIZE(sbi->volume.label) - 1;
2688	memcpy(sbi->volume.label, label, len);
2689	sbi->volume.label[len] = 0;
2690	mark_inode_dirty_sync(&ni->vfs_inode);
2691
2692unlock_out:
2693	ni_unlock(ni);
2694
2695	if (!err)
2696		err = _ni_write_inode(&ni->vfs_inode, 0);
2697
2698out:
2699	__putname(uni);
2700	return err;
2701}