v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2011 Novell Inc.
   5 */
   6
   7#include <linux/fs.h>
   8#include <linux/slab.h>
   9#include <linux/cred.h>
  10#include <linux/xattr.h>
  11#include <linux/ratelimit.h>
  12#include <linux/fiemap.h>
  13#include <linux/fileattr.h>
  14#include <linux/security.h>
  15#include <linux/namei.h>
  16#include <linux/posix_acl.h>
  17#include <linux/posix_acl_xattr.h>
  18#include "overlayfs.h"
  19
  20
  21int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
  22		struct iattr *attr)
  23{
  24	int err;
  25	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
  26	bool full_copy_up = false;
  27	struct dentry *upperdentry;
  28	const struct cred *old_cred;
  29
  30	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
  31	if (err)
  32		return err;
  33
  34	if (attr->ia_valid & ATTR_SIZE) {
  35		/* Truncate should trigger data copy up as well */
  36		full_copy_up = true;
  37	}
  38
  39	if (!full_copy_up)
  40		err = ovl_copy_up(dentry);
  41	else
  42		err = ovl_copy_up_with_data(dentry);
  43	if (!err) {
  44		struct inode *winode = NULL;
  45
  46		upperdentry = ovl_dentry_upper(dentry);
  47
  48		if (attr->ia_valid & ATTR_SIZE) {
  49			winode = d_inode(upperdentry);
  50			err = get_write_access(winode);
  51			if (err)
  52				goto out;
  53		}
  54
  55		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
  56			attr->ia_valid &= ~ATTR_MODE;
  57
  58		/*
  59		 * We might have to translate ovl file into real file object
  60		 * once use cases emerge.  For now, simply don't let underlying
  61		 * filesystem rely on attr->ia_file
  62		 */
  63		attr->ia_valid &= ~ATTR_FILE;
  64
  65		/*
  66		 * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
  67		 * set.  Overlayfs does not pass O_TRUNC flag to underlying
  68		 * filesystem during open -> do not pass ATTR_OPEN.  This
  69		 * disables optimization in fuse which assumes open(O_TRUNC)
  70		 * already set file size to 0.  But we never passed O_TRUNC to
  71		 * fuse.  So by clearing ATTR_OPEN, fuse will be forced to send
  72		 * setattr request to server.
  73		 */
  74		attr->ia_valid &= ~ATTR_OPEN;
  75
  76		err = ovl_want_write(dentry);
  77		if (err)
  78			goto out_put_write;
  79
  80		inode_lock(upperdentry->d_inode);
  81		old_cred = ovl_override_creds(dentry->d_sb);
  82		err = ovl_do_notify_change(ofs, upperdentry, attr);
  83		ovl_revert_creds(old_cred);
  84		if (!err)
  85			ovl_copyattr(dentry->d_inode);
  86		inode_unlock(upperdentry->d_inode);
  87		ovl_drop_write(dentry);
  88
  89out_put_write:
  90		if (winode)
  91			put_write_access(winode);
  92	}
  93out:
  94	return err;
  95}
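/*
 * Illustrative note (not part of the upstream file): ovl_copy_up() may do a
 * metadata-only copy up when the metacopy feature is enabled, which is why
 * the ATTR_SIZE case above is special-cased to ovl_copy_up_with_data(), so
 * the file data has been copied to the upper layer before the upper inode
 * is truncated.
 */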
  96
  97static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
  98{
  99	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
 100	bool samefs = ovl_same_fs(ofs);
 101	unsigned int xinobits = ovl_xino_bits(ofs);
 102	unsigned int xinoshift = 64 - xinobits;
 103
 104	if (samefs) {
 105		/*
 106		 * When all layers are on the same fs, all real inode
 107		 * number are unique, so we use the overlay st_dev,
 108		 * which is friendly to du -x.
 109		 */
 110		stat->dev = dentry->d_sb->s_dev;
 111		return;
 112	} else if (xinobits) {
 113		/*
 114		 * Inode numbers of the underlying fs should not use the high
 115		 * xinobits, so we use the high xinobits to partition the
 116		 * overlay st_ino address space. The high bits hold the fsid
 117		 * (upper fsid is 0). The lowest xinobit is reserved for mapping
 118		 * the non-persistent inode numbers range in case of overflow.
 119		 * This way all overlay inode numbers are unique and use the
 120		 * overlay st_dev.
 121		 */
 122		if (likely(!(stat->ino >> xinoshift))) {
 123			stat->ino |= ((u64)fsid) << (xinoshift + 1);
 124			stat->dev = dentry->d_sb->s_dev;
 125			return;
 126		} else if (ovl_xino_warn(ofs)) {
 127			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
 128					    dentry, stat->ino, xinobits);
 129		}
 130	}
 131
 132	/* The inode could not be mapped to a unified st_ino address space */
 133	if (S_ISDIR(dentry->d_inode->i_mode)) {
 134		/*
 135		 * Always use the overlay st_dev for directories, so 'find
 136		 * -xdev' will scan the entire overlay mount and won't cross the
 137		 * overlay mount boundaries.
 138		 *
 139		 * If not all layers are on the same fs the pair {real st_ino;
 140		 * overlay st_dev} is not unique, so use the non persistent
 141		 * overlay st_ino for directories.
 142		 */
 143		stat->dev = dentry->d_sb->s_dev;
 144		stat->ino = dentry->d_inode->i_ino;
 145	} else {
 146		/*
 147		 * For non-samefs setup, if we cannot map all layers st_ino
 148		 * to a unified address space, we need to make sure that st_dev
 149		 * is unique per underlying fs, so we use the unique anonymous
 150		 * bdev assigned to the underlying fs.
 151		 */
 152		stat->dev = ofs->fs[fsid].pseudo_dev;
 153	}
 154}
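/*
 * Illustrative sketch of the xino mapping above (not part of the upstream
 * file).  Assuming xinobits = 8, so xinoshift = 56:
 *
 *   upper layer (fsid 0), ino 0x1234  -> st_ino 0x1234
 *   lower layer fsid 2,   ino 0x1234  -> st_ino (2ULL << 57) | 0x1234
 *   any layer, ino with bit 56+ set   -> overflow: warn (if enabled) and
 *                                        fall back to the non-persistent
 *                                        handling at the end of
 *                                        ovl_map_dev_ino()
 *
 * In the mapped cases st_dev is the overlay s_dev, so st_dev/st_ino remain
 * constant across copy up.
 */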
 155
 156int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
 157		struct kstat *stat, u32 request_mask, unsigned int flags)
 158{
 159	struct dentry *dentry = path->dentry;
 160	enum ovl_path_type type;
 161	struct path realpath;
 162	const struct cred *old_cred;
 163	struct inode *inode = d_inode(dentry);
 164	bool is_dir = S_ISDIR(inode->i_mode);
 165	int fsid = 0;
 166	int err;
 167	bool metacopy_blocks = false;
 168
 169	metacopy_blocks = ovl_is_metacopy_dentry(dentry);
 170
 171	type = ovl_path_real(dentry, &realpath);
 172	old_cred = ovl_override_creds(dentry->d_sb);
 173	err = vfs_getattr_nosec(&realpath, stat, request_mask, flags);
 174	if (err)
 175		goto out;
 176
 177	/* Report the effective immutable/append-only STATX flags */
 178	generic_fill_statx_attr(inode, stat);
 179
 180	/*
 181	 * For non-dir or same fs, we use st_ino of the copy up origin.
 182	 * This guarantees constant st_dev/st_ino across copy up.
 183	 * With xino feature and non-samefs, we use st_ino of the copy up
 184	 * origin masked with high bits that represent the layer id.
 185	 *
 186	 * If the lower filesystem supports NFS file handles, this also guarantees
 187	 * persistent st_ino across mount cycle.
 188	 */
 189	if (!is_dir || ovl_same_dev(OVL_FS(dentry->d_sb))) {
 190		if (!OVL_TYPE_UPPER(type)) {
 191			fsid = ovl_layer_lower(dentry)->fsid;
 192		} else if (OVL_TYPE_ORIGIN(type)) {
 193			struct kstat lowerstat;
 194			u32 lowermask = STATX_INO | STATX_BLOCKS |
 195					(!is_dir ? STATX_NLINK : 0);
 196
 197			ovl_path_lower(dentry, &realpath);
 198			err = vfs_getattr_nosec(&realpath, &lowerstat, lowermask,
 199						flags);
 200			if (err)
 201				goto out;
 202
 203			/*
 204			 * Lower hardlinks may be broken on copy up to different
 205			 * upper files, so we cannot use the lower origin st_ino
 206			 * for those different files, even for the same fs case.
 207			 *
 208			 * Similarly, several redirected dirs can point to the
 209			 * same dir on a lower layer. With the "verify_lower"
 210			 * feature, we do not use the lower origin st_ino, if
 211			 * we haven't verified that this redirect is unique.
 212			 *
 213			 * With inodes index enabled, it is safe to use st_ino
 214			 * of an indexed origin. The index validates that the
 215			 * upper hardlink is not broken and that a redirected
 216			 * dir is the only redirect to that origin.
 217			 */
 218			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
 219			    (!ovl_verify_lower(dentry->d_sb) &&
 220			     (is_dir || lowerstat.nlink == 1))) {
 221				fsid = ovl_layer_lower(dentry)->fsid;
 222				stat->ino = lowerstat.ino;
 223			}
 224
 225			/*
 226			 * If we are querying a metacopy dentry and lower
 227			 * dentry is data dentry, then use the blocks we
 228			 * queried just now. We don't have to do additional
 229			 * vfs_getattr(). If lower itself is metacopy, then
 230			 * additional vfs_getattr() is unavoidable.
 231			 */
 232			if (metacopy_blocks &&
 233			    realpath.dentry == ovl_dentry_lowerdata(dentry)) {
 234				stat->blocks = lowerstat.blocks;
 235				metacopy_blocks = false;
 236			}
 237		}
 238
 239		if (metacopy_blocks) {
 240			/*
 241			 * If lower is not same as lowerdata or if there was
 242			 * no origin on upper, we can end up here.
 243			 * With lazy lowerdata lookup, guess lowerdata blocks
 244			 * from size to avoid lowerdata lookup on stat(2).
 245			 */
 246			struct kstat lowerdatastat;
 247			u32 lowermask = STATX_BLOCKS;
 248
 249			ovl_path_lowerdata(dentry, &realpath);
 250			if (realpath.dentry) {
 251				err = vfs_getattr_nosec(&realpath, &lowerdatastat,
 252							lowermask, flags);
 253				if (err)
 254					goto out;
 255			} else {
 256				lowerdatastat.blocks =
 257					round_up(stat->size, stat->blksize) >> 9;
 258			}
 259			stat->blocks = lowerdatastat.blocks;
 260		}
 261	}
 262
 263	ovl_map_dev_ino(dentry, stat, fsid);
 264
 265	/*
 266	 * It's probably not worth it to count subdirs to get the
 267	 * correct link count.  nlink=1 seems to pacify 'find' and
 268	 * other utilities.
 269	 */
 270	if (is_dir && OVL_TYPE_MERGE(type))
 271		stat->nlink = 1;
 272
 273	/*
 274	 * Return the overlay inode nlinks for indexed upper inodes.
 275	 * Overlay inode nlink counts the union of the upper hardlinks
 276	 * and non-covered lower hardlinks. It does not include the upper
 277	 * index hardlink.
 278	 */
 279	if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
 280		stat->nlink = dentry->d_inode->i_nlink;
 281
 282out:
 283	ovl_revert_creds(old_cred);
 284
 285	return err;
 286}
 287
 288int ovl_permission(struct mnt_idmap *idmap,
 289		   struct inode *inode, int mask)
 290{
 291	struct inode *upperinode = ovl_inode_upper(inode);
 292	struct inode *realinode;
 293	struct path realpath;
 294	const struct cred *old_cred;
 295	int err;
 296
 297	/* Careful in RCU walk mode */
 298	realinode = ovl_i_path_real(inode, &realpath);
 299	if (!realinode) {
 300		WARN_ON(!(mask & MAY_NOT_BLOCK));
 301		return -ECHILD;
 302	}
 303
 304	/*
 305	 * Check overlay inode with the creds of task and underlying inode
 306	 * with creds of mounter
 307	 */
 308	err = generic_permission(&nop_mnt_idmap, inode, mask);
 309	if (err)
 310		return err;
 311
 312	old_cred = ovl_override_creds(inode->i_sb);
 313	if (!upperinode &&
 314	    !special_file(realinode->i_mode) && mask & MAY_WRITE) {
 315		mask &= ~(MAY_WRITE | MAY_APPEND);
 316		/* Make sure mounter can read file for copy up later */
 317		mask |= MAY_READ;
 318	}
 319	err = inode_permission(mnt_idmap(realpath.mnt), realinode, mask);
 320	ovl_revert_creds(old_cred);
 321
 322	return err;
 323}
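/*
 * Illustrative note on the MAY_WRITE handling above: for a file that exists
 * only on a lower layer, the mounter-credential check against the real inode
 * drops MAY_WRITE/MAY_APPEND and substitutes MAY_READ, because the write
 * itself will go to the upper layer after copy up; all the mounter needs on
 * the lower fs is the ability to read the file for that copy up.
 */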
 324
 325static const char *ovl_get_link(struct dentry *dentry,
 326				struct inode *inode,
 327				struct delayed_call *done)
 328{
 329	const struct cred *old_cred;
 330	const char *p;
 331
 332	if (!dentry)
 333		return ERR_PTR(-ECHILD);
 334
 335	old_cred = ovl_override_creds(dentry->d_sb);
 336	p = vfs_get_link(ovl_dentry_real(dentry), done);
 337	ovl_revert_creds(old_cred);
 338	return p;
 339}
 340
 341#ifdef CONFIG_FS_POSIX_ACL
 342/*
 343 * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
 344 * of the POSIX ACLs retrieved from the lower layer to this function to not
 345 * alter the POSIX ACLs for the underlying filesystem.
 346 */
 347static void ovl_idmap_posix_acl(const struct inode *realinode,
 348				struct mnt_idmap *idmap,
 349				struct posix_acl *acl)
 350{
 351	struct user_namespace *fs_userns = i_user_ns(realinode);
 352
 353	for (unsigned int i = 0; i < acl->a_count; i++) {
 354		vfsuid_t vfsuid;
 355		vfsgid_t vfsgid;
 356
 357		struct posix_acl_entry *e = &acl->a_entries[i];
 358		switch (e->e_tag) {
 359		case ACL_USER:
 360			vfsuid = make_vfsuid(idmap, fs_userns, e->e_uid);
 361			e->e_uid = vfsuid_into_kuid(vfsuid);
 362			break;
 363		case ACL_GROUP:
 364			vfsgid = make_vfsgid(idmap, fs_userns, e->e_gid);
 365			e->e_gid = vfsgid_into_kgid(vfsgid);
 366			break;
 367		}
 368	}
 369}
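/*
 * Hypothetical example of the translation above: if a lower layer is mounted
 * with an idmapping that maps uid 1000 to uid 10000, an ACL_USER entry read
 * from the lower fs with e_uid == 1000 is rewritten in the cloned ACL to
 * e_uid == 10000, matching what stat(2) through the idmapped mount reports
 * for the file owner.  The cached ACL of the lower filesystem itself is left
 * untouched.
 */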
 370
 371/*
 372 * The @noperm argument is used to skip permission checking and is a temporary
 373 * measure. Quoting Miklos from an earlier discussion:
 374 *
 375 * > So there are two paths to getting an acl:
 376 * > 1) permission checking and 2) retrieving the value via getxattr(2).
 377 * > This is a similar situation as reading a symlink vs. following it.
 378 * > When following a symlink overlayfs always reads the link on the
 379 * > underlying fs just as if it was a readlink(2) call, calling
 380 * > security_inode_readlink() instead of security_inode_follow_link().
 381 * > This is logical: we are reading the link from the underlying storage,
 382 * > and following it on overlayfs.
 383 * >
 384 * > Applying the same logic to acl: we do need to call the
 385 * > security_inode_getxattr() on the underlying fs, even if just want to
 386 * > check permissions on overlay. This is currently not done, which is an
 387 * > inconsistency.
 388 * >
 389 * > Maybe adding the check to ovl_get_acl() is the right way to go, but
 390 * > I'm a little afraid of a performance regression.  Will look into that.
 391 *
 392 * Until we have made a decision allow this helper to take the @noperm
 393 * argument. We should hopefully be able to remove it soon.
 394 */
 395struct posix_acl *ovl_get_acl_path(const struct path *path,
 396				   const char *acl_name, bool noperm)
 397{
 398	struct posix_acl *real_acl, *clone;
 399	struct mnt_idmap *idmap;
 400	struct inode *realinode = d_inode(path->dentry);
 401
 402	idmap = mnt_idmap(path->mnt);
 403
 404	if (noperm)
 405		real_acl = get_inode_acl(realinode, posix_acl_type(acl_name));
 406	else
 407		real_acl = vfs_get_acl(idmap, path->dentry, acl_name);
 408	if (IS_ERR_OR_NULL(real_acl))
 409		return real_acl;
 410
 411	if (!is_idmapped_mnt(path->mnt))
 412		return real_acl;
 413
 414	/*
 415	 * We cannot alter the ACLs returned from the relevant layer as that
 416	 * would alter the cached values filesystem wide for the lower
 417	 * filesystem. Instead we can clone the ACLs and then apply the
 418	 * relevant idmapping of the layer.
 419	 */
 420	clone = posix_acl_clone(real_acl, GFP_KERNEL);
 421	posix_acl_release(real_acl); /* release original acl */
 422	if (!clone)
 423		return ERR_PTR(-ENOMEM);
 424
 425	ovl_idmap_posix_acl(realinode, idmap, clone);
 426	return clone;
 427}
 428
 429/*
 430 * When the relevant layer is an idmapped mount we need to take the idmapping
 431 * of the layer into account and translate any ACL_{GROUP,USER} values
 432 * according to the idmapped mount.
 433 *
 434 * We cannot alter the ACLs returned from the relevant layer as that would
 435 * alter the cached values filesystem wide for the lower filesystem. Instead we
 436 * can clone the ACLs and then apply the relevant idmapping of the layer.
 437 *
 438 * This is obviously only relevant when idmapped layers are used.
 439 */
 440struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
 441				 struct inode *inode, int type,
 442				 bool rcu, bool noperm)
 443{
 444	struct inode *realinode;
 445	struct posix_acl *acl;
 446	struct path realpath;
 447
 448	/* Careful in RCU walk mode */
 449	realinode = ovl_i_path_real(inode, &realpath);
 450	if (!realinode) {
 451		WARN_ON(!rcu);
 452		return ERR_PTR(-ECHILD);
 453	}
 454
 455	if (!IS_POSIXACL(realinode))
 456		return NULL;
 457
 458	if (rcu) {
 459		/*
 460		 * If the layer is idmapped drop out of RCU path walk
 461		 * so we can clone the ACLs.
 462		 */
 463		if (is_idmapped_mnt(realpath.mnt))
 464			return ERR_PTR(-ECHILD);
 465
 466		acl = get_cached_acl_rcu(realinode, type);
 467	} else {
 468		const struct cred *old_cred;
 469
 470		old_cred = ovl_override_creds(inode->i_sb);
 471		acl = ovl_get_acl_path(&realpath, posix_acl_xattr_name(type), noperm);
 472		ovl_revert_creds(old_cred);
 473	}
 474
 475	return acl;
 476}
 477
 478static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
 479				 struct posix_acl *acl, int type)
 480{
 481	int err;
 482	struct path realpath;
 483	const char *acl_name;
 484	const struct cred *old_cred;
 485	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
 486	struct dentry *upperdentry = ovl_dentry_upper(dentry);
 487	struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
 488
 489	/*
 490	 * If ACL is to be removed from a lower file, check if it exists in
 491	 * the first place before copying it up.
 492	 */
 493	acl_name = posix_acl_xattr_name(type);
 494	if (!acl && !upperdentry) {
 495		struct posix_acl *real_acl;
 496
 497		ovl_path_lower(dentry, &realpath);
 498		old_cred = ovl_override_creds(dentry->d_sb);
 499		real_acl = vfs_get_acl(mnt_idmap(realpath.mnt), realdentry,
 500				       acl_name);
 501		ovl_revert_creds(old_cred);
 502		if (IS_ERR(real_acl)) {
 503			err = PTR_ERR(real_acl);
 504			goto out;
 505		}
 506		posix_acl_release(real_acl);
 507	}
 508
 509	if (!upperdentry) {
 510		err = ovl_copy_up(dentry);
 511		if (err)
 512			goto out;
 513
 514		realdentry = ovl_dentry_upper(dentry);
 515	}
 516
 517	err = ovl_want_write(dentry);
 518	if (err)
 519		goto out;
 520
 521	old_cred = ovl_override_creds(dentry->d_sb);
 522	if (acl)
 523		err = ovl_do_set_acl(ofs, realdentry, acl_name, acl);
 524	else
 525		err = ovl_do_remove_acl(ofs, realdentry, acl_name);
 526	ovl_revert_creds(old_cred);
 527	ovl_drop_write(dentry);
 528
 529	/* copy c/mtime */
 530	ovl_copyattr(inode);
 531out:
 532	return err;
 533}
 534
 535int ovl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
 536		struct posix_acl *acl, int type)
 537{
 538	int err;
 539	struct inode *inode = d_inode(dentry);
 540	struct dentry *workdir = ovl_workdir(dentry);
 541	struct inode *realinode = ovl_inode_real(inode);
 542
 543	if (!IS_POSIXACL(d_inode(workdir)))
 544		return -EOPNOTSUPP;
 545	if (!realinode->i_op->set_acl)
 546		return -EOPNOTSUPP;
 547	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
 548		return acl ? -EACCES : 0;
 549	if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
 550		return -EPERM;
 551
 552	/*
 553	 * Check if sgid bit needs to be cleared (actual setacl operation will
 554	 * be done with mounter's capabilities and so that won't do it for us).
 555	 */
 556	if (unlikely(inode->i_mode & S_ISGID) && type == ACL_TYPE_ACCESS &&
 557	    !in_group_p(inode->i_gid) &&
 558	    !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID)) {
 559		struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
 560
 561		err = ovl_setattr(&nop_mnt_idmap, dentry, &iattr);
 562		if (err)
 563			return err;
 564	}
 565
 566	return ovl_set_or_remove_acl(dentry, inode, acl, type);
 567}
 568#endif
 569
 570int ovl_update_time(struct inode *inode, int flags)
 571{
 572	if (flags & S_ATIME) {
 573		struct ovl_fs *ofs = OVL_FS(inode->i_sb);
 574		struct path upperpath = {
 575			.mnt = ovl_upper_mnt(ofs),
 576			.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
 577		};
 578
 579		if (upperpath.dentry) {
 580			touch_atime(&upperpath);
 581			inode_set_atime_to_ts(inode,
 582					      inode_get_atime(d_inode(upperpath.dentry)));
 583		}
 584	}
 585	return 0;
 586}
 587
 588static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 589		      u64 start, u64 len)
 590{
 591	int err;
 592	struct inode *realinode = ovl_inode_realdata(inode);
 593	const struct cred *old_cred;
 594
 595	if (!realinode)
 596		return -EIO;
 597
 598	if (!realinode->i_op->fiemap)
 599		return -EOPNOTSUPP;
 600
 601	old_cred = ovl_override_creds(inode->i_sb);
 602	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
 603	ovl_revert_creds(old_cred);
 604
 605	return err;
 606}
 607
 608/*
 609 * Work around the fact that security_file_ioctl() takes a file argument.
 610 * Introducing security_inode_fileattr_get/set() hooks would solve this issue
 611 * properly.
 612 */
 613static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa,
 614				 bool set)
 615{
 616	struct file *file;
 617	unsigned int cmd;
 618	int err;
 619	unsigned int flags;
 620
 621	flags = O_RDONLY;
 622	if (force_o_largefile())
 623		flags |= O_LARGEFILE;
 624
 625	file = dentry_open(realpath, flags, current_cred());
 626	if (IS_ERR(file))
 627		return PTR_ERR(file);
 628
 629	if (set)
 630		cmd = fa->fsx_valid ? FS_IOC_FSSETXATTR : FS_IOC_SETFLAGS;
 631	else
 632		cmd = fa->fsx_valid ? FS_IOC_FSGETXATTR : FS_IOC_GETFLAGS;
 633
 634	err = security_file_ioctl(file, cmd, 0);
 635	fput(file);
 636
 637	return err;
 638}
 639
 640int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa)
 641{
 642	int err;
 643
 644	err = ovl_security_fileattr(realpath, fa, true);
 645	if (err)
 646		return err;
 647
 648	return vfs_fileattr_set(mnt_idmap(realpath->mnt), realpath->dentry, fa);
 649}
 650
 651int ovl_fileattr_set(struct mnt_idmap *idmap,
 652		     struct dentry *dentry, struct fileattr *fa)
 653{
 654	struct inode *inode = d_inode(dentry);
 655	struct path upperpath;
 656	const struct cred *old_cred;
 657	unsigned int flags;
 658	int err;
 659
 660	err = ovl_copy_up(dentry);
 661	if (!err) {
 662		ovl_path_real(dentry, &upperpath);
 663
 664		err = ovl_want_write(dentry);
 665		if (err)
 666			goto out;
 667
 668		old_cred = ovl_override_creds(inode->i_sb);
 669		/*
 670		 * Store immutable/append-only flags in xattr and clear them
 671		 * in upper fileattr (in case they were set by older kernel)
 672		 * so children of "ovl-immutable" directories and lower aliases
 673		 * of "ovl-immutable" hardlinks can be copied up.
 674		 * Clear xattr when flags are cleared.
 675		 */
 676		err = ovl_set_protattr(inode, upperpath.dentry, fa);
 677		if (!err)
 678			err = ovl_real_fileattr_set(&upperpath, fa);
 679		ovl_revert_creds(old_cred);
 680		ovl_drop_write(dentry);
 681
 682		/*
 683		 * Merge real inode flags with inode flags read from
 684		 * overlay.protattr xattr
 685		 */
 686		flags = ovl_inode_real(inode)->i_flags & OVL_COPY_I_FLAGS_MASK;
 687
 688		BUILD_BUG_ON(OVL_PROT_I_FLAGS_MASK & ~OVL_COPY_I_FLAGS_MASK);
 689		flags |= inode->i_flags & OVL_PROT_I_FLAGS_MASK;
 690		inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);
 691
 692		/* Update ctime */
 693		ovl_copyattr(inode);
 694	}
 695out:
 696	return err;
 697}
 698
 699/* Convert inode protection flags to fileattr flags */
 700static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa)
 701{
 702	BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL);
 703	BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
 704
 705	if (inode->i_flags & S_APPEND) {
 706		fa->flags |= FS_APPEND_FL;
 707		fa->fsx_xflags |= FS_XFLAG_APPEND;
 708	}
 709	if (inode->i_flags & S_IMMUTABLE) {
 710		fa->flags |= FS_IMMUTABLE_FL;
 711		fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
 712	}
 713}
 714
 715int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa)
 716{
 717	int err;
 718
 719	err = ovl_security_fileattr(realpath, fa, false);
 720	if (err)
 721		return err;
 722
 723	err = vfs_fileattr_get(realpath->dentry, fa);
 724	if (err == -ENOIOCTLCMD)
 725		err = -ENOTTY;
 726	return err;
 727}
 728
 729int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa)
 730{
 731	struct inode *inode = d_inode(dentry);
 732	struct path realpath;
 733	const struct cred *old_cred;
 734	int err;
 735
 736	ovl_path_real(dentry, &realpath);
 737
 738	old_cred = ovl_override_creds(inode->i_sb);
 739	err = ovl_real_fileattr_get(&realpath, fa);
 740	ovl_fileattr_prot_flags(inode, fa);
 741	ovl_revert_creds(old_cred);
 742
 743	return err;
 744}
 745
 746static const struct inode_operations ovl_file_inode_operations = {
 747	.setattr	= ovl_setattr,
 748	.permission	= ovl_permission,
 749	.getattr	= ovl_getattr,
 750	.listxattr	= ovl_listxattr,
 751	.get_inode_acl	= ovl_get_inode_acl,
 752	.get_acl	= ovl_get_acl,
 753	.set_acl	= ovl_set_acl,
 754	.update_time	= ovl_update_time,
 755	.fiemap		= ovl_fiemap,
 756	.fileattr_get	= ovl_fileattr_get,
 757	.fileattr_set	= ovl_fileattr_set,
 758};
 759
 760static const struct inode_operations ovl_symlink_inode_operations = {
 761	.setattr	= ovl_setattr,
 762	.get_link	= ovl_get_link,
 763	.getattr	= ovl_getattr,
 764	.listxattr	= ovl_listxattr,
 765	.update_time	= ovl_update_time,
 766};
 767
 768static const struct inode_operations ovl_special_inode_operations = {
 769	.setattr	= ovl_setattr,
 770	.permission	= ovl_permission,
 771	.getattr	= ovl_getattr,
 772	.listxattr	= ovl_listxattr,
 773	.get_inode_acl	= ovl_get_inode_acl,
 774	.get_acl	= ovl_get_acl,
 775	.set_acl	= ovl_set_acl,
 776	.update_time	= ovl_update_time,
 777};
 778
 779static const struct address_space_operations ovl_aops = {
 780	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
 781	.direct_IO		= noop_direct_IO,
 782};
 783
 784/*
 785 * It is possible to stack an overlayfs instance on top of another
 786 * overlayfs instance as a lower layer. We need to annotate the
 787 * stackable i_mutex locks according to stack level of the super
 788 * block instance. An overlayfs instance can never be in stack
 789 * depth 0 (there is always a real fs below it).  An overlayfs
 790 * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
 791 *
 792 * For example, here is a snip from /proc/lockdep_chains after
 793 * dir_iterate of nested overlayfs:
 794 *
 795 * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
 796 * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
 797 * [...] &type->i_mutex_dir_key        (stack_depth=0)
 798 *
 799 * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
 800 *
 801 * This chain is valid:
 802 * - inode->i_rwsem			(inode_lock[2])
 803 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 804 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 805 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 806 *
 807 * And this chain is valid:
 808 * - inode->i_rwsem			(inode_lock[2])
 809 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 810 * - lowerinode->i_rwsem		(inode_lock[1])
 811 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 812 *
 813 * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
 814 * held, because it is in reverse order of the non-nested case using the same
 815 * upper fs:
 816 * - inode->i_rwsem			(inode_lock[1])
 817 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 818 * - OVL_I(inode)->lock			(ovl_inode_lock[1])
 819 */
 820#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
 821
 822static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
 823{
 824#ifdef CONFIG_LOCKDEP
 825	static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
 826	static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
 827	static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];
 828
 829	int depth = inode->i_sb->s_stack_depth - 1;
 830
 831	if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
 832		depth = 0;
 833
 834	if (S_ISDIR(inode->i_mode))
 835		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
 836	else
 837		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);
 838
 839	lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
 840#endif
 841}
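/*
 * Illustrative note: for an overlay mounted directly over a regular fs such
 * as ext4, sb->s_stack_depth is 1, so its inode locks use index 0 of the key
 * arrays above; an overlay stacked on another overlay has depth 2 and uses
 * index 1, which is what produces the two distinct
 * &ovl_i_mutex_dir_key[depth] lockdep classes quoted in the comment before
 * OVL_MAX_NESTING.
 */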
 842
 843static void ovl_next_ino(struct inode *inode)
 844{
 845	struct ovl_fs *ofs = OVL_FS(inode->i_sb);
 846
 847	inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
 848	if (unlikely(!inode->i_ino))
 849		inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
 850}
 851
 852static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
 853{
 854	struct ovl_fs *ofs = OVL_FS(inode->i_sb);
 855	int xinobits = ovl_xino_bits(ofs);
 856	unsigned int xinoshift = 64 - xinobits;
 857
 858	/*
 859	 * When d_ino is consistent with st_ino (samefs or i_ino has enough
 860	 * bits to encode layer), set the same value used for st_ino to i_ino,
 861	 * so the inode number exposed via /proc/locks and the like will be
 862	 * consistent with d_ino and st_ino values. An i_ino value inconsistent
 863	 * with d_ino also causes nfsd readdirplus to fail.
 864	 */
 865	inode->i_ino = ino;
 866	if (ovl_same_fs(ofs)) {
 867		return;
 868	} else if (xinobits && likely(!(ino >> xinoshift))) {
 869		inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
 870		return;
 871	}
 872
 873	/*
 874	 * For directory inodes on non-samefs with xino disabled or xino
 875	 * overflow, we allocate a non-persistent inode number, to be used for
 876	 * resolving st_ino collisions in ovl_map_dev_ino().
 877	 *
 878	 * To avoid ino collision with legitimate xino values from upper
 879	 * layer (fsid 0), use the lowest xinobit to map the non
 880	 * persistent inode numbers to the unified st_ino address space.
 881	 */
 882	if (S_ISDIR(inode->i_mode)) {
 883		ovl_next_ino(inode);
 884		if (xinobits) {
 885			inode->i_ino &= ~0UL >> xinobits;
 886			inode->i_ino |= 1UL << xinoshift;
 887		}
 888	}
 889}
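/*
 * Illustrative example of the non-persistent mapping above, assuming a
 * 64-bit system and xinobits = 8 (xinoshift = 56): a directory inode number
 * returned by ovl_next_ino() is masked to its low 56 bits and then gets
 * bit 56 (the lowest xino bit) set.  Legitimate mapped xino values always
 * have bit 56 clear (the fsid occupies bits 57 and up), so the two ranges
 * cannot collide.
 */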
 890
 891void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
 892		    unsigned long ino, int fsid)
 893{
 894	struct inode *realinode;
 895	struct ovl_inode *oi = OVL_I(inode);
 896
 897	oi->__upperdentry = oip->upperdentry;
 898	oi->oe = oip->oe;
 899	oi->redirect = oip->redirect;
 900	oi->lowerdata_redirect = oip->lowerdata_redirect;
 901
 902	realinode = ovl_inode_real(inode);
 903	ovl_copyattr(inode);
 904	ovl_copyflags(realinode, inode);
 905	ovl_map_ino(inode, ino, fsid);
 906}
 907
 908static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
 909{
 910	inode->i_mode = mode;
 911	inode->i_flags |= S_NOCMTIME;
 912#ifdef CONFIG_FS_POSIX_ACL
 913	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
 914#endif
 915
 916	ovl_lockdep_annotate_inode_mutex_key(inode);
 917
 918	switch (mode & S_IFMT) {
 919	case S_IFREG:
 920		inode->i_op = &ovl_file_inode_operations;
 921		inode->i_fop = &ovl_file_operations;
 922		inode->i_mapping->a_ops = &ovl_aops;
 923		break;
 924
 925	case S_IFDIR:
 926		inode->i_op = &ovl_dir_inode_operations;
 927		inode->i_fop = &ovl_dir_operations;
 928		break;
 929
 930	case S_IFLNK:
 931		inode->i_op = &ovl_symlink_inode_operations;
 932		break;
 933
 934	default:
 935		inode->i_op = &ovl_special_inode_operations;
 936		init_special_inode(inode, mode, rdev);
 937		break;
 938	}
 939}
 940
 941/*
 942 * With inodes index enabled, an overlay inode nlink counts the union of upper
 943 * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
 944 * upper inode, the following nlink modifying operations can happen:
 945 *
 946 * 1. Lower hardlink copy up
 947 * 2. Upper hardlink created, unlinked or renamed over
 948 * 3. Lower hardlink whiteout or renamed over
 949 *
 950 * For the first, copy up case, the union nlink does not change, whether the
 951 * operation succeeds or fails, but the upper inode nlink may change.
 952 * Therefore, before copy up, we store the union nlink value relative to the
 953 * lower inode nlink in the index inode xattr .overlay.nlink.
 954 *
 955 * For the second, upper hardlink case, the union nlink should be incremented
 956 * or decremented IFF the operation succeeds, aligned with nlink change of the
 957 * upper inode. Therefore, before link/unlink/rename, we store the union nlink
 958 * value relative to the upper inode nlink in the index inode.
 959 *
 960 * For the last, lower cover up case, we simplify things by preceding the
 961 * whiteout or cover up with copy up. This makes sure that there is an index
 962 * upper inode where the nlink xattr can be stored before the copied up upper
 963 * entry is unlinked.
 964 */
 965#define OVL_NLINK_ADD_UPPER	(1 << 0)
 966
 967/*
 968 * On-disk format for indexed nlink:
 969 *
 970 * nlink relative to the upper inode - "U[+-]NUM"
 971 * nlink relative to the lower inode - "L[+-]NUM"
 972 */
 973
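/*
 * Worked example of the encoding above (illustrative): with overlay inode
 * i_nlink == 3, an upper inode with i_nlink == 2 is recorded as "U+1" and a
 * lower inode with i_nlink == 4 as "L-1".  ovl_get_nlink() later adds the
 * stored difference back to the current nlink of whichever real inode the
 * prefix refers to.
 */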
 974static int ovl_set_nlink_common(struct dentry *dentry,
 975				struct dentry *realdentry, const char *format)
 976{
 977	struct inode *inode = d_inode(dentry);
 978	struct inode *realinode = d_inode(realdentry);
 979	char buf[13];
 980	int len;
 981
 982	len = snprintf(buf, sizeof(buf), format,
 983		       (int) (inode->i_nlink - realinode->i_nlink));
 984
 985	if (WARN_ON(len >= sizeof(buf)))
 986		return -EIO;
 987
 988	return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
 989			    OVL_XATTR_NLINK, buf, len);
 990}
 991
 992int ovl_set_nlink_upper(struct dentry *dentry)
 993{
 994	return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
 995}
 996
 997int ovl_set_nlink_lower(struct dentry *dentry)
 998{
 999	return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
1000}
1001
1002unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
1003			   struct dentry *upperdentry,
1004			   unsigned int fallback)
1005{
1006	int nlink_diff;
1007	int nlink;
1008	char buf[13];
1009	int err;
1010
1011	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
1012		return fallback;
1013
1014	err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
1015				 &buf, sizeof(buf) - 1);
1016	if (err < 0)
1017		goto fail;
1018
1019	buf[err] = '\0';
1020	if ((buf[0] != 'L' && buf[0] != 'U') ||
1021	    (buf[1] != '+' && buf[1] != '-'))
1022		goto fail;
1023
1024	err = kstrtoint(buf + 1, 10, &nlink_diff);
1025	if (err < 0)
1026		goto fail;
1027
1028	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
1029	nlink += nlink_diff;
1030
1031	if (nlink <= 0)
1032		goto fail;
1033
1034	return nlink;
1035
1036fail:
1037	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
1038			    upperdentry, err);
1039	return fallback;
1040}
1041
1042struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
1043{
1044	struct inode *inode;
1045
1046	inode = new_inode(sb);
1047	if (inode)
1048		ovl_fill_inode(inode, mode, rdev);
1049
1050	return inode;
1051}
1052
1053static int ovl_inode_test(struct inode *inode, void *data)
1054{
1055	return inode->i_private == data;
1056}
1057
1058static int ovl_inode_set(struct inode *inode, void *data)
1059{
1060	inode->i_private = data;
1061	return 0;
1062}
1063
1064static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
1065			     struct dentry *upperdentry, bool strict)
1066{
1067	/*
1068	 * For directories, @strict verify from lookup path performs consistency
1069	 * checks, so NULL lower/upper in dentry must match NULL lower/upper in
1070	 * inode. Non @strict verify from NFS handle decode path passes NULL for
1071	 * 'unknown' lower/upper.
1072	 */
1073	if (S_ISDIR(inode->i_mode) && strict) {
1074		/* Real lower dir moved to upper layer under us? */
1075		if (!lowerdentry && ovl_inode_lower(inode))
1076			return false;
1077
1078		/* Lookup of an uncovered redirect origin? */
1079		if (!upperdentry && ovl_inode_upper(inode))
1080			return false;
1081	}
1082
1083	/*
1084	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
1085	 * This happens when finding a copied up overlay inode for a renamed
1086	 * or hardlinked overlay dentry and lower dentry cannot be followed
1087	 * by origin because lower fs does not support file handles.
1088	 */
1089	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
1090		return false;
1091
1092	/*
1093	 * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
1094	 * This happens when finding a lower alias for a copied up hard link.
1095	 */
1096	if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
1097		return false;
1098
1099	return true;
1100}
1101
1102struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
1103			       bool is_upper)
1104{
1105	struct inode *inode, *key = d_inode(real);
1106
1107	inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
1108	if (!inode)
1109		return NULL;
1110
1111	if (!ovl_verify_inode(inode, is_upper ? NULL : real,
1112			      is_upper ? real : NULL, false)) {
1113		iput(inode);
1114		return ERR_PTR(-ESTALE);
1115	}
1116
1117	return inode;
1118}
1119
1120bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
1121{
1122	struct inode *key = d_inode(dir);
1123	struct inode *trap;
1124	bool res;
1125
1126	trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
1127	if (!trap)
1128		return false;
1129
1130	res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
1131				  !ovl_inode_lower(trap);
1132
1133	iput(trap);
1134	return res;
1135}
1136
1137/*
1138 * Create an inode cache entry for the layer root dir that will intentionally
1139 * fail ovl_verify_inode(), so any lookup that finds a layer root
1140 * will fail.
1141 */
1142struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
1143{
1144	struct inode *key = d_inode(dir);
1145	struct inode *trap;
1146
1147	if (!d_is_dir(dir))
1148		return ERR_PTR(-ENOTDIR);
1149
1150	trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
1151			    ovl_inode_set, key);
1152	if (!trap)
1153		return ERR_PTR(-ENOMEM);
1154
1155	if (!(trap->i_state & I_NEW)) {
1156		/* Conflicting layer roots? */
1157		iput(trap);
1158		return ERR_PTR(-ELOOP);
1159	}
1160
1161	trap->i_mode = S_IFDIR;
1162	trap->i_flags = S_DEAD;
1163	unlock_new_inode(trap);
1164
1165	return trap;
1166}
1167
1168/*
1169 * Does overlay inode need to be hashed by lower inode?
1170 */
1171static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
1172			     struct dentry *lower, bool index)
1173{
1174	struct ovl_fs *ofs = OVL_FS(sb);
1175
1176	/* No, if pure upper */
1177	if (!lower)
1178		return false;
1179
1180	/* Yes, if already indexed */
1181	if (index)
1182		return true;
1183
1184	/* Yes, if won't be copied up */
1185	if (!ovl_upper_mnt(ofs))
1186		return true;
1187
1188	/* No, if lower hardlink is or will be broken on copy up */
1189	if ((upper || !ovl_indexdir(sb)) &&
1190	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
1191		return false;
1192
1193	/* No, if non-indexed upper with NFS export */
1194	if (ofs->config.nfs_export && upper)
1195		return false;
1196
1197	/* Otherwise, hash by lower inode for fsnotify */
1198	return true;
1199}
1200
1201static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
1202			       struct inode *key)
1203{
1204	return newinode ? inode_insert5(newinode, (unsigned long) key,
1205					 ovl_inode_test, ovl_inode_set, key) :
1206			  iget5_locked(sb, (unsigned long) key,
1207				       ovl_inode_test, ovl_inode_set, key);
1208}
1209
1210struct inode *ovl_get_inode(struct super_block *sb,
1211			    struct ovl_inode_params *oip)
1212{
1213	struct ovl_fs *ofs = OVL_FS(sb);
1214	struct dentry *upperdentry = oip->upperdentry;
1215	struct ovl_path *lowerpath = ovl_lowerpath(oip->oe);
1216	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
1217	struct inode *inode;
1218	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
1219	struct path realpath = {
1220		.dentry = upperdentry ?: lowerdentry,
1221		.mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
1222	};
1223	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
1224					oip->index);
1225	int fsid = bylower ? lowerpath->layer->fsid : 0;
1226	bool is_dir;
1227	unsigned long ino = 0;
1228	int err = oip->newinode ? -EEXIST : -ENOMEM;
1229
1230	if (!realinode)
1231		realinode = d_inode(lowerdentry);
1232
1233	/*
1234	 * Copy up origin (lower) may exist for non-indexed upper, but we must
1235	 * not use lower as hash key if this is a broken hardlink.
1236	 */
1237	is_dir = S_ISDIR(realinode->i_mode);
1238	if (upperdentry || bylower) {
1239		struct inode *key = d_inode(bylower ? lowerdentry :
1240						      upperdentry);
1241		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
1242
1243		inode = ovl_iget5(sb, oip->newinode, key);
1244		if (!inode)
1245			goto out_err;
1246		if (!(inode->i_state & I_NEW)) {
1247			/*
1248			 * Verify that the underlying files stored in the inode
1249			 * match those in the dentry.
1250			 */
1251			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
1252					      true)) {
1253				iput(inode);
1254				err = -ESTALE;
1255				goto out_err;
1256			}
1257
1258			dput(upperdentry);
1259			ovl_free_entry(oip->oe);
1260			kfree(oip->redirect);
1261			kfree(oip->lowerdata_redirect);
1262			goto out;
1263		}
1264
1265		/* Recalculate nlink for non-dir due to indexing */
1266		if (!is_dir)
1267			nlink = ovl_get_nlink(ofs, lowerdentry, upperdentry,
1268					      nlink);
1269		set_nlink(inode, nlink);
1270		ino = key->i_ino;
1271	} else {
1272		/* Lower hardlink that will be broken on copy up */
1273		inode = new_inode(sb);
1274		if (!inode) {
1275			err = -ENOMEM;
1276			goto out_err;
1277		}
1278		ino = realinode->i_ino;
1279		fsid = lowerpath->layer->fsid;
1280	}
1281	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
1282	ovl_inode_init(inode, oip, ino, fsid);
1283
1284	if (upperdentry && ovl_is_impuredir(sb, upperdentry))
1285		ovl_set_flag(OVL_IMPURE, inode);
1286
1287	if (oip->index)
1288		ovl_set_flag(OVL_INDEX, inode);
1289
1290	if (bylower)
1291		ovl_set_flag(OVL_CONST_INO, inode);
1292
1293	/* Check for non-merge dir that may have whiteouts */
1294	if (is_dir) {
1295		if (((upperdentry && lowerdentry) || ovl_numlower(oip->oe) > 1) ||
1296		    ovl_path_check_origin_xattr(ofs, &realpath)) {
1297			ovl_set_flag(OVL_WHITEOUTS, inode);
1298		}
1299	}
1300
1301	/* Check for immutable/append-only inode flags in xattr */
1302	if (upperdentry)
1303		ovl_check_protattr(inode, upperdentry);
1304
1305	if (inode->i_state & I_NEW)
1306		unlock_new_inode(inode);
1307out:
1308	return inode;
1309
1310out_err:
1311	pr_warn_ratelimited("failed to get inode (%i)\n", err);
1312	inode = ERR_PTR(err);
1313	goto out;
1314}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2011 Novell Inc.
   5 */
   6
   7#include <linux/fs.h>
   8#include <linux/slab.h>
   9#include <linux/cred.h>
  10#include <linux/xattr.h>
  11#include <linux/posix_acl.h>
  12#include <linux/ratelimit.h>
  13#include <linux/fiemap.h>
 
 
 
 
 
  14#include "overlayfs.h"
  15
  16
  17int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 
  18{
  19	int err;
 
  20	bool full_copy_up = false;
  21	struct dentry *upperdentry;
  22	const struct cred *old_cred;
  23
  24	err = setattr_prepare(dentry, attr);
  25	if (err)
  26		return err;
  27
  28	err = ovl_want_write(dentry);
  29	if (err)
  30		goto out;
  31
  32	if (attr->ia_valid & ATTR_SIZE) {
  33		struct inode *realinode = d_inode(ovl_dentry_real(dentry));
  34
  35		err = -ETXTBSY;
  36		if (atomic_read(&realinode->i_writecount) < 0)
  37			goto out_drop_write;
  38
  39		/* Truncate should trigger data copy up as well */
  40		full_copy_up = true;
  41	}
  42
  43	if (!full_copy_up)
  44		err = ovl_copy_up(dentry);
  45	else
  46		err = ovl_copy_up_with_data(dentry);
  47	if (!err) {
  48		struct inode *winode = NULL;
  49
  50		upperdentry = ovl_dentry_upper(dentry);
  51
  52		if (attr->ia_valid & ATTR_SIZE) {
  53			winode = d_inode(upperdentry);
  54			err = get_write_access(winode);
  55			if (err)
  56				goto out_drop_write;
  57		}
  58
  59		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
  60			attr->ia_valid &= ~ATTR_MODE;
  61
  62		/*
  63		 * We might have to translate ovl file into real file object
  64		 * once use cases emerge.  For now, simply don't let underlying
  65		 * filesystem rely on attr->ia_file
  66		 */
  67		attr->ia_valid &= ~ATTR_FILE;
  68
  69		/*
  70		 * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
  71		 * set.  Overlayfs does not pass O_TRUNC flag to underlying
  72		 * filesystem during open -> do not pass ATTR_OPEN.  This
  73		 * disables optimization in fuse which assumes open(O_TRUNC)
  74		 * already set file size to 0.  But we never passed O_TRUNC to
  75		 * fuse.  So by clearing ATTR_OPEN, fuse will be forced to send
  76		 * setattr request to server.
  77		 */
  78		attr->ia_valid &= ~ATTR_OPEN;
  79
 
 
 
 
  80		inode_lock(upperdentry->d_inode);
  81		old_cred = ovl_override_creds(dentry->d_sb);
  82		err = notify_change(upperdentry, attr, NULL);
  83		revert_creds(old_cred);
  84		if (!err)
  85			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
  86		inode_unlock(upperdentry->d_inode);
 
  87
 
  88		if (winode)
  89			put_write_access(winode);
  90	}
  91out_drop_write:
  92	ovl_drop_write(dentry);
  93out:
  94	return err;
  95}
  96
  97static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
  98{
  99	bool samefs = ovl_same_fs(dentry->d_sb);
 100	unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
 
 101	unsigned int xinoshift = 64 - xinobits;
 102
 103	if (samefs) {
 104		/*
 105		 * When all layers are on the same fs, all real inode
 106		 * number are unique, so we use the overlay st_dev,
 107		 * which is friendly to du -x.
 108		 */
 109		stat->dev = dentry->d_sb->s_dev;
 110		return 0;
 111	} else if (xinobits) {
 112		/*
 113		 * All inode numbers of underlying fs should not be using the
 114		 * high xinobits, so we use high xinobits to partition the
 115		 * overlay st_ino address space. The high bits holds the fsid
 116		 * (upper fsid is 0). The lowest xinobit is reserved for mapping
 117		 * the non-peresistent inode numbers range in case of overflow.
 118		 * This way all overlay inode numbers are unique and use the
 119		 * overlay st_dev.
 120		 */
 121		if (likely(!(stat->ino >> xinoshift))) {
 122			stat->ino |= ((u64)fsid) << (xinoshift + 1);
 123			stat->dev = dentry->d_sb->s_dev;
 124			return 0;
 125		} else if (ovl_xino_warn(dentry->d_sb)) {
 126			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
 127					    dentry, stat->ino, xinobits);
 128		}
 129	}
 130
 131	/* The inode could not be mapped to a unified st_ino address space */
 132	if (S_ISDIR(dentry->d_inode->i_mode)) {
 133		/*
 134		 * Always use the overlay st_dev for directories, so 'find
 135		 * -xdev' will scan the entire overlay mount and won't cross the
 136		 * overlay mount boundaries.
 137		 *
 138		 * If not all layers are on the same fs the pair {real st_ino;
 139		 * overlay st_dev} is not unique, so use the non persistent
 140		 * overlay st_ino for directories.
 141		 */
 142		stat->dev = dentry->d_sb->s_dev;
 143		stat->ino = dentry->d_inode->i_ino;
 144	} else {
 145		/*
 146		 * For non-samefs setup, if we cannot map all layers st_ino
 147		 * to a unified address space, we need to make sure that st_dev
 148		 * is unique per underlying fs, so we use the unique anonymous
 149		 * bdev assigned to the underlying fs.
 150		 */
 151		stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
 152	}
 153
 154	return 0;
 155}
 156
 157int ovl_getattr(const struct path *path, struct kstat *stat,
 158		u32 request_mask, unsigned int flags)
 159{
 160	struct dentry *dentry = path->dentry;
 161	enum ovl_path_type type;
 162	struct path realpath;
 163	const struct cred *old_cred;
 164	bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
 
 165	int fsid = 0;
 166	int err;
 167	bool metacopy_blocks = false;
 168
 169	metacopy_blocks = ovl_is_metacopy_dentry(dentry);
 170
 171	type = ovl_path_real(dentry, &realpath);
 172	old_cred = ovl_override_creds(dentry->d_sb);
 173	err = vfs_getattr(&realpath, stat, request_mask, flags);
 174	if (err)
 175		goto out;
 176
 
 
 
 177	/*
 178	 * For non-dir or same fs, we use st_ino of the copy up origin.
 179	 * This guaranties constant st_dev/st_ino across copy up.
 180	 * With xino feature and non-samefs, we use st_ino of the copy up
 181	 * origin masked with high bits that represent the layer id.
 182	 *
 183	 * If lower filesystem supports NFS file handles, this also guaranties
 184	 * persistent st_ino across mount cycle.
 185	 */
 186	if (!is_dir || ovl_same_dev(dentry->d_sb)) {
 187		if (!OVL_TYPE_UPPER(type)) {
 188			fsid = ovl_layer_lower(dentry)->fsid;
 189		} else if (OVL_TYPE_ORIGIN(type)) {
 190			struct kstat lowerstat;
 191			u32 lowermask = STATX_INO | STATX_BLOCKS |
 192					(!is_dir ? STATX_NLINK : 0);
 193
 194			ovl_path_lower(dentry, &realpath);
 195			err = vfs_getattr(&realpath, &lowerstat,
 196					  lowermask, flags);
 197			if (err)
 198				goto out;
 199
 200			/*
 201			 * Lower hardlinks may be broken on copy up to different
 202			 * upper files, so we cannot use the lower origin st_ino
 203			 * for those different files, even for the same fs case.
 204			 *
 205			 * Similarly, several redirected dirs can point to the
 206			 * same dir on a lower layer. With the "verify_lower"
 207			 * feature, we do not use the lower origin st_ino, if
 208			 * we haven't verified that this redirect is unique.
 209			 *
 210			 * With inodes index enabled, it is safe to use st_ino
 211			 * of an indexed origin. The index validates that the
 212			 * upper hardlink is not broken and that a redirected
 213			 * dir is the only redirect to that origin.
 214			 */
 215			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
 216			    (!ovl_verify_lower(dentry->d_sb) &&
 217			     (is_dir || lowerstat.nlink == 1))) {
 218				fsid = ovl_layer_lower(dentry)->fsid;
 219				stat->ino = lowerstat.ino;
 220			}
 221
 222			/*
 223			 * If we are querying a metacopy dentry and lower
 224			 * dentry is data dentry, then use the blocks we
 225			 * queried just now. We don't have to do additional
 226			 * vfs_getattr(). If lower itself is metacopy, then
 227			 * additional vfs_getattr() is unavoidable.
 228			 */
 229			if (metacopy_blocks &&
 230			    realpath.dentry == ovl_dentry_lowerdata(dentry)) {
 231				stat->blocks = lowerstat.blocks;
 232				metacopy_blocks = false;
 233			}
 234		}
 235
 236		if (metacopy_blocks) {
 237			/*
 238			 * If lower is not same as lowerdata or if there was
 239			 * no origin on upper, we can end up here.
 
 
 240			 */
 241			struct kstat lowerdatastat;
 242			u32 lowermask = STATX_BLOCKS;
 243
 244			ovl_path_lowerdata(dentry, &realpath);
 245			err = vfs_getattr(&realpath, &lowerdatastat,
 246					  lowermask, flags);
 247			if (err)
 248				goto out;
 
 
 
 
 
 249			stat->blocks = lowerdatastat.blocks;
 250		}
 251	}
 252
 253	err = ovl_map_dev_ino(dentry, stat, fsid);
 254	if (err)
 255		goto out;
 256
 257	/*
 258	 * It's probably not worth it to count subdirs to get the
 259	 * correct link count.  nlink=1 seems to pacify 'find' and
 260	 * other utilities.
 261	 */
 262	if (is_dir && OVL_TYPE_MERGE(type))
 263		stat->nlink = 1;
 264
 265	/*
 266	 * Return the overlay inode nlinks for indexed upper inodes.
 267	 * Overlay inode nlink counts the union of the upper hardlinks
 268	 * and non-covered lower hardlinks. It does not include the upper
 269	 * index hardlink.
 270	 */
 271	if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
 272		stat->nlink = dentry->d_inode->i_nlink;
 273
 274out:
 275	revert_creds(old_cred);
 276
 277	return err;
 278}
 279
 280int ovl_permission(struct inode *inode, int mask)
 
 281{
 282	struct inode *upperinode = ovl_inode_upper(inode);
 283	struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
 
 284	const struct cred *old_cred;
 285	int err;
 286
 287	/* Careful in RCU walk mode */
 
 288	if (!realinode) {
 289		WARN_ON(!(mask & MAY_NOT_BLOCK));
 290		return -ECHILD;
 291	}
 292
 293	/*
 294	 * Check overlay inode with the creds of task and underlying inode
 295	 * with creds of mounter
 296	 */
 297	err = generic_permission(inode, mask);
 298	if (err)
 299		return err;
 300
 301	old_cred = ovl_override_creds(inode->i_sb);
 302	if (!upperinode &&
 303	    !special_file(realinode->i_mode) && mask & MAY_WRITE) {
 304		mask &= ~(MAY_WRITE | MAY_APPEND);
 305		/* Make sure mounter can read file for copy up later */
 306		mask |= MAY_READ;
 307	}
 308	err = inode_permission(realinode, mask);
 309	revert_creds(old_cred);
 310
 311	return err;
 312}
 313
 314static const char *ovl_get_link(struct dentry *dentry,
 315				struct inode *inode,
 316				struct delayed_call *done)
 317{
 318	const struct cred *old_cred;
 319	const char *p;
 320
 321	if (!dentry)
 322		return ERR_PTR(-ECHILD);
 323
 324	old_cred = ovl_override_creds(dentry->d_sb);
 325	p = vfs_get_link(ovl_dentry_real(dentry), done);
 326	revert_creds(old_cred);
 327	return p;
 328}
 329
 330bool ovl_is_private_xattr(const char *name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 331{
 332	return strncmp(name, OVL_XATTR_PREFIX,
 333		       sizeof(OVL_XATTR_PREFIX) - 1) == 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 334}
 335
 336int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
 337		  const void *value, size_t size, int flags)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 338{
 339	int err;
 340	struct dentry *upperdentry = ovl_i_dentry_upper(inode);
 
 
 
 
 341	struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
 342	const struct cred *old_cred;
 343
 344	err = ovl_want_write(dentry);
 345	if (err)
 346		goto out;
 
 
 
 
 347
 348	if (!value && !upperdentry) {
 349		err = vfs_getxattr(realdentry, name, NULL, 0);
 350		if (err < 0)
 351			goto out_drop_write;
 
 
 
 
 
 
 352	}
 353
 354	if (!upperdentry) {
 355		err = ovl_copy_up(dentry);
 356		if (err)
 357			goto out_drop_write;
 358
 359		realdentry = ovl_dentry_upper(dentry);
 360	}
 361
 
 
 
 
 362	old_cred = ovl_override_creds(dentry->d_sb);
 363	if (value)
 364		err = vfs_setxattr(realdentry, name, value, size, flags);
 365	else {
 366		WARN_ON(flags != XATTR_REPLACE);
 367		err = vfs_removexattr(realdentry, name);
 368	}
 369	revert_creds(old_cred);
 370
 371	/* copy c/mtime */
 372	ovl_copyattr(d_inode(realdentry), inode);
 373
 374out_drop_write:
 375	ovl_drop_write(dentry);
 376out:
 377	return err;
 378}
 379
 380int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
 381		  void *value, size_t size)
 382{
 383	ssize_t res;
 384	const struct cred *old_cred;
 385	struct dentry *realdentry =
 386		ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);
 387
 388	old_cred = ovl_override_creds(dentry->d_sb);
 389	res = vfs_getxattr(realdentry, name, value, size);
 390	revert_creds(old_cred);
 391	return res;
 392}
 
 
 
 393
 394static bool ovl_can_list(const char *s)
 395{
 396	/* List all non-trusted xatts */
 397	if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
 398		return true;
 
 
 
 399
 400	/* Never list trusted.overlay, list other trusted for superuser only */
 401	return !ovl_is_private_xattr(s) &&
 402	       ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
 403}
 404
 405ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 406{
 407	struct dentry *realdentry = ovl_dentry_real(dentry);
 408	ssize_t res;
 409	size_t len;
 410	char *s;
 411	const struct cred *old_cred;
 412
 413	old_cred = ovl_override_creds(dentry->d_sb);
 414	res = vfs_listxattr(realdentry, list, size);
 415	revert_creds(old_cred);
 416	if (res <= 0 || size == 0)
 417		return res;
 418
 419	/* filter out private xattrs */
 420	for (s = list, len = res; len;) {
 421		size_t slen = strnlen(s, len) + 1;
 422
 423		/* underlying fs providing us with an broken xattr list? */
 424		if (WARN_ON(slen > len))
 425			return -EIO;
 426
 427		len -= slen;
 428		if (!ovl_can_list(s)) {
 429			res -= slen;
 430			memmove(s, s + slen, len);
 431		} else {
 432			s += slen;
 433		}
 434	}
 435
 436	return res;
 437}
 
 438
 439struct posix_acl *ovl_get_acl(struct inode *inode, int type)
 440{
 441	struct inode *realinode = ovl_inode_real(inode);
 442	const struct cred *old_cred;
 443	struct posix_acl *acl;
 444
 445	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
 446		return NULL;
 447
 448	old_cred = ovl_override_creds(inode->i_sb);
 449	acl = get_acl(realinode, type);
 450	revert_creds(old_cred);
 451
 452	return acl;
 453}
 454
 455int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
 456{
 457	if (flags & S_ATIME) {
 458		struct ovl_fs *ofs = inode->i_sb->s_fs_info;
 459		struct path upperpath = {
 460			.mnt = ovl_upper_mnt(ofs),
 461			.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
 462		};
 463
 464		if (upperpath.dentry) {
 465			touch_atime(&upperpath);
 466			inode->i_atime = d_inode(upperpath.dentry)->i_atime;
 
 467		}
 468	}
 469	return 0;
 470}
 471
 472static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 473		      u64 start, u64 len)
 474{
 475	int err;
 476	struct inode *realinode = ovl_inode_real(inode);
 477	const struct cred *old_cred;
 478
 479	if (!realinode->i_op->fiemap)
 480		return -EOPNOTSUPP;
 481
 482	old_cred = ovl_override_creds(inode->i_sb);
 483	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
 484	revert_creds(old_cred);
 485
 486	return err;
 487}
 488
 489static const struct inode_operations ovl_file_inode_operations = {
 490	.setattr	= ovl_setattr,
 491	.permission	= ovl_permission,
 492	.getattr	= ovl_getattr,
 493	.listxattr	= ovl_listxattr,
 494	.get_acl	= ovl_get_acl,
 495	.update_time	= ovl_update_time,
 496	.fiemap		= ovl_fiemap,
 497};
 498
 499static const struct inode_operations ovl_symlink_inode_operations = {
 500	.setattr	= ovl_setattr,
 501	.get_link	= ovl_get_link,
 502	.getattr	= ovl_getattr,
 503	.listxattr	= ovl_listxattr,
 504	.update_time	= ovl_update_time,
 505};
 506
 507static const struct inode_operations ovl_special_inode_operations = {
 508	.setattr	= ovl_setattr,
 509	.permission	= ovl_permission,
 510	.getattr	= ovl_getattr,
 511	.listxattr	= ovl_listxattr,
 512	.get_acl	= ovl_get_acl,
 513	.update_time	= ovl_update_time,
 514};
 515
 516static const struct address_space_operations ovl_aops = {
 517	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
 518	.direct_IO		= noop_direct_IO,
 519};
 520
 521/*
  522 * It is possible to stack an overlayfs instance on top of another
  523 * overlayfs instance as a lower layer. We need to annotate the
 524 * stackable i_mutex locks according to stack level of the super
 525 * block instance. An overlayfs instance can never be in stack
 526 * depth 0 (there is always a real fs below it).  An overlayfs
  527 * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
 528 *
 529 * For example, here is a snip from /proc/lockdep_chains after
 530 * dir_iterate of nested overlayfs:
 531 *
 532 * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
 533 * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
 534 * [...] &type->i_mutex_dir_key        (stack_depth=0)
 535 *
  536 * Locking order w.r.t. ovl_want_write() is important for nested overlayfs.
 537 *
 538 * This chain is valid:
 539 * - inode->i_rwsem			(inode_lock[2])
 540 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 541 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 542 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 543 *
 544 * And this chain is valid:
 545 * - inode->i_rwsem			(inode_lock[2])
 546 * - OVL_I(inode)->lock			(ovl_inode_lock[2])
 547 * - lowerinode->i_rwsem		(inode_lock[1])
 548 * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
 549 *
 550 * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
 551 * held, because it is in reverse order of the non-nested case using the same
 552 * upper fs:
 553 * - inode->i_rwsem			(inode_lock[1])
 554 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 555 * - OVL_I(inode)->lock			(ovl_inode_lock[1])
 556 */
 557#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
 558
 559static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
 560{
 561#ifdef CONFIG_LOCKDEP
 562	static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
 563	static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
 564	static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];
 565
 566	int depth = inode->i_sb->s_stack_depth - 1;
 567
 568	if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
 569		depth = 0;
 570
 571	if (S_ISDIR(inode->i_mode))
 572		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
 573	else
 574		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);
 575
 576	lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
 577#endif
 578}
 579
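/* Allocate the next non-persistent inode number, skipping 0 on wraparound */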
 580static void ovl_next_ino(struct inode *inode)
 581{
 582	struct ovl_fs *ofs = inode->i_sb->s_fs_info;
 583
 584	inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
 585	if (unlikely(!inode->i_ino))
 586		inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
 587}
 588
 589static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
 590{
 591	int xinobits = ovl_xino_bits(inode->i_sb);
 592	unsigned int xinoshift = 64 - xinobits;
 593
 594	/*
 595	 * When d_ino is consistent with st_ino (samefs or i_ino has enough
 596	 * bits to encode layer), set the same value used for st_ino to i_ino,
  597	 * so inode numbers exposed via /proc/locks and the like will be
 598	 * consistent with d_ino and st_ino values. An i_ino value inconsistent
 599	 * with d_ino also causes nfsd readdirplus to fail.
 600	 */
 601	inode->i_ino = ino;
 602	if (ovl_same_fs(inode->i_sb)) {
 603		return;
 604	} else if (xinobits && likely(!(ino >> xinoshift))) {
 605		inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
 606		return;
 607	}
 608
 609	/*
 610	 * For directory inodes on non-samefs with xino disabled or xino
 611	 * overflow, we allocate a non-persistent inode number, to be used for
 612	 * resolving st_ino collisions in ovl_map_dev_ino().
 613	 *
 614	 * To avoid ino collision with legitimate xino values from upper
  615	 * layer (fsid 0), use the lowest xinobit to map the non-persistent
  616	 * inode numbers to the unified st_ino address space.
 617	 */
 618	if (S_ISDIR(inode->i_mode)) {
 619		ovl_next_ino(inode);
 620		if (xinobits) {
 621			inode->i_ino &= ~0UL >> xinobits;
 622			inode->i_ino |= 1UL << xinoshift;
 623		}
 624	}
 625}
 626
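/*
 * Wire up the real upper/lower objects for a new overlay inode, copy
 * attributes and flags from the real inode and map the inode number.
 */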
 627void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
 628		    unsigned long ino, int fsid)
 629{
 630	struct inode *realinode;
 631
 632	if (oip->upperdentry)
 633		OVL_I(inode)->__upperdentry = oip->upperdentry;
 634	if (oip->lowerpath && oip->lowerpath->dentry)
 635		OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry));
 636	if (oip->lowerdata)
 637		OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata));
 638
 639	realinode = ovl_inode_real(inode);
 640	ovl_copyattr(realinode, inode);
 641	ovl_copyflags(realinode, inode);
 642	ovl_map_ino(inode, ino, fsid);
 643}
 644
 645static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
 646{
 647	inode->i_mode = mode;
 648	inode->i_flags |= S_NOCMTIME;
 649#ifdef CONFIG_FS_POSIX_ACL
 650	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
 651#endif
 652
 653	ovl_lockdep_annotate_inode_mutex_key(inode);
 654
 655	switch (mode & S_IFMT) {
 656	case S_IFREG:
 657		inode->i_op = &ovl_file_inode_operations;
 658		inode->i_fop = &ovl_file_operations;
 659		inode->i_mapping->a_ops = &ovl_aops;
 660		break;
 661
 662	case S_IFDIR:
 663		inode->i_op = &ovl_dir_inode_operations;
 664		inode->i_fop = &ovl_dir_operations;
 665		break;
 666
 667	case S_IFLNK:
 668		inode->i_op = &ovl_symlink_inode_operations;
 669		break;
 670
 671	default:
 672		inode->i_op = &ovl_special_inode_operations;
 673		init_special_inode(inode, mode, rdev);
 674		break;
 675	}
 676}
 677
 678/*
 679 * With inodes index enabled, an overlay inode nlink counts the union of upper
 680 * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
 681 * upper inode, the following nlink modifying operations can happen:
 682 *
 683 * 1. Lower hardlink copy up
 684 * 2. Upper hardlink created, unlinked or renamed over
 685 * 3. Lower hardlink whiteout or renamed over
 686 *
 687 * For the first, copy up case, the union nlink does not change, whether the
 688 * operation succeeds or fails, but the upper inode nlink may change.
 689 * Therefore, before copy up, we store the union nlink value relative to the
 690 * lower inode nlink in the index inode xattr trusted.overlay.nlink.
 691 *
 692 * For the second, upper hardlink case, the union nlink should be incremented
 693 * or decremented IFF the operation succeeds, aligned with nlink change of the
 694 * upper inode. Therefore, before link/unlink/rename, we store the union nlink
 695 * value relative to the upper inode nlink in the index inode.
 696 *
 697 * For the last, lower cover up case, we simplify things by preceding the
 698 * whiteout or cover up with copy up. This makes sure that there is an index
 699 * upper inode where the nlink xattr can be stored before the copied up upper
  700	 * entry is unlinked.
 701 */
 702#define OVL_NLINK_ADD_UPPER	(1 << 0)
 703
 704/*
 705 * On-disk format for indexed nlink:
 706 *
 707 * nlink relative to the upper inode - "U[+-]NUM"
 708 * nlink relative to the lower inode - "L[+-]NUM"
 709 */
 710
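/*
 * Store the overlay nlink as a signed delta from the real inode's nlink
 * in the nlink xattr on the upper dentry, using the format described
 * above.
 */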
 711static int ovl_set_nlink_common(struct dentry *dentry,
 712				struct dentry *realdentry, const char *format)
 713{
 714	struct inode *inode = d_inode(dentry);
 715	struct inode *realinode = d_inode(realdentry);
 716	char buf[13];
 717	int len;
 718
 719	len = snprintf(buf, sizeof(buf), format,
 720		       (int) (inode->i_nlink - realinode->i_nlink));
 721
 722	if (WARN_ON(len >= sizeof(buf)))
 723		return -EIO;
 724
 725	return ovl_do_setxattr(ovl_dentry_upper(dentry),
 726			       OVL_XATTR_NLINK, buf, len, 0);
 727}
 728
 729int ovl_set_nlink_upper(struct dentry *dentry)
 730{
 731	return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
 732}
 733
 734int ovl_set_nlink_lower(struct dentry *dentry)
 735{
 736	return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
 737}
 738
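/*
 * Reconstruct the overlay nlink from the stored delta: parse the
 * "U[+-]NUM"/"L[+-]NUM" xattr and add the delta to the current nlink of
 * the referenced real inode.  Return @fallback if there is no usable
 * index information.
 */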
 739unsigned int ovl_get_nlink(struct dentry *lowerdentry,
 740			   struct dentry *upperdentry,
 741			   unsigned int fallback)
 742{
 743	int nlink_diff;
 744	int nlink;
 745	char buf[13];
 746	int err;
 747
 748	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
 749		return fallback;
 750
 751	err = vfs_getxattr(upperdentry, OVL_XATTR_NLINK, &buf, sizeof(buf) - 1);
 752	if (err < 0)
 753		goto fail;
 754
 755	buf[err] = '\0';
 756	if ((buf[0] != 'L' && buf[0] != 'U') ||
 757	    (buf[1] != '+' && buf[1] != '-'))
 758		goto fail;
 759
 760	err = kstrtoint(buf + 1, 10, &nlink_diff);
 761	if (err < 0)
 762		goto fail;
 763
 764	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
 765	nlink += nlink_diff;
 766
 767	if (nlink <= 0)
 768		goto fail;
 769
 770	return nlink;
 771
 772fail:
 773	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
 774			    upperdentry, err);
 775	return fallback;
 776}
 777
 778struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
 779{
 780	struct inode *inode;
 781
 782	inode = new_inode(sb);
 783	if (inode)
 784		ovl_fill_inode(inode, mode, rdev);
 785
 786	return inode;
 787}
 788
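/*
 * Overlay inodes are hashed by the address of a real (upper or lower)
 * inode, which is stored in i_private.
 */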
 789static int ovl_inode_test(struct inode *inode, void *data)
 790{
 791	return inode->i_private == data;
 792}
 793
 794static int ovl_inode_set(struct inode *inode, void *data)
 795{
 796	inode->i_private = data;
 797	return 0;
 798}
 799
 800static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
 801			     struct dentry *upperdentry, bool strict)
 802{
 803	/*
 804	 * For directories, @strict verify from lookup path performs consistency
 805	 * checks, so NULL lower/upper in dentry must match NULL lower/upper in
 806	 * inode. Non @strict verify from NFS handle decode path passes NULL for
 807	 * 'unknown' lower/upper.
 808	 */
 809	if (S_ISDIR(inode->i_mode) && strict) {
 810		/* Real lower dir moved to upper layer under us? */
 811		if (!lowerdentry && ovl_inode_lower(inode))
 812			return false;
 813
 814		/* Lookup of an uncovered redirect origin? */
 815		if (!upperdentry && ovl_inode_upper(inode))
 816			return false;
 817	}
 818
 819	/*
 820	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
 821	 * This happens when finding a copied up overlay inode for a renamed
 822	 * or hardlinked overlay dentry and lower dentry cannot be followed
 823	 * by origin because lower fs does not support file handles.
 824	 */
 825	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
 826		return false;
 827
 828	/*
 829	 * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
 830	 * This happens when finding a lower alias for a copied up hard link.
 831	 */
 832	if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
 833		return false;
 834
 835	return true;
 836}
 837
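/*
 * Look up a hashed overlay inode by its real upper or lower inode.
 * Returns NULL if not in cache, or ERR_PTR(-ESTALE) if the cached inode
 * does not match the real dentry.
 */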
 838struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
 839			       bool is_upper)
 840{
 841	struct inode *inode, *key = d_inode(real);
 842
 843	inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
 844	if (!inode)
 845		return NULL;
 846
 847	if (!ovl_verify_inode(inode, is_upper ? NULL : real,
 848			      is_upper ? real : NULL, false)) {
 849		iput(inode);
 850		return ERR_PTR(-ESTALE);
 851	}
 852
 853	return inode;
 854}
 855
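/*
 * Check whether @dir matches a trap inode that was installed for a
 * layer root by ovl_get_trap_inode().
 */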
 856bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
 857{
 858	struct inode *key = d_inode(dir);
 859	struct inode *trap;
 860	bool res;
 861
 862	trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
 863	if (!trap)
 864		return false;
 865
 866	res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
 867				  !ovl_inode_lower(trap);
 868
 869	iput(trap);
 870	return res;
 871}
 872
 873/*
  874 * Create an inode cache entry for a layer root dir that will intentionally
  875 * fail ovl_verify_inode(), so any lookup that finds some layer root
 876 * will fail.
 877 */
 878struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
 879{
 880	struct inode *key = d_inode(dir);
 881	struct inode *trap;
 882
 883	if (!d_is_dir(dir))
 884		return ERR_PTR(-ENOTDIR);
 885
 886	trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
 887			    ovl_inode_set, key);
 888	if (!trap)
 889		return ERR_PTR(-ENOMEM);
 890
 891	if (!(trap->i_state & I_NEW)) {
 892		/* Conflicting layer roots? */
 893		iput(trap);
 894		return ERR_PTR(-ELOOP);
 895	}
 896
 897	trap->i_mode = S_IFDIR;
 898	trap->i_flags = S_DEAD;
 899	unlock_new_inode(trap);
 900
 901	return trap;
 902}
 903
 904/*
 905 * Does overlay inode need to be hashed by lower inode?
 906 */
 907static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
 908			     struct dentry *lower, bool index)
 909{
 910	struct ovl_fs *ofs = sb->s_fs_info;
 911
 912	/* No, if pure upper */
 913	if (!lower)
 914		return false;
 915
 916	/* Yes, if already indexed */
 917	if (index)
 918		return true;
 919
 920	/* Yes, if won't be copied up */
 921	if (!ovl_upper_mnt(ofs))
 922		return true;
 923
 924	/* No, if lower hardlink is or will be broken on copy up */
 925	if ((upper || !ovl_indexdir(sb)) &&
 926	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
 927		return false;
 928
 929	/* No, if non-indexed upper with NFS export */
 930	if (sb->s_export_op && upper)
 931		return false;
 932
 933	/* Otherwise, hash by lower inode for fsnotify */
 934	return true;
 935}
 936
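/*
 * Insert a preallocated inode into the inode cache keyed by the real
 * inode, or find/allocate a hashed inode if none was preallocated.
 */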
 937static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
 938			       struct inode *key)
 939{
 940	return newinode ? inode_insert5(newinode, (unsigned long) key,
 941					 ovl_inode_test, ovl_inode_set, key) :
 942			  iget5_locked(sb, (unsigned long) key,
 943				       ovl_inode_test, ovl_inode_set, key);
 944}
 945
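/*
 * Find a hashed overlay inode matching the real objects described by
 * @oip, or initialize a new one.  Lower hardlinks that will be broken
 * on copy up get an unhashed inode.
 */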
 946struct inode *ovl_get_inode(struct super_block *sb,
 947			    struct ovl_inode_params *oip)
 948{
 949	struct dentry *upperdentry = oip->upperdentry;
 950	struct ovl_path *lowerpath = oip->lowerpath;
 951	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
 952	struct inode *inode;
 953	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
 954	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
 955					oip->index);
 956	int fsid = bylower ? lowerpath->layer->fsid : 0;
 957	bool is_dir;
 958	unsigned long ino = 0;
 959	int err = oip->newinode ? -EEXIST : -ENOMEM;
 960
 961	if (!realinode)
 962		realinode = d_inode(lowerdentry);
 963
 964	/*
 965	 * Copy up origin (lower) may exist for non-indexed upper, but we must
 966	 * not use lower as hash key if this is a broken hardlink.
 967	 */
 968	is_dir = S_ISDIR(realinode->i_mode);
 969	if (upperdentry || bylower) {
 970		struct inode *key = d_inode(bylower ? lowerdentry :
 971						      upperdentry);
 972		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
 973
 974		inode = ovl_iget5(sb, oip->newinode, key);
 975		if (!inode)
 976			goto out_err;
 977		if (!(inode->i_state & I_NEW)) {
 978			/*
 979			 * Verify that the underlying files stored in the inode
 980			 * match those in the dentry.
 981			 */
 982			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
 983					      true)) {
 984				iput(inode);
 985				err = -ESTALE;
 986				goto out_err;
 987			}
 988
 989			dput(upperdentry);
 990			kfree(oip->redirect);
 991			goto out;
 992		}
 993
 994		/* Recalculate nlink for non-dir due to indexing */
 995		if (!is_dir)
 996			nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
 997		set_nlink(inode, nlink);
 998		ino = key->i_ino;
 999	} else {
1000		/* Lower hardlink that will be broken on copy up */
1001		inode = new_inode(sb);
1002		if (!inode) {
1003			err = -ENOMEM;
1004			goto out_err;
1005		}
1006		ino = realinode->i_ino;
1007		fsid = lowerpath->layer->fsid;
1008	}
1009	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
1010	ovl_inode_init(inode, oip, ino, fsid);
1011
1012	if (upperdentry && ovl_is_impuredir(upperdentry))
1013		ovl_set_flag(OVL_IMPURE, inode);
1014
1015	if (oip->index)
1016		ovl_set_flag(OVL_INDEX, inode);
1017
1018	OVL_I(inode)->redirect = oip->redirect;
1019
1020	if (bylower)
1021		ovl_set_flag(OVL_CONST_INO, inode);
1022
1023	/* Check for non-merge dir that may have whiteouts */
1024	if (is_dir) {
1025		if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
1026		    ovl_check_origin_xattr(upperdentry ?: lowerdentry)) {
1027			ovl_set_flag(OVL_WHITEOUTS, inode);
1028		}
1029	}
1030
1031	if (inode->i_state & I_NEW)
1032		unlock_new_inode(inode);
1033out:
1034	return inode;
1035
1036out_err:
1037	pr_warn_ratelimited("failed to get inode (%i)\n", err);
1038	inode = ERR_PTR(err);
1039	goto out;
1040}