   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_mount.h"
  28#include "xfs_inode.h"
  29#include "xfs_ialloc.h"
  30#include "xfs_itable.h"
  31#include "xfs_quota.h"
  32#include "xfs_error.h"
  33#include "xfs_bmap.h"
  34#include "xfs_bmap_btree.h"
  35#include "xfs_trans.h"
  36#include "xfs_trans_space.h"
  37#include "xfs_qm.h"
  38#include "xfs_trace.h"
  39#include "xfs_icache.h"
  40#include "xfs_cksum.h"
   41#include "xfs_dinode.h"
  42
  43/*
  44 * The global quota manager. There is only one of these for the entire
  45 * system, _not_ one per file system. XQM keeps track of the overall
  46 * quota functionality, including maintaining the freelist and hash
  47 * tables of dquots.
  48 */
  49STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
  50STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
  51
  52
  53STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  54/*
  55 * We use the batch lookup interface to iterate over the dquots as it
  56 * currently is the only interface into the radix tree code that allows
  57 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
   58 * operations is fine as all callers run either during mount/umount
  59 * or quotaoff.
  60 */
  61#define XFS_DQ_LOOKUP_BATCH	32
  62
  63STATIC int
  64xfs_qm_dquot_walk(
  65	struct xfs_mount	*mp,
  66	int			type,
  67	int			(*execute)(struct xfs_dquot *dqp, void *data),
  68	void			*data)
  69{
  70	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  71	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  72	uint32_t		next_index;
  73	int			last_error = 0;
  74	int			skipped;
  75	int			nr_found;
  76
  77restart:
  78	skipped = 0;
  79	next_index = 0;
  80	nr_found = 0;
  81
  82	while (1) {
  83		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  84		int		error = 0;
  85		int		i;
  86
  87		mutex_lock(&qi->qi_tree_lock);
  88		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  89					next_index, XFS_DQ_LOOKUP_BATCH);
  90		if (!nr_found) {
  91			mutex_unlock(&qi->qi_tree_lock);
  92			break;
  93		}
  94
  95		for (i = 0; i < nr_found; i++) {
  96			struct xfs_dquot *dqp = batch[i];
  97
  98			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
  99
 100			error = execute(batch[i], data);
 101			if (error == EAGAIN) {
 102				skipped++;
 103				continue;
 104			}
 105			if (error && last_error != EFSCORRUPTED)
 106				last_error = error;
 107		}
 108
 109		mutex_unlock(&qi->qi_tree_lock);
 110
 111		/* bail out if the filesystem is corrupted.  */
 112		if (last_error == EFSCORRUPTED) {
 113			skipped = 0;
 114			break;
  115		}
 116	}
 117
 118	if (skipped) {
 119		delay(1);
 120		goto restart;
 121	}
 122
 123	return last_error;
 124}
 125
 126
 127/*
 128 * Purge a dquot from all tracking data structures and free it.
 129 */
 130STATIC int
 131xfs_qm_dqpurge(
 132	struct xfs_dquot	*dqp,
 133	void			*data)
 134{
 135	struct xfs_mount	*mp = dqp->q_mount;
 136	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 137
 138	xfs_dqlock(dqp);
 139	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
 140		xfs_dqunlock(dqp);
 141		return EAGAIN;
 142	}
 143
 144	dqp->dq_flags |= XFS_DQ_FREEING;
 145
 146	xfs_dqflock(dqp);
 147
 148	/*
 149	 * If we are turning this type of quotas off, we don't care
 150	 * about the dirty metadata sitting in this dquot. OTOH, if
 151	 * we're unmounting, we do care, so we flush it and wait.
 152	 */
 153	if (XFS_DQ_IS_DIRTY(dqp)) {
 154		struct xfs_buf	*bp = NULL;
 155		int		error;
 156
 157		/*
 158		 * We don't care about getting disk errors here. We need
 159		 * to purge this dquot anyway, so we go ahead regardless.
 160		 */
 161		error = xfs_qm_dqflush(dqp, &bp);
 162		if (error) {
 163			xfs_warn(mp, "%s: dquot %p flush failed",
 164				__func__, dqp);
  165		} else {
 166			error = xfs_bwrite(bp);
 167			xfs_buf_relse(bp);
 168		}
 169		xfs_dqflock(dqp);
  170	}
  171
 172	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 173	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 174	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
 175
 176	xfs_dqfunlock(dqp);
 177	xfs_dqunlock(dqp);
 178
 179	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
 180			  be32_to_cpu(dqp->q_core.d_id));
 181	qi->qi_dquots--;
 182
 183	/*
 184	 * We move dquots to the freelist as soon as their reference count
 185	 * hits zero, so it really should be on the freelist here.
 186	 */
 187	ASSERT(!list_empty(&dqp->q_lru));
 188	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 189	XFS_STATS_DEC(xs_qm_dquot_unused);
 190
 191	xfs_qm_dqdestroy(dqp);
 192	return 0;
 193}
 194
 195/*
  196 * Release the group or project dquot pointers the user dquots may be carrying
 197 * around as a hint, and proceed to purge the user dquot cache if requested.
 198*/
 199STATIC int
 200xfs_qm_dqpurge_hints(
 201	struct xfs_dquot	*dqp,
 202	void			*data)
 203{
 204	struct xfs_dquot	*gdqp = NULL;
 205	struct xfs_dquot	*pdqp = NULL;
 206	uint			flags = *((uint *)data);
 207
 208	xfs_dqlock(dqp);
 209	if (dqp->dq_flags & XFS_DQ_FREEING) {
 210		xfs_dqunlock(dqp);
 211		return EAGAIN;
 212	}
 213
 214	/* If this quota has a hint attached, prepare for releasing it now */
 215	gdqp = dqp->q_gdquot;
 216	if (gdqp)
 217		dqp->q_gdquot = NULL;
 218
 219	pdqp = dqp->q_pdquot;
 220	if (pdqp)
 221		dqp->q_pdquot = NULL;
  222
 223	xfs_dqunlock(dqp);
 224
 225	if (gdqp)
 226		xfs_qm_dqrele(gdqp);
 227	if (pdqp)
 228		xfs_qm_dqrele(pdqp);
 229
 230	if (flags & XFS_QMOPT_UQUOTA)
 231		return xfs_qm_dqpurge(dqp, NULL);
 232
 233	return 0;
 234}
 235
 236/*
 237 * Purge the dquot cache.
 238 */
 239void
 240xfs_qm_dqpurge_all(
 241	struct xfs_mount	*mp,
 242	uint			flags)
 243{
 244	/*
 245	 * We have to release group/project dquot hint(s) from the user dquot
 246	 * at first if they are there, otherwise we would run into an infinite
 247	 * loop while walking through radix tree to purge other type of dquots
 248	 * since their refcount is not zero if the user dquot refers to them
 249	 * as hint.
 250	 *
  251	 * Calling the special xfs_qm_dqpurge_hints() will end up going through the
  252	 * general xfs_qm_dqpurge() against the user dquot cache if requested.
 253	 */
 254	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
 255
 256	if (flags & XFS_QMOPT_GQUOTA)
 257		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 258	if (flags & XFS_QMOPT_PQUOTA)
 259		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
 260}
 261
 262/*
 263 * Just destroy the quotainfo structure.
 264 */
 265void
 266xfs_qm_unmount(
 267	struct xfs_mount	*mp)
 268{
 269	if (mp->m_quotainfo) {
 270		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 271		xfs_qm_destroy_quotainfo(mp);
 272	}
 273}
 274
 275
 276/*
 277 * This is called from xfs_mountfs to start quotas and initialize all
 278 * necessary data structures like quotainfo.  This is also responsible for
 279 * running a quotacheck as necessary.  We are guaranteed that the superblock
 280 * is consistently read in at this point.
 281 *
 282 * If we fail here, the mount will continue with quota turned off. We don't
  283 * need to indicate success or failure at all.
 284 */
 285void
 286xfs_qm_mount_quotas(
 287	xfs_mount_t	*mp)
 288{
 289	int		error = 0;
 290	uint		sbf;
 291
 292	/*
  293	 * If quotas on realtime volumes are not supported, we disable
 294	 * quotas immediately.
 295	 */
 296	if (mp->m_sb.sb_rextents) {
 297		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
 298		mp->m_qflags = 0;
 299		goto write_changes;
 300	}
 301
 302	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 303
 304	/*
 305	 * Allocate the quotainfo structure inside the mount struct, and
 306	 * create quotainode(s), and change/rev superblock if necessary.
 307	 */
 308	error = xfs_qm_init_quotainfo(mp);
 309	if (error) {
 310		/*
 311		 * We must turn off quotas.
 312		 */
 313		ASSERT(mp->m_quotainfo == NULL);
 314		mp->m_qflags = 0;
 315		goto write_changes;
 316	}
 317	/*
 318	 * If any of the quotas are not consistent, do a quotacheck.
 319	 */
 320	if (XFS_QM_NEED_QUOTACHECK(mp)) {
 321		error = xfs_qm_quotacheck(mp);
 322		if (error) {
 323			/* Quotacheck failed and disabled quotas. */
 324			return;
 325		}
 326	}
 327	/* 
 328	 * If one type of quotas is off, then it will lose its
 329	 * quotachecked status, since we won't be doing accounting for
 330	 * that type anymore.
 331	 */
 332	if (!XFS_IS_UQUOTA_ON(mp))
 333		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
 334	if (!XFS_IS_GQUOTA_ON(mp))
 335		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
 336	if (!XFS_IS_PQUOTA_ON(mp))
 337		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
 338
 339 write_changes:
 340	/*
 341	 * We actually don't have to acquire the m_sb_lock at all.
 342	 * This can only be called from mount, and that's single threaded. XXX
 343	 */
 344	spin_lock(&mp->m_sb_lock);
 345	sbf = mp->m_sb.sb_qflags;
 346	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
 347	spin_unlock(&mp->m_sb_lock);
 348
 349	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
 350		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
 351			/*
 352			 * We could only have been turning quotas off.
 353			 * We aren't in very good shape actually because
 354			 * the incore structures are convinced that quotas are
  355	 * off, but the on-disk superblock doesn't know that!
 356			 */
 357			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
 358			xfs_alert(mp, "%s: Superblock update failed!",
 359				__func__);
 360		}
 361	}
 362
 363	if (error) {
 364		xfs_warn(mp, "Failed to initialize disk quotas.");
 365		return;
 366	}
 367}
 368
 369/*
 370 * Called from the vfsops layer.
 371 */
 372void
 373xfs_qm_unmount_quotas(
 374	xfs_mount_t	*mp)
 375{
 376	/*
 377	 * Release the dquots that root inode, et al might be holding,
 378	 * before we flush quotas and blow away the quotainfo structure.
 379	 */
 380	ASSERT(mp->m_rootip);
 381	xfs_qm_dqdetach(mp->m_rootip);
 382	if (mp->m_rbmip)
 383		xfs_qm_dqdetach(mp->m_rbmip);
 384	if (mp->m_rsumip)
  385		xfs_qm_dqdetach(mp->m_rsumip);
 386
 387	/*
 388	 * Release the quota inodes.
 389	 */
 390	if (mp->m_quotainfo) {
 391		if (mp->m_quotainfo->qi_uquotaip) {
 392			IRELE(mp->m_quotainfo->qi_uquotaip);
 393			mp->m_quotainfo->qi_uquotaip = NULL;
 394		}
 395		if (mp->m_quotainfo->qi_gquotaip) {
 396			IRELE(mp->m_quotainfo->qi_gquotaip);
 397			mp->m_quotainfo->qi_gquotaip = NULL;
 398		}
 399		if (mp->m_quotainfo->qi_pquotaip) {
 400			IRELE(mp->m_quotainfo->qi_pquotaip);
 401			mp->m_quotainfo->qi_pquotaip = NULL;
 402		}
 403	}
 404}
 405
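     /*
      * Attach a single dquot of the given type to the inode.  Use the
      * group/project hint cached in the user dquot when it matches the id
      * we want; otherwise fall back to a full xfs_qm_dqget() lookup.
      */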
 406STATIC int
 407xfs_qm_dqattach_one(
 408	xfs_inode_t	*ip,
 409	xfs_dqid_t	id,
 410	uint		type,
 411	uint		doalloc,
 412	xfs_dquot_t	*udqhint, /* hint */
 413	xfs_dquot_t	**IO_idqpp)
 414{
 415	xfs_dquot_t	*dqp;
 416	int		error;
 417
 418	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 419	error = 0;
 420
 421	/*
 422	 * See if we already have it in the inode itself. IO_idqpp is
 423	 * &i_udquot or &i_gdquot. This made the code look weird, but
 424	 * made the logic a lot simpler.
 425	 */
 426	dqp = *IO_idqpp;
 427	if (dqp) {
 428		trace_xfs_dqattach_found(dqp);
 429		return 0;
 430	}
 431
 432	/*
 433	 * udqhint is the i_udquot field in inode, and is non-NULL only
 434	 * when the type arg is group/project. Its purpose is to save a
 435	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
 436	 * the user dquot.
 437	 */
 438	if (udqhint) {
 439		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
 440		xfs_dqlock(udqhint);
 441
 442		/*
 443		 * No need to take dqlock to look at the id.
 444		 *
 445		 * The ID can't change until it gets reclaimed, and it won't
 446		 * be reclaimed as long as we have a ref from inode and we
 447		 * hold the ilock.
 448		 */
 449		if (type == XFS_DQ_GROUP)
 450			dqp = udqhint->q_gdquot;
 451		else
 452			dqp = udqhint->q_pdquot;
 453		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
 454			ASSERT(*IO_idqpp == NULL);
 455
 456			*IO_idqpp = xfs_qm_dqhold(dqp);
 457			xfs_dqunlock(udqhint);
 458			return 0;
 459		}
 460
 461		/*
 462		 * We can't hold a dquot lock when we call the dqget code.
 463		 * We'll deadlock in no time, because of (not conforming to)
 464		 * lock ordering - the inodelock comes before any dquot lock,
 465		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
 466		 */
 467		xfs_dqunlock(udqhint);
 468	}
 469
 470	/*
 471	 * Find the dquot from somewhere. This bumps the
 472	 * reference count of dquot and returns it locked.
 473	 * This can return ENOENT if dquot didn't exist on
 474	 * disk and we didn't ask it to allocate;
 475	 * ESRCH if quotas got turned off suddenly.
 476	 */
 477	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
 478			     doalloc | XFS_QMOPT_DOWARN, &dqp);
 479	if (error)
 480		return error;
 481
 482	trace_xfs_dqattach_get(dqp);
 483
 484	/*
 485	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 486	 * that the dquot returned is the one that should go in the inode.
 487	 */
 488	*IO_idqpp = dqp;
 489	xfs_dqunlock(dqp);
 490	return 0;
 491}
 492
 493
 494/*
 495 * Given a udquot and group/project type, attach the group/project
 496 * dquot pointer to the udquot as a hint for future lookups.
 497 */
 498STATIC void
 499xfs_qm_dqattach_hint(
 500	struct xfs_inode	*ip,
 501	int			type)
 502{
 503	struct xfs_dquot **dqhintp;
 504	struct xfs_dquot *dqp;
 505	struct xfs_dquot *udq = ip->i_udquot;
 506
 507	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
 508
 509	xfs_dqlock(udq);
 510
 511	if (type == XFS_DQ_GROUP) {
 512		dqp = ip->i_gdquot;
 513		dqhintp = &udq->q_gdquot;
 514	} else {
 515		dqp = ip->i_pdquot;
 516		dqhintp = &udq->q_pdquot;
 517	}
 518
 519	if (*dqhintp) {
 520		struct xfs_dquot *tmp;
 521
 522		if (*dqhintp == dqp)
 523			goto done;
 524
 525		tmp = *dqhintp;
 526		*dqhintp = NULL;
 527		xfs_qm_dqrele(tmp);
 528	}
 529
 530	*dqhintp = xfs_qm_dqhold(dqp);
 531done:
 532	xfs_dqunlock(udq);
 533}
 534
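     /*
      * Decide whether an inode needs dquots attached: quotas must be
      * running and on, the inode must still be missing one of its dquots,
      * and the quota inodes themselves never get dquots attached.
      */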
 535static bool
 536xfs_qm_need_dqattach(
 537	struct xfs_inode	*ip)
 538{
 539	struct xfs_mount	*mp = ip->i_mount;
 540
 541	if (!XFS_IS_QUOTA_RUNNING(mp))
 542		return false;
 543	if (!XFS_IS_QUOTA_ON(mp))
 544		return false;
 545	if (!XFS_NOT_DQATTACHED(mp, ip))
 546		return false;
 547	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
  548		return false;
 549	return true;
 550}
 551
 552/*
 553 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 554 * into account.
 555 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 556 * Inode may get unlocked and relocked in here, and the caller must deal with
 557 * the consequences.
 558 */
 559int
 560xfs_qm_dqattach_locked(
 561	xfs_inode_t	*ip,
 562	uint		flags)
 563{
 564	xfs_mount_t	*mp = ip->i_mount;
 565	uint		nquotas = 0;
 566	int		error = 0;
 567
 568	if (!xfs_qm_need_dqattach(ip))
 569		return 0;
 570
  571	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 572
 573	if (XFS_IS_UQUOTA_ON(mp)) {
 574		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
 575						flags & XFS_QMOPT_DQALLOC,
 576						NULL, &ip->i_udquot);
 577		if (error)
 578			goto done;
 579		nquotas++;
 580	}
 581
 582	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 583	if (XFS_IS_GQUOTA_ON(mp)) {
 584		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
 585						flags & XFS_QMOPT_DQALLOC,
 586						ip->i_udquot, &ip->i_gdquot);
 587		/*
 588		 * Don't worry about the udquot that we may have
 589		 * attached above. It'll get detached, if not already.
 590		 */
 591		if (error)
 592			goto done;
 593		nquotas++;
 594	}
 595
 596	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 597	if (XFS_IS_PQUOTA_ON(mp)) {
 598		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
 599						flags & XFS_QMOPT_DQALLOC,
 600						ip->i_udquot, &ip->i_pdquot);
 601		/*
 602		 * Don't worry about the udquot that we may have
 603		 * attached above. It'll get detached, if not already.
 604		 */
 605		if (error)
 606			goto done;
 607		nquotas++;
 608	}
  609
 610	/*
 611	 * Attach this group/project quota to the user quota as a hint.
 612	 * This WON'T, in general, result in a thrash.
 613	 */
 614	if (nquotas > 1 && ip->i_udquot) {
 615		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 616		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
 617		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
 618
 619		/*
 620		 * We do not have i_udquot locked at this point, but this check
 621		 * is OK since we don't depend on the i_gdquot to be accurate
 622		 * 100% all the time. It is just a hint, and this will
 623		 * succeed in general.
 624		 */
 625		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
 626			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
 627
 628		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
 629			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
 630	}
 631
 632 done:
 633#ifdef DEBUG
 634	if (!error) {
 635		if (XFS_IS_UQUOTA_ON(mp))
 636			ASSERT(ip->i_udquot);
 637		if (XFS_IS_GQUOTA_ON(mp))
 638			ASSERT(ip->i_gdquot);
 639		if (XFS_IS_PQUOTA_ON(mp))
 640			ASSERT(ip->i_pdquot);
 641	}
 642	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 643#endif
 644	return error;
 645}
 646
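     /*
      * Convenience wrapper: take the inode ILOCK exclusively, attach the
      * dquots via xfs_qm_dqattach_locked(), then drop the lock again.
      */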
 647int
 648xfs_qm_dqattach(
 649	struct xfs_inode	*ip,
 650	uint			flags)
 651{
 652	int			error;
 653
 654	if (!xfs_qm_need_dqattach(ip))
 655		return 0;
 656
 657	xfs_ilock(ip, XFS_ILOCK_EXCL);
 658	error = xfs_qm_dqattach_locked(ip, flags);
 659	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 660
 661	return error;
 662}
 663
 664/*
 665 * Release dquots (and their references) if any.
  666 * The inode should be locked EXCL except when this is called by
 667 * xfs_ireclaim.
 668 */
 669void
 670xfs_qm_dqdetach(
 671	xfs_inode_t	*ip)
 672{
 673	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 674		return;
 675
 676	trace_xfs_dquot_dqdetach(ip);
 677
 678	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 679	if (ip->i_udquot) {
 680		xfs_qm_dqrele(ip->i_udquot);
 681		ip->i_udquot = NULL;
 682	}
 683	if (ip->i_gdquot) {
 684		xfs_qm_dqrele(ip->i_gdquot);
 685		ip->i_gdquot = NULL;
 686	}
 687	if (ip->i_pdquot) {
 688		xfs_qm_dqrele(ip->i_pdquot);
 689		ip->i_pdquot = NULL;
 690	}
 691}
 692
 693struct xfs_qm_isolate {
 694	struct list_head	buffers;
 695	struct list_head	dispose;
 696};
 697
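     /*
      * LRU isolation callback for the dquot shrinker.  Dquots that have
      * regained a reference are simply removed from the LRU; dirty dquots
      * are flushed, their buffers queued for write-back, and retried on a
      * later pass; clean, unreferenced dquots are marked XFS_DQ_FREEING
      * and moved to the dispose list for freeing.
      */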
 698static enum lru_status
 699xfs_qm_dquot_isolate(
 700	struct list_head	*item,
 701	spinlock_t		*lru_lock,
  702	void			*arg)
 703{
 704	struct xfs_dquot	*dqp = container_of(item,
 705						struct xfs_dquot, q_lru);
 706	struct xfs_qm_isolate	*isol = arg;
 707
 708	if (!xfs_dqlock_nowait(dqp))
 709		goto out_miss_busy;
 710
  711	/*
  712	 * This dquot has acquired a reference in the meantime; remove it from
 713	 * the freelist and try again.
 714	 */
 715	if (dqp->q_nrefs) {
 716		xfs_dqunlock(dqp);
 717		XFS_STATS_INC(xs_qm_dqwants);
 718
 719		trace_xfs_dqreclaim_want(dqp);
 720		list_del_init(&dqp->q_lru);
 721		XFS_STATS_DEC(xs_qm_dquot_unused);
 722		return LRU_REMOVED;
 723	}
 724
 725	/*
 726	 * If the dquot is dirty, flush it. If it's already being flushed, just
 727	 * skip it so there is time for the IO to complete before we try to
 728	 * reclaim it again on the next LRU pass.
 729	 */
 730	if (!xfs_dqflock_nowait(dqp)) {
 731		xfs_dqunlock(dqp);
 732		goto out_miss_busy;
 733	}
 734
 735	if (XFS_DQ_IS_DIRTY(dqp)) {
 736		struct xfs_buf	*bp = NULL;
 737		int		error;
 738
 739		trace_xfs_dqreclaim_dirty(dqp);
 740
 741		/* we have to drop the LRU lock to flush the dquot */
 742		spin_unlock(lru_lock);
 743
 744		error = xfs_qm_dqflush(dqp, &bp);
 745		if (error) {
 746			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
 747				 __func__, dqp);
 748			goto out_unlock_dirty;
 749		}
  750
 751		xfs_buf_delwri_queue(bp, &isol->buffers);
 752		xfs_buf_relse(bp);
 753		goto out_unlock_dirty;
  754	}
 755	xfs_dqfunlock(dqp);
 756
 757	/*
 758	 * Prevent lookups now that we are past the point of no return.
 759	 */
 760	dqp->dq_flags |= XFS_DQ_FREEING;
 761	xfs_dqunlock(dqp);
 762
 763	ASSERT(dqp->q_nrefs == 0);
 764	list_move_tail(&dqp->q_lru, &isol->dispose);
 765	XFS_STATS_DEC(xs_qm_dquot_unused);
 766	trace_xfs_dqreclaim_done(dqp);
 767	XFS_STATS_INC(xs_qm_dqreclaims);
 768	return LRU_REMOVED;
  769
 770out_miss_busy:
 771	trace_xfs_dqreclaim_busy(dqp);
 772	XFS_STATS_INC(xs_qm_dqreclaim_misses);
 773	return LRU_SKIP;
 774
 775out_unlock_dirty:
 776	trace_xfs_dqreclaim_busy(dqp);
 777	XFS_STATS_INC(xs_qm_dqreclaim_misses);
 778	xfs_dqunlock(dqp);
 779	spin_lock(lru_lock);
 780	return LRU_RETRY;
 781}
 782
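     /*
      * Shrinker scan callback: walk the per-node dquot LRU, collect
      * reclaimable dquots, submit any delayed-write buffers produced by
      * flushing dirty dquots, then free everything on the dispose list.
      */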
 783static unsigned long
 784xfs_qm_shrink_scan(
 785	struct shrinker		*shrink,
 786	struct shrink_control	*sc)
 787{
 788	struct xfs_quotainfo	*qi = container_of(shrink,
 789					struct xfs_quotainfo, qi_shrinker);
 790	struct xfs_qm_isolate	isol;
 791	unsigned long		freed;
 792	int			error;
 793	unsigned long		nr_to_scan = sc->nr_to_scan;
 794
 795	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
 796		return 0;
 797
 798	INIT_LIST_HEAD(&isol.buffers);
 799	INIT_LIST_HEAD(&isol.dispose);
 800
 801	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
 802					&nr_to_scan);
 803
 804	error = xfs_buf_delwri_submit(&isol.buffers);
 805	if (error)
 806		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 807
 808	while (!list_empty(&isol.dispose)) {
 809		struct xfs_dquot	*dqp;
 810
 811		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 812		list_del_init(&dqp->q_lru);
 813		xfs_qm_dqfree_one(dqp);
 814	}
 815
 816	return freed;
 817}
 818
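     /*
      * Shrinker count callback: report how many dquots sit on this node's
      * LRU and are therefore candidates for reclaim.
      */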
 819static unsigned long
 820xfs_qm_shrink_count(
 821	struct shrinker		*shrink,
 822	struct shrink_control	*sc)
 823{
 824	struct xfs_quotainfo	*qi = container_of(shrink,
  825					struct xfs_quotainfo, qi_shrinker);
 826
  827	return list_lru_count_node(&qi->qi_lru, sc->nid);
 828}
 829
 830/*
 831 * This initializes all the quota information that's kept in the
 832 * mount structure
 833 */
 834STATIC int
 835xfs_qm_init_quotainfo(
 836	xfs_mount_t	*mp)
 837{
 838	xfs_quotainfo_t *qinf;
 839	int		error;
 840	xfs_dquot_t	*dqp;
 841
 842	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 843
  844	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 845
 846	error = -list_lru_init(&qinf->qi_lru);
 847	if (error)
 848		goto out_free_qinf;
 849
 850	/*
 851	 * See if quotainodes are setup, and if not, allocate them,
 852	 * and change the superblock accordingly.
 853	 */
  854	error = xfs_qm_init_quotainos(mp);
 855	if (error)
 856		goto out_free_lru;
 857
 858	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 859	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 860	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 861	mutex_init(&qinf->qi_tree_lock);
 862
 863	/* mutex used to serialize quotaoffs */
 864	mutex_init(&qinf->qi_quotaofflock);
 865
 866	/* Precalc some constants */
 867	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 868	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
  869							qinf->qi_dqchunklen);
 870
 871	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 872
 873	/*
 874	 * We try to get the limits from the superuser's limits fields.
 875	 * This is quite hacky, but it is standard quota practice.
 876	 *
 877	 * We look at the USR dquot with id == 0 first, but if user quotas
 878	 * are not enabled we goto the GRP dquot with id == 0.
 879	 * We don't really care to keep separate default limits for user
 880	 * and group quotas, at least not at this point.
 881	 *
 882	 * Since we may not have done a quotacheck by this point, just read
 883	 * the dquot without attaching it to any hashtables or lists.
 884	 */
 885	error = xfs_qm_dqread(mp, 0,
 886			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
 887			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
 888			  XFS_DQ_PROJ),
 889			XFS_QMOPT_DOWARN, &dqp);
 890	if (!error) {
  891		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
 892
 893		/*
 894		 * The warnings and timers set the grace period given to
  895	 * a user or group before he or she can no longer perform any
  896	 * more writes. If it is zero, a default is used.
 897		 */
 898		qinf->qi_btimelimit = ddqp->d_btimer ?
 899			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
 900		qinf->qi_itimelimit = ddqp->d_itimer ?
 901			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
 902		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
 903			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
 904		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
 905			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
 906		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
 907			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
 908		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
 909			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
 910		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
 911		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
 912		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
 913		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
 914		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
 915		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
 916
 917		xfs_qm_dqdestroy(dqp);
 918	} else {
 919		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
 920		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
 921		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
 922		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
 923		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
 924		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
 925	}
 926
 927	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 928	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 929	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 930	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 931	register_shrinker(&qinf->qi_shrinker);
 932	return 0;
  933
 934out_free_lru:
 935	list_lru_destroy(&qinf->qi_lru);
 936out_free_qinf:
 937	kmem_free(qinf);
 938	mp->m_quotainfo = NULL;
 939	return error;
 940}
 941
 942
 943/*
 944 * Gets called when unmounting a filesystem or when all quotas get
 945 * turned off.
 946 * This purges the quota inodes, destroys locks and frees itself.
 947 */
 948void
 949xfs_qm_destroy_quotainfo(
 950	xfs_mount_t	*mp)
 951{
 952	xfs_quotainfo_t *qi;
 953
 954	qi = mp->m_quotainfo;
 955	ASSERT(qi != NULL);
 956
 957	unregister_shrinker(&qi->qi_shrinker);
 958	list_lru_destroy(&qi->qi_lru);
 959
 960	if (qi->qi_uquotaip) {
 961		IRELE(qi->qi_uquotaip);
 962		qi->qi_uquotaip = NULL; /* paranoia */
 963	}
 964	if (qi->qi_gquotaip) {
 965		IRELE(qi->qi_gquotaip);
 966		qi->qi_gquotaip = NULL;
 967	}
 968	if (qi->qi_pquotaip) {
 969		IRELE(qi->qi_pquotaip);
 970		qi->qi_pquotaip = NULL;
 971	}
 972	mutex_destroy(&qi->qi_quotaofflock);
 973	kmem_free(qi);
 974	mp->m_quotainfo = NULL;
 975}
  976
 977/*
 978 * Create an inode and return with a reference already taken, but unlocked
 979 * This is how we create quota inodes
 980 */
 981STATIC int
 982xfs_qm_qino_alloc(
 983	xfs_mount_t	*mp,
 984	xfs_inode_t	**ip,
 985	__int64_t	sbfields,
 986	uint		flags)
 987{
 988	xfs_trans_t	*tp;
 989	int		error;
 990	int		committed;
 991
 992	*ip = NULL;
 993	/*
 994	 * With superblock that doesn't have separate pquotino, we
 995	 * share an inode between gquota and pquota. If the on-disk
 996	 * superblock has GQUOTA and the filesystem is now mounted
 997	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 998	 * vice-versa.
 999	 */
1000	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
1001			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
1002		xfs_ino_t ino = NULLFSINO;
1003
1004		if ((flags & XFS_QMOPT_PQUOTA) &&
1005			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
1006			ino = mp->m_sb.sb_gquotino;
 1007			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
1008		} else if ((flags & XFS_QMOPT_GQUOTA) &&
1009			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
1010			ino = mp->m_sb.sb_pquotino;
 1011			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
1012		}
1013		if (ino != NULLFSINO) {
1014			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
1015			if (error)
 1016				return error;
1017			mp->m_sb.sb_gquotino = NULLFSINO;
 1018			mp->m_sb.sb_pquotino = NULLFSINO;
1019		}
1020	}
1021
1022	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
1023	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
1024				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
1025	if (error) {
1026		xfs_trans_cancel(tp, 0);
1027		return error;
1028	}
1029
1030	if (!*ip) {
1031		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
 1032								&committed);
1033		if (error) {
1034			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
1035					 XFS_TRANS_ABORT);
1036			return error;
 1037		}
1038	}
1039
1040	/*
1041	 * Make the changes in the superblock, and log those too.
1042	 * sbfields arg may contain fields other than *QUOTINO;
1043	 * VERSIONNUM for example.
1044	 */
1045	spin_lock(&mp->m_sb_lock);
1046	if (flags & XFS_QMOPT_SBVERSION) {
1047		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
1048		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1049			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
1050				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1051				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1052				 XFS_SB_QFLAGS));
1053
1054		xfs_sb_version_addquota(&mp->m_sb);
1055		mp->m_sb.sb_uquotino = NULLFSINO;
1056		mp->m_sb.sb_gquotino = NULLFSINO;
1057		mp->m_sb.sb_pquotino = NULLFSINO;
1058
1059		/* qflags will get updated fully _after_ quotacheck */
1060		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1061	}
1062	if (flags & XFS_QMOPT_UQUOTA)
1063		mp->m_sb.sb_uquotino = (*ip)->i_ino;
1064	else if (flags & XFS_QMOPT_GQUOTA)
1065		mp->m_sb.sb_gquotino = (*ip)->i_ino;
1066	else
1067		mp->m_sb.sb_pquotino = (*ip)->i_ino;
1068	spin_unlock(&mp->m_sb_lock);
1069	xfs_mod_sb(tp, sbfields);
1070
 1071	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
1072		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1073		return error;
1074	}
 1075	return 0;
1076}
1077
1078
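     /*
      * Zero the counters, timers and warning counts of every dquot in the
      * given buffer so that quotacheck can rebuild them from scratch,
      * recomputing the block CRC on v5 (CRC-enabled) filesystems.
      */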
1079STATIC void
1080xfs_qm_reset_dqcounts(
1081	xfs_mount_t	*mp,
1082	xfs_buf_t	*bp,
1083	xfs_dqid_t	id,
1084	uint		type)
1085{
1086	struct xfs_dqblk	*dqb;
1087	int			j;
1088
1089	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1090
1091	/*
1092	 * Reset all counters and timers. They'll be
1093	 * started afresh by xfs_qm_quotacheck.
1094	 */
1095#ifdef DEBUG
1096	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
1097	do_div(j, sizeof(xfs_dqblk_t));
1098	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1099#endif
1100	dqb = bp->b_addr;
1101	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1102		struct xfs_disk_dquot	*ddq;
1103
1104		ddq = (struct xfs_disk_dquot *)&dqb[j];
1105
1106		/*
1107		 * Do a sanity check, and if needed, repair the dqblk. Don't
1108		 * output any warnings because it's perfectly possible to
 1109		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
1110		 */
1111		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
1112			    "xfs_quotacheck");
1113		ddq->d_bcount = 0;
1114		ddq->d_icount = 0;
1115		ddq->d_rtbcount = 0;
1116		ddq->d_btimer = 0;
1117		ddq->d_itimer = 0;
1118		ddq->d_rtbtimer = 0;
1119		ddq->d_bwarns = 0;
1120		ddq->d_iwarns = 0;
1121		ddq->d_rtbwarns = 0;
1122
 1123		if (xfs_sb_version_hascrc(&mp->m_sb)) {
1124			xfs_update_cksum((char *)&dqb[j],
1125					 sizeof(struct xfs_dqblk),
1126					 XFS_DQUOT_CRC_OFF);
1127		}
1128	}
1129}
1130
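     /*
      * Read each dquot buffer in the given block range, reset the dquots
      * it contains and queue the buffer for delayed write.  A CRC failure
      * is re-read without verification so the damage can be repaired.
      */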
1131STATIC int
1132xfs_qm_dqiter_bufs(
1133	struct xfs_mount	*mp,
1134	xfs_dqid_t		firstid,
1135	xfs_fsblock_t		bno,
1136	xfs_filblks_t		blkcnt,
1137	uint			flags,
1138	struct list_head	*buffer_list)
1139{
1140	struct xfs_buf		*bp;
1141	int			error;
1142	int			type;
1143
1144	ASSERT(blkcnt > 0);
1145	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
1146		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
1147	error = 0;
1148
1149	/*
1150	 * Blkcnt arg can be a very big number, and might even be
1151	 * larger than the log itself. So, we have to break it up into
1152	 * manageable-sized transactions.
1153	 * Note that we don't start a permanent transaction here; we might
1154	 * not be able to get a log reservation for the whole thing up front,
1155	 * and we don't really care to either, because we just discard
1156	 * everything if we were to crash in the middle of this loop.
1157	 */
1158	while (blkcnt--) {
1159		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1160			      XFS_FSB_TO_DADDR(mp, bno),
1161			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1162			      &xfs_dquot_buf_ops);
1163
1164		/*
 1165		 * CRC and validation errors will return an EFSCORRUPTED here. If
1166		 * this occurs, re-read without CRC validation so that we can
1167		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1168		 * will leave a trace in the log indicating corruption has
1169		 * been detected.
1170		 */
1171		if (error == EFSCORRUPTED) {
1172			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1173				      XFS_FSB_TO_DADDR(mp, bno),
1174				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1175				      NULL);
1176		}
1177
1178		if (error)
1179			break;
 1180
1181		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1182		xfs_buf_delwri_queue(bp, buffer_list);
1183		xfs_buf_relse(bp);
1184
 1185		/* go to the next block. */
1186		bno++;
1187		firstid += mp->m_quotainfo->qi_dqperchunk;
1188	}
1189
1190	return error;
1191}
1192
1193/*
1194 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
1195 * caller supplied function for every chunk of dquots that we find.
1196 */
1197STATIC int
1198xfs_qm_dqiterate(
1199	struct xfs_mount	*mp,
1200	struct xfs_inode	*qip,
1201	uint			flags,
1202	struct list_head	*buffer_list)
1203{
1204	struct xfs_bmbt_irec	*map;
1205	int			i, nmaps;	/* number of map entries */
1206	int			error;		/* return value */
1207	xfs_fileoff_t		lblkno;
1208	xfs_filblks_t		maxlblkcnt;
1209	xfs_dqid_t		firstid;
1210	xfs_fsblock_t		rablkno;
1211	xfs_filblks_t		rablkcnt;
1212
1213	error = 0;
1214	/*
1215	 * This looks racy, but we can't keep an inode lock across a
1216	 * trans_reserve. But, this gets called during quotacheck, and that
1217	 * happens only at mount time which is single threaded.
1218	 */
1219	if (qip->i_d.di_nblocks == 0)
1220		return 0;
1221
 1222	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
1223
1224	lblkno = 0;
1225	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1226	do {
1227		uint		lock_mode;
1228
1229		nmaps = XFS_DQITER_MAP_SIZE;
1230		/*
1231		 * We aren't changing the inode itself. Just changing
1232		 * some of its data. No new blocks are added here, and
1233		 * the inode is never added to the transaction.
1234		 */
1235		lock_mode = xfs_ilock_data_map_shared(qip);
1236		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1237				       map, &nmaps, 0);
1238		xfs_iunlock(qip, lock_mode);
1239		if (error)
1240			break;
1241
1242		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1243		for (i = 0; i < nmaps; i++) {
1244			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1245			ASSERT(map[i].br_blockcount);
1246
1247
1248			lblkno += map[i].br_blockcount;
1249
1250			if (map[i].br_startblock == HOLESTARTBLOCK)
1251				continue;
1252
1253			firstid = (xfs_dqid_t) map[i].br_startoff *
1254				mp->m_quotainfo->qi_dqperchunk;
1255			/*
1256			 * Do a read-ahead on the next extent.
1257			 */
1258			if ((i+1 < nmaps) &&
1259			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1260				rablkcnt =  map[i+1].br_blockcount;
1261				rablkno = map[i+1].br_startblock;
1262				while (rablkcnt--) {
1263					xfs_buf_readahead(mp->m_ddev_targp,
1264					       XFS_FSB_TO_DADDR(mp, rablkno),
1265					       mp->m_quotainfo->qi_dqchunklen,
1266					       NULL);
1267					rablkno++;
1268				}
1269			}
1270			/*
1271			 * Iterate thru all the blks in the extent and
1272			 * reset the counters of all the dquots inside them.
1273			 */
1274			error = xfs_qm_dqiter_bufs(mp, firstid,
1275						   map[i].br_startblock,
1276						   map[i].br_blockcount,
1277						   flags, buffer_list);
1278			if (error)
1279				goto out;
1280		}
1281	} while (nmaps > 0);
1282
1283out:
1284	kmem_free(map);
1285	return error;
1286}
1287
1288/*
1289 * Called by dqusage_adjust in doing a quotacheck.
1290 *
 1291 * Given the inode and a dquot id, this updates both the incore dquot as well
1292 * as the buffer copy. This is so that once the quotacheck is done, we can
1293 * just log all the buffers, as opposed to logging numerous updates to
1294 * individual dquots.
1295 */
1296STATIC int
1297xfs_qm_quotacheck_dqadjust(
1298	struct xfs_inode	*ip,
1299	xfs_dqid_t		id,
1300	uint			type,
1301	xfs_qcnt_t		nblks,
1302	xfs_qcnt_t		rtblks)
1303{
1304	struct xfs_mount	*mp = ip->i_mount;
 1305	struct xfs_dquot	*dqp;
1306	int			error;
1307
1308	error = xfs_qm_dqget(mp, ip, id, type,
1309			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1310	if (error) {
1311		/*
1312		 * Shouldn't be able to turn off quotas here.
1313		 */
1314		ASSERT(error != ESRCH);
1315		ASSERT(error != ENOENT);
1316		return error;
1317	}
 1318
1319	trace_xfs_dqadjust(dqp);
1320
1321	/*
1322	 * Adjust the inode count and the block count to reflect this inode's
1323	 * resource usage.
1324	 */
1325	be64_add_cpu(&dqp->q_core.d_icount, 1);
1326	dqp->q_res_icount++;
1327	if (nblks) {
1328		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1329		dqp->q_res_bcount += nblks;
1330	}
1331	if (rtblks) {
1332		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1333		dqp->q_res_rtbcount += rtblks;
1334	}
1335
1336	/*
1337	 * Set default limits, adjust timers (since we changed usages)
1338	 *
1339	 * There are no timers for the default values set in the root dquot.
1340	 */
1341	if (dqp->q_core.d_id) {
1342		xfs_qm_adjust_dqlimits(mp, dqp);
1343		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1344	}
1345
1346	dqp->dq_flags |= XFS_DQ_DIRTY;
1347	xfs_qm_dqput(dqp);
1348	return 0;
1349}
1350
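     /*
      * Count the realtime blocks allocated to an inode by walking the
      * extent records of its data fork.
      */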
1351STATIC int
1352xfs_qm_get_rtblks(
1353	xfs_inode_t	*ip,
1354	xfs_qcnt_t	*O_rtblks)
1355{
1356	xfs_filblks_t	rtblks;			/* total rt blks */
1357	xfs_extnum_t	idx;			/* extent record index */
1358	xfs_ifork_t	*ifp;			/* inode fork pointer */
1359	xfs_extnum_t	nextents;		/* number of extent entries */
1360	int		error;
1361
1362	ASSERT(XFS_IS_REALTIME_INODE(ip));
1363	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1364	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1365		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1366			return error;
1367	}
1368	rtblks = 0;
1369	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1370	for (idx = 0; idx < nextents; idx++)
1371		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1372	*O_rtblks = (xfs_qcnt_t)rtblks;
1373	return 0;
1374}
1375
1376/*
1377 * callback routine supplied to bulkstat(). Given an inumber, find its
1378 * dquots and update them to account for resources taken by that inode.
1379 */
1380/* ARGSUSED */
1381STATIC int
1382xfs_qm_dqusage_adjust(
1383	xfs_mount_t	*mp,		/* mount point for filesystem */
1384	xfs_ino_t	ino,		/* inode number to get data for */
1385	void		__user *buffer,	/* not used */
1386	int		ubsize,		/* not used */
1387	int		*ubused,	/* not used */
1388	int		*res)		/* result code value */
1389{
1390	xfs_inode_t	*ip;
1391	xfs_qcnt_t	nblks, rtblks = 0;
1392	int		error;
1393
1394	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1395
1396	/*
1397	 * rootino must have its resources accounted for, not so with the quota
1398	 * inodes.
1399	 */
1400	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1401		*res = BULKSTAT_RV_NOTHING;
1402		return XFS_ERROR(EINVAL);
1403	}
1404
1405	/*
1406	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1407	 * interface expects the inode to be exclusively locked because that's
1408	 * the case in all other instances. It's OK that we do this because
1409	 * quotacheck is done only at mount time.
1410	 */
1411	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1412	if (error) {
 1413		*res = BULKSTAT_RV_NOTHING;
 1414		return error;
1415	}
 1416
1417	ASSERT(ip->i_delayed_blks == 0);
 1418
1419	if (XFS_IS_REALTIME_INODE(ip)) {
1420		/*
1421		 * Walk thru the extent list and count the realtime blocks.
1422		 */
1423		error = xfs_qm_get_rtblks(ip, &rtblks);
1424		if (error)
 1425			goto error0;
1426	}
1427
 1428	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1429
1430	/*
1431	 * Add the (disk blocks and inode) resources occupied by this
1432	 * inode to its dquots. We do this adjustment in the incore dquot,
1433	 * and also copy the changes to its buffer.
1434	 * We don't care about putting these changes in a transaction
1435	 * envelope because if we crash in the middle of a 'quotacheck'
1436	 * we have to start from the beginning anyway.
1437	 * Once we're done, we'll log all the dquot bufs.
1438	 *
1439	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1440	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1441	 */
1442	if (XFS_IS_UQUOTA_ON(mp)) {
1443		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1444						   XFS_DQ_USER, nblks, rtblks);
1445		if (error)
1446			goto error0;
1447	}
1448
1449	if (XFS_IS_GQUOTA_ON(mp)) {
1450		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1451						   XFS_DQ_GROUP, nblks, rtblks);
1452		if (error)
1453			goto error0;
1454	}
1455
1456	if (XFS_IS_PQUOTA_ON(mp)) {
1457		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1458						   XFS_DQ_PROJ, nblks, rtblks);
1459		if (error)
1460			goto error0;
1461	}
1462
1463	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1464	IRELE(ip);
1465	*res = BULKSTAT_RV_DIDONE;
1466	return 0;
1467
1468error0:
1469	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1470	IRELE(ip);
1471	*res = BULKSTAT_RV_GIVEUP;
1472	return error;
1473}
1474
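     /*
      * Flush a single dirty dquot to its backing buffer and queue that
      * buffer for delayed write; called via xfs_qm_dquot_walk() at the
      * end of quotacheck.
      */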
1475STATIC int
1476xfs_qm_flush_one(
1477	struct xfs_dquot	*dqp,
1478	void			*data)
 1479{
1480	struct list_head	*buffer_list = data;
1481	struct xfs_buf		*bp = NULL;
1482	int			error = 0;
1483
1484	xfs_dqlock(dqp);
1485	if (dqp->dq_flags & XFS_DQ_FREEING)
1486		goto out_unlock;
1487	if (!XFS_DQ_IS_DIRTY(dqp))
1488		goto out_unlock;
1489
1490	xfs_dqflock(dqp);
 1491	error = xfs_qm_dqflush(dqp, &bp);
1492	if (error)
 1493		goto out_unlock;
1494
 1495	xfs_buf_delwri_queue(bp, buffer_list);
1496	xfs_buf_relse(bp);
1497out_unlock:
1498	xfs_dqunlock(dqp);
1499	return error;
1500}
1501
1502/*
1503 * Walk thru all the filesystem inodes and construct a consistent view
1504 * of the disk quota world. If the quotacheck fails, disable quotas.
1505 */
1506int
1507xfs_qm_quotacheck(
1508	xfs_mount_t	*mp)
1509{
1510	int			done, count, error, error2;
1511	xfs_ino_t		lastino;
1512	size_t			structsz;
1513	uint			flags;
1514	LIST_HEAD		(buffer_list);
1515	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1516	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1517	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1518
1519	count = INT_MAX;
1520	structsz = 1;
1521	lastino = 0;
1522	flags = 0;
1523
1524	ASSERT(uip || gip || pip);
1525	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1526
1527	xfs_notice(mp, "Quotacheck needed: Please wait.");
1528
1529	/*
1530	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1531	 * their counters to zero. We need a clean slate.
1532	 * We don't log our changes till later.
1533	 */
1534	if (uip) {
1535		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1536					 &buffer_list);
1537		if (error)
1538			goto error_return;
1539		flags |= XFS_UQUOTA_CHKD;
1540	}
1541
1542	if (gip) {
1543		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1544					 &buffer_list);
1545		if (error)
1546			goto error_return;
1547		flags |= XFS_GQUOTA_CHKD;
1548	}
1549
1550	if (pip) {
1551		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1552					 &buffer_list);
1553		if (error)
1554			goto error_return;
1555		flags |= XFS_PQUOTA_CHKD;
1556	}
1557
1558	do {
1559		/*
1560		 * Iterate thru all the inodes in the file system,
1561		 * adjusting the corresponding dquot counters in core.
1562		 */
1563		error = xfs_bulkstat(mp, &lastino, &count,
1564				     xfs_qm_dqusage_adjust,
1565				     structsz, NULL, &done);
1566		if (error)
1567			break;
1568
 1569	} while (!done);
1570
1571	/*
1572	 * We've made all the changes that we need to make incore.  Flush them
1573	 * down to disk buffers if everything was updated successfully.
1574	 */
1575	if (XFS_IS_UQUOTA_ON(mp)) {
1576		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1577					  &buffer_list);
1578	}
1579	if (XFS_IS_GQUOTA_ON(mp)) {
1580		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1581					   &buffer_list);
1582		if (!error)
1583			error = error2;
1584	}
1585	if (XFS_IS_PQUOTA_ON(mp)) {
1586		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1587					   &buffer_list);
1588		if (!error)
1589			error = error2;
1590	}
1591
1592	error2 = xfs_buf_delwri_submit(&buffer_list);
1593	if (!error)
1594		error = error2;
1595
1596	/*
1597	 * We can get this error if we couldn't do a dquot allocation inside
1598	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1599	 * dirty dquots that might be cached, we just want to get rid of them
1600	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1601	 * at this point (because we intentionally didn't in dqget_noattach).
1602	 */
1603	if (error) {
1604		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1605		goto error_return;
1606	}
1607
1608	/*
1609	 * If one type of quotas is off, then it will lose its
1610	 * quotachecked status, since we won't be doing accounting for
1611	 * that type anymore.
1612	 */
1613	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1614	mp->m_qflags |= flags;
1615
1616 error_return:
1617	while (!list_empty(&buffer_list)) {
1618		struct xfs_buf *bp =
1619			list_first_entry(&buffer_list, struct xfs_buf, b_list);
1620		list_del_init(&bp->b_list);
1621		xfs_buf_relse(bp);
1622	}
1623
1624	if (error) {
1625		xfs_warn(mp,
1626	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1627			error);
1628		/*
1629		 * We must turn off quotas.
1630		 */
1631		ASSERT(mp->m_quotainfo != NULL);
1632		xfs_qm_destroy_quotainfo(mp);
1633		if (xfs_mount_reset_sbqflags(mp)) {
1634			xfs_warn(mp,
1635				"Quotacheck: Failed to reset quota flags.");
1636		}
 1637	} else
1638		xfs_notice(mp, "Quotacheck: Done.");
 1639	return (error);
1640}
1641
1642/*
1643 * This is called after the superblock has been read in and we're ready to
1644 * iget the quota inodes.
1645 */
1646STATIC int
1647xfs_qm_init_quotainos(
1648	xfs_mount_t	*mp)
1649{
1650	struct xfs_inode	*uip = NULL;
1651	struct xfs_inode	*gip = NULL;
1652	struct xfs_inode	*pip = NULL;
1653	int			error;
1654	__int64_t		sbflags = 0;
1655	uint			flags = 0;
1656
1657	ASSERT(mp->m_quotainfo);
1658
1659	/*
1660	 * Get the uquota and gquota inodes
1661	 */
1662	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1663		if (XFS_IS_UQUOTA_ON(mp) &&
1664		    mp->m_sb.sb_uquotino != NULLFSINO) {
1665			ASSERT(mp->m_sb.sb_uquotino > 0);
1666			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1667					     0, 0, &uip);
1668			if (error)
1669				return XFS_ERROR(error);
1670		}
1671		if (XFS_IS_GQUOTA_ON(mp) &&
1672		    mp->m_sb.sb_gquotino != NULLFSINO) {
1673			ASSERT(mp->m_sb.sb_gquotino > 0);
1674			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1675					     0, 0, &gip);
1676			if (error)
1677				goto error_rele;
1678		}
1679		if (XFS_IS_PQUOTA_ON(mp) &&
1680		    mp->m_sb.sb_pquotino != NULLFSINO) {
1681			ASSERT(mp->m_sb.sb_pquotino > 0);
1682			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1683					     0, 0, &pip);
1684			if (error)
1685				goto error_rele;
1686		}
1687	} else {
1688		flags |= XFS_QMOPT_SBVERSION;
1689		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1690			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1691			    XFS_SB_QFLAGS);
1692	}
1693
1694	/*
1695	 * Create the three inodes, if they don't exist already. The changes
1696	 * made above will get added to a transaction and logged in one of
1697	 * the qino_alloc calls below.  If the device is readonly,
1698	 * temporarily switch to read-write to do this.
1699	 */
1700	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1701		error = xfs_qm_qino_alloc(mp, &uip,
1702					      sbflags | XFS_SB_UQUOTINO,
1703					      flags | XFS_QMOPT_UQUOTA);
1704		if (error)
1705			goto error_rele;
1706
1707		flags &= ~XFS_QMOPT_SBVERSION;
1708	}
1709	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1710		error = xfs_qm_qino_alloc(mp, &gip,
1711					  sbflags | XFS_SB_GQUOTINO,
1712					  flags | XFS_QMOPT_GQUOTA);
1713		if (error)
1714			goto error_rele;
1715
1716		flags &= ~XFS_QMOPT_SBVERSION;
1717	}
1718	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1719		error = xfs_qm_qino_alloc(mp, &pip,
1720					  sbflags | XFS_SB_PQUOTINO,
1721					  flags | XFS_QMOPT_PQUOTA);
1722		if (error)
1723			goto error_rele;
1724	}
1725
1726	mp->m_quotainfo->qi_uquotaip = uip;
1727	mp->m_quotainfo->qi_gquotaip = gip;
1728	mp->m_quotainfo->qi_pquotaip = pip;
1729
1730	return 0;
1731
1732error_rele:
1733	if (uip)
1734		IRELE(uip);
1735	if (gip)
1736		IRELE(gip);
1737	if (pip)
1738		IRELE(pip);
1739	return XFS_ERROR(error);
1740}
1741
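     /*
      * Final disposal of a dquot that has been pulled off the LRU: remove
      * it from the radix tree and destroy it.
      */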
1742STATIC void
1743xfs_qm_dqfree_one(
1744	struct xfs_dquot	*dqp)
1745{
1746	struct xfs_mount	*mp = dqp->q_mount;
1747	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1748
1749	mutex_lock(&qi->qi_tree_lock);
1750	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1751			  be32_to_cpu(dqp->q_core.d_id));
1752
1753	qi->qi_dquots--;
1754	mutex_unlock(&qi->qi_tree_lock);
1755
1756	xfs_qm_dqdestroy(dqp);
1757}
1758
1759/*
1760 * Start a transaction and write the incore superblock changes to
 1761 * disk. The flags parameter indicates which fields have changed.
1762 */
1763int
1764xfs_qm_write_sb_changes(
1765	xfs_mount_t	*mp,
1766	__int64_t	flags)
1767{
1768	xfs_trans_t	*tp;
1769	int		error;
1770
1771	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1772	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
1773	if (error) {
1774		xfs_trans_cancel(tp, 0);
1775		return error;
1776	}
1777
1778	xfs_mod_sb(tp, flags);
1779	error = xfs_trans_commit(tp, 0);
1780
1781	return error;
1782}
1783
1784
1785/* --------------- utility functions for vnodeops ---------------- */
1786
1787
1788/*
1789 * Given an inode, a uid, gid and prid make sure that we have
1790 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1791 * quotas by creating this file.
1792 * This also attaches dquot(s) to the given inode after locking it,
1793 * and returns the dquots corresponding to the uid and/or gid.
1794 *
1795 * in	: inode (unlocked)
1796 * out	: udquot, gdquot with references taken and unlocked
1797 */
1798int
1799xfs_qm_vop_dqalloc(
1800	struct xfs_inode	*ip,
1801	xfs_dqid_t		uid,
1802	xfs_dqid_t		gid,
1803	prid_t			prid,
1804	uint			flags,
1805	struct xfs_dquot	**O_udqpp,
1806	struct xfs_dquot	**O_gdqpp,
1807	struct xfs_dquot	**O_pdqpp)
1808{
 1809	struct xfs_mount	*mp = ip->i_mount;
1810	struct xfs_dquot	*uq = NULL;
1811	struct xfs_dquot	*gq = NULL;
1812	struct xfs_dquot	*pq = NULL;
1813	int			error;
1814	uint			lockflags;
1815
1816	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1817		return 0;
 1818
1819	lockflags = XFS_ILOCK_EXCL;
1820	xfs_ilock(ip, lockflags);
1821
1822	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1823		gid = ip->i_d.di_gid;
1824
1825	/*
1826	 * Attach the dquot(s) to this inode, doing a dquot allocation
1827	 * if necessary. The dquot(s) will not be locked.
1828	 */
1829	if (XFS_NOT_DQATTACHED(mp, ip)) {
1830		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1831		if (error) {
1832			xfs_iunlock(ip, lockflags);
1833			return error;
1834		}
1835	}
1836
1837	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
 1838		if (ip->i_d.di_uid != uid) {
1839			/*
1840			 * What we need is the dquot that has this uid, and
1841			 * if we send the inode to dqget, the uid of the inode
1842			 * takes priority over what's sent in the uid argument.
1843			 * We must unlock inode here before calling dqget if
1844			 * we're not sending the inode, because otherwise
1845			 * we'll deadlock by doing trans_reserve while
1846			 * holding ilock.
1847			 */
1848			xfs_iunlock(ip, lockflags);
1849			error = xfs_qm_dqget(mp, NULL, uid,
1850						 XFS_DQ_USER,
1851						 XFS_QMOPT_DQALLOC |
1852						 XFS_QMOPT_DOWARN,
1853						 &uq);
1854			if (error) {
1855				ASSERT(error != ENOENT);
1856				return error;
1857			}
1858			/*
1859			 * Get the ilock in the right order.
1860			 */
1861			xfs_dqunlock(uq);
1862			lockflags = XFS_ILOCK_SHARED;
1863			xfs_ilock(ip, lockflags);
1864		} else {
1865			/*
1866			 * Take an extra reference, because we'll return
1867			 * this to caller
1868			 */
1869			ASSERT(ip->i_udquot);
1870			uq = xfs_qm_dqhold(ip->i_udquot);
1871		}
1872	}
1873	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
 1874		if (ip->i_d.di_gid != gid) {
1875			xfs_iunlock(ip, lockflags);
1876			error = xfs_qm_dqget(mp, NULL, gid,
1877						 XFS_DQ_GROUP,
1878						 XFS_QMOPT_DQALLOC |
1879						 XFS_QMOPT_DOWARN,
1880						 &gq);
1881			if (error) {
1882				ASSERT(error != ENOENT);
1883				goto error_rele;
1884			}
1885			xfs_dqunlock(gq);
1886			lockflags = XFS_ILOCK_SHARED;
1887			xfs_ilock(ip, lockflags);
1888		} else {
1889			ASSERT(ip->i_gdquot);
1890			gq = xfs_qm_dqhold(ip->i_gdquot);
1891		}
1892	}
1893	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
 1894		if (xfs_get_projid(ip) != prid) {
1895			xfs_iunlock(ip, lockflags);
1896			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1897						 XFS_DQ_PROJ,
1898						 XFS_QMOPT_DQALLOC |
1899						 XFS_QMOPT_DOWARN,
1900						 &pq);
1901			if (error) {
1902				ASSERT(error != ENOENT);
1903				goto error_rele;
1904			}
1905			xfs_dqunlock(pq);
1906			lockflags = XFS_ILOCK_SHARED;
1907			xfs_ilock(ip, lockflags);
1908		} else {
1909			ASSERT(ip->i_pdquot);
1910			pq = xfs_qm_dqhold(ip->i_pdquot);
1911		}
1912	}
1913	if (uq)
1914		trace_xfs_dquot_dqalloc(ip);
1915
1916	xfs_iunlock(ip, lockflags);
1917	if (O_udqpp)
1918		*O_udqpp = uq;
1919	else if (uq)
1920		xfs_qm_dqrele(uq);
1921	if (O_gdqpp)
1922		*O_gdqpp = gq;
1923	else if (gq)
1924		xfs_qm_dqrele(gq);
1925	if (O_pdqpp)
1926		*O_pdqpp = pq;
1927	else if (pq)
1928		xfs_qm_dqrele(pq);
1929	return 0;
1930
1931error_rele:
1932	if (gq)
1933		xfs_qm_dqrele(gq);
1934	if (uq)
1935		xfs_qm_dqrele(uq);
1936	return error;
1937}
1938
1939/*
1940 * Actually transfer ownership, and do dquot modifications.
1941 * These were already reserved.
1942 */
1943xfs_dquot_t *
1944xfs_qm_vop_chown(
1945	xfs_trans_t	*tp,
1946	xfs_inode_t	*ip,
1947	xfs_dquot_t	**IO_olddq,
1948	xfs_dquot_t	*newdq)
1949{
1950	xfs_dquot_t	*prevdq;
1951	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1952				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1953
1954
1955	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1956	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1957
1958	/* old dquot */
1959	prevdq = *IO_olddq;
1960	ASSERT(prevdq);
1961	ASSERT(prevdq != newdq);
1962
1963	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1964	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1965
1966	/* the sparkling new dquot */
1967	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1968	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1969
1970	/*
1971	 * Take an extra reference, because the inode is going to keep
1972	 * this dquot pointer even after the trans_commit.
1973	 */
1974	*IO_olddq = xfs_qm_dqhold(newdq);
1975
1976	return prevdq;
1977}
1978
1979/*
1980 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1981 */
1982int
1983xfs_qm_vop_chown_reserve(
1984	struct xfs_trans	*tp,
1985	struct xfs_inode	*ip,
1986	struct xfs_dquot	*udqp,
1987	struct xfs_dquot	*gdqp,
1988	struct xfs_dquot	*pdqp,
1989	uint			flags)
1990{
1991	struct xfs_mount	*mp = ip->i_mount;
1992	uint			delblks, blkflags, prjflags = 0;
1993	struct xfs_dquot	*udq_unres = NULL;
1994	struct xfs_dquot	*gdq_unres = NULL;
1995	struct xfs_dquot	*pdq_unres = NULL;
1996	struct xfs_dquot	*udq_delblks = NULL;
1997	struct xfs_dquot	*gdq_delblks = NULL;
1998	struct xfs_dquot	*pdq_delblks = NULL;
1999	int			error;
2000
2001
2002	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2003	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2004
2005	delblks = ip->i_delayed_blks;
2006	blkflags = XFS_IS_REALTIME_INODE(ip) ?
2007			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
2008
2009	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
2010	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
2011		udq_delblks = udqp;
2012		/*
2013		 * If there are delayed allocation blocks, then we have to
2014		 * unreserve those from the old dquot, and add them to the
2015		 * new dquot.
2016		 */
2017		if (delblks) {
2018			ASSERT(ip->i_udquot);
2019			udq_unres = ip->i_udquot;
2020		}
2021	}
2022	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
2023	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
2024		gdq_delblks = gdqp;
2025		if (delblks) {
2026			ASSERT(ip->i_gdquot);
2027			gdq_unres = ip->i_gdquot;
2028		}
2029	}
2030
2031	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
2032	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
2033		prjflags = XFS_QMOPT_ENOSPC;
2034		pdq_delblks = pdqp;
2035		if (delblks) {
2036			ASSERT(ip->i_pdquot);
2037			pdq_unres = ip->i_pdquot;
2038		}
2039	}
2040
2041	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2042				udq_delblks, gdq_delblks, pdq_delblks,
2043				ip->i_d.di_nblocks, 1,
2044				flags | blkflags | prjflags);
2045	if (error)
2046		return error;
2047
2048	/*
2049	 * Do the delayed blks reservations/unreservations now. Since these
2050	 * are done without the help of a transaction, if a reservation fails
2051	 * its previous reservations won't be automatically undone by trans
2052	 * code. So, we have to do it manually here.
2053	 */
2054	if (delblks) {
2055		/*
2056		 * Do the reservations first. Unreservation can't fail.
2057		 */
2058		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
2059		ASSERT(udq_unres || gdq_unres || pdq_unres);
2060		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2061			    udq_delblks, gdq_delblks, pdq_delblks,
2062			    (xfs_qcnt_t)delblks, 0,
2063			    flags | blkflags | prjflags);
2064		if (error)
2065			return error;
2066		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2067				udq_unres, gdq_unres, pdq_unres,
2068				-((xfs_qcnt_t)delblks), 0, blkflags);
2069	}
2070
2071	return (0);
2072}
2073
2074int
2075xfs_qm_vop_rename_dqattach(
2076	struct xfs_inode	**i_tab)
2077{
2078	struct xfs_mount	*mp = i_tab[0]->i_mount;
2079	int			i;
2080
2081	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2082		return 0;
2083
2084	for (i = 0; (i < 4 && i_tab[i]); i++) {
2085		struct xfs_inode	*ip = i_tab[i];
2086		int			error;
2087
2088		/*
2089		 * Watch out for duplicate entries in the table.
2090		 */
2091		if (i == 0 || ip != i_tab[i-1]) {
2092			if (XFS_NOT_DQATTACHED(mp, ip)) {
2093				error = xfs_qm_dqattach(ip, 0);
2094				if (error)
2095					return error;
2096			}
2097		}
2098	}
2099	return 0;
2100}
2101
2102void
2103xfs_qm_vop_create_dqattach(
2104	struct xfs_trans	*tp,
2105	struct xfs_inode	*ip,
2106	struct xfs_dquot	*udqp,
2107	struct xfs_dquot	*gdqp,
2108	struct xfs_dquot	*pdqp)
2109{
2110	struct xfs_mount	*mp = tp->t_mountp;
2111
2112	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2113		return;
2114
2115	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2116	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2117
2118	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2119		ASSERT(ip->i_udquot == NULL);
2120		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2121
2122		ip->i_udquot = xfs_qm_dqhold(udqp);
2123		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2124	}
2125	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2126		ASSERT(ip->i_gdquot == NULL);
2127		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
 
2128		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2129		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2130	}
2131	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2132		ASSERT(ip->i_pdquot == NULL);
2133		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2134
2135		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2136		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
2137	}
2138}
2139
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
 
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
 
  17#include "xfs_quota.h"
 
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28#include "xfs_log_priv.h"
  29#include "xfs_health.h"
  30#include "xfs_da_format.h"
  31#include "xfs_metafile.h"
  32#include "xfs_rtgroup.h"
  33
  34/*
  35 * The global quota manager. There is only one of these for the entire
  36 * system, _not_ one per file system. XQM keeps track of the overall
  37 * quota functionality, including maintaining the freelist and hash
  38 * tables of dquots.
  39 */
  40STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  41STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
 
  42
  43STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  44/*
  45 * We use the batch lookup interface to iterate over the dquots as it
  46 * currently is the only interface into the radix tree code that allows
  47 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  48 * operations is fine as all callers are used either during mount/umount
  49 * or quotaoff.
  50 */
  51#define XFS_DQ_LOOKUP_BATCH	32
  52
  53STATIC int
  54xfs_qm_dquot_walk(
  55	struct xfs_mount	*mp,
  56	xfs_dqtype_t		type,
  57	int			(*execute)(struct xfs_dquot *dqp, void *data),
  58	void			*data)
  59{
  60	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  61	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  62	uint32_t		next_index;
  63	int			last_error = 0;
  64	int			skipped;
  65	int			nr_found;
  66
  67restart:
  68	skipped = 0;
  69	next_index = 0;
  70	nr_found = 0;
  71
  72	while (1) {
  73		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  74		int		error;
  75		int		i;
  76
  77		mutex_lock(&qi->qi_tree_lock);
  78		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  79					next_index, XFS_DQ_LOOKUP_BATCH);
  80		if (!nr_found) {
  81			mutex_unlock(&qi->qi_tree_lock);
  82			break;
  83		}
  84
  85		for (i = 0; i < nr_found; i++) {
  86			struct xfs_dquot *dqp = batch[i];
  87
  88			next_index = dqp->q_id + 1;
  89
  90			error = execute(batch[i], data);
  91			if (error == -EAGAIN) {
  92				skipped++;
  93				continue;
  94			}
  95			if (error && last_error != -EFSCORRUPTED)
  96				last_error = error;
  97		}
  98
  99		mutex_unlock(&qi->qi_tree_lock);
 100
 101		/* bail out if the filesystem is corrupted.  */
 102		if (last_error == -EFSCORRUPTED) {
 103			skipped = 0;
 104			break;
 105		}
 106		/* we're done if id overflows back to zero */
 107		if (!next_index)
 108			break;
 109	}
 110
 111	if (skipped) {
 112		delay(1);
 113		goto restart;
 114	}
 115
 116	return last_error;
 117}
 118
 119
 120/*
 121 * Purge a dquot from all tracking data structures and free it.
 122 */
 123STATIC int
 124xfs_qm_dqpurge(
 125	struct xfs_dquot	*dqp,
 126	void			*data)
 127{
 128	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
 129	int			error = -EAGAIN;
 130
 131	xfs_dqlock(dqp);
 132	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 133		goto out_unlock;
 134
 135	dqp->q_flags |= XFS_DQFLAG_FREEING;
 136
 137	xfs_dqflock(dqp);
 138
 139	/*
 140	 * If we are turning this type of quotas off, we don't care
 141	 * about the dirty metadata sitting in this dquot. OTOH, if
 142	 * we're unmounting, we do care, so we flush it and wait.
 143	 */
 144	if (XFS_DQ_IS_DIRTY(dqp)) {
 145		struct xfs_buf	*bp = NULL;
 
 146
 147		/*
 148		 * We don't care about getting disk errors here. We need
 149		 * to purge this dquot anyway, so we go ahead regardless.
 150		 */
 151		error = xfs_dquot_use_attached_buf(dqp, &bp);
 152		if (error == -EAGAIN) {
 153			xfs_dqfunlock(dqp);
 154			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 155			goto out_unlock;
 156		}
 157		if (!bp)
 158			goto out_funlock;
 159
 160		/*
 161		 * dqflush completes dqflock on error, and the bwrite ioend
 162		 * does it on success.
 163		 */
 164		error = xfs_qm_dqflush(dqp, bp);
 165		if (!error) {
 166			error = xfs_bwrite(bp);
 167			xfs_buf_relse(bp);
 168		}
 169		xfs_dqflock(dqp);
 170	}
 171	xfs_dquot_detach_buf(dqp);
 172
 173out_funlock:
 174	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 175	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
 176		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 177
 178	xfs_dqfunlock(dqp);
 179	xfs_dqunlock(dqp);
 180
 181	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 
 182	qi->qi_dquots--;
 183
 184	/*
 185	 * We move dquots to the freelist as soon as their reference count
 186	 * hits zero, so it really should be on the freelist here.
 187	 */
 188	ASSERT(!list_empty(&dqp->q_lru));
 189	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
 190	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 191
 192	xfs_qm_dqdestroy(dqp);
 193	return 0;
 194
 195out_unlock:
 196	xfs_dqunlock(dqp);
 197	return error;
 198}
 199
 200/*
 201 * Purge the dquot cache.
 202 */
 203static void
 204xfs_qm_dqpurge_all(
 205	struct xfs_mount	*mp)
 
 206{
 207	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 208	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 209	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 210}
 211
 212/*
 213 * Just destroy the quotainfo structure.
 214 */
 215void
 216xfs_qm_unmount(
 217	struct xfs_mount	*mp)
 218{
 219	if (mp->m_quotainfo) {
 220		xfs_qm_dqpurge_all(mp);
 221		xfs_qm_destroy_quotainfo(mp);
 222	}
 223}
 224
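/*
 * Detach the dquots held by the realtime bitmap and summary inodes of
 * rtgroup 0 so that unmount can release them.  Only needed on filesystems
 * without the rtgroups feature (see the caller below).
 */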
 225static void
 226xfs_qm_unmount_rt(
 227	struct xfs_mount	*mp)
 228{
 229	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);
 
 230
 231	if (!rtg)
 232		return;
 233	if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
 234		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
 235	if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
 236		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
 237	xfs_rtgroup_rele(rtg);
 238}
 239
 240STATIC void
 241xfs_qm_destroy_quotainos(
 242	struct xfs_quotainfo	*qi)
 243{
 244	if (qi->qi_uquotaip) {
 245		xfs_irele(qi->qi_uquotaip);
 246		qi->qi_uquotaip = NULL; /* paranoia */
 247	}
 248	if (qi->qi_gquotaip) {
 249		xfs_irele(qi->qi_gquotaip);
 250		qi->qi_gquotaip = NULL;
 251	}
 252	if (qi->qi_pquotaip) {
 253		xfs_irele(qi->qi_pquotaip);
 254		qi->qi_pquotaip = NULL;
 255	}
 256	if (qi->qi_dirip) {
 257		xfs_irele(qi->qi_dirip);
 258		qi->qi_dirip = NULL;
 
 259	}
 260}
 261
 262/*
 263 * Called from the vfsops layer.
 264 */
 265void
 266xfs_qm_unmount_quotas(
 267	xfs_mount_t	*mp)
 268{
 269	/*
 270	 * Release the dquots that root inode, et al might be holding,
 271	 * before we flush quotas and blow away the quotainfo structure.
 272	 */
 273	ASSERT(mp->m_rootip);
 274	xfs_qm_dqdetach(mp->m_rootip);
 275
 276	/*
 277	 * For pre-RTG file systems, the RT inodes have quotas attached,
 278	 * detach them now.
 279	 */
 280	if (!xfs_has_rtgroups(mp))
 281		xfs_qm_unmount_rt(mp);
 282
 283	/*
 284	 * Release the quota inodes.
 285	 */
 286	if (mp->m_quotainfo)
 287		xfs_qm_destroy_quotainos(mp->m_quotainfo);
 288}
 289
 290STATIC int
 291xfs_qm_dqattach_one(
 292	struct xfs_inode	*ip,
 293	xfs_dqtype_t		type,
 294	bool			doalloc,
 295	struct xfs_dquot	**IO_idqpp)
 296{
 297	struct xfs_dquot	*dqp;
 298	int			error;
 299
 300	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 301	error = 0;
 302
 303	/*
 304	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 305	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 306	 * simpler.
 307	 */
 308	dqp = *IO_idqpp;
 309	if (dqp) {
 310		trace_xfs_dqattach_found(dqp);
 311		return 0;
 312	}
 313
 314	/*
 315	 * Find the dquot from somewhere. This bumps the reference count of
 316	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 317	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 318	 * turned off suddenly.
 319	 */
 320	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 
 321	if (error)
 322		return error;
 323
 324	trace_xfs_dqattach_get(dqp);
 325
 326	/*
 327	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 328	 * that the dquot returned is the one that should go in the inode.
 329	 */
 330	*IO_idqpp = dqp;
 331	xfs_dqunlock(dqp);
 332	return 0;
 333}
 334
 335static bool
 336xfs_qm_need_dqattach(
 337	struct xfs_inode	*ip)
 338{
 339	struct xfs_mount	*mp = ip->i_mount;
 340
 341	if (!XFS_IS_QUOTA_ON(mp))
 342		return false;
 343	if (!XFS_NOT_DQATTACHED(mp, ip))
 344		return false;
 345	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 346		return false;
 347	if (xfs_is_metadir_inode(ip))
 348		return false;
 349	return true;
 350}
 351
 352/*
 353 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 354 * into account.
 355 * If @doalloc is true, the dquot(s) will be allocated if needed.
 356 * Inode may get unlocked and relocked in here, and the caller must deal with
 357 * the consequences.
 358 */
 359int
 360xfs_qm_dqattach_locked(
 361	xfs_inode_t	*ip,
 362	bool		doalloc)
 363{
 364	xfs_mount_t	*mp = ip->i_mount;
 
 365	int		error = 0;
 366
 367	if (!xfs_qm_need_dqattach(ip))
 368		return 0;
 369
 370	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 371	ASSERT(!xfs_is_metadir_inode(ip));
 372
 373	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 374		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 375				doalloc, &ip->i_udquot);
 
 376		if (error)
 377			goto done;
 378		ASSERT(ip->i_udquot);
 379	}
 380
 381	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 382		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 383				doalloc, &ip->i_gdquot);
 384		if (error)
 385			goto done;
 386		ASSERT(ip->i_gdquot);
 387	}
 388
 389	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 390		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 391				doalloc, &ip->i_pdquot);
 392		if (error)
 393			goto done;
 394		ASSERT(ip->i_pdquot);
 395	}
 396
 397done:
 398	/*
 399	 * Don't worry about the dquots that we may have attached before any
 400	 * error - they'll get detached later if it has not already been done.
 401	 */
 402	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 403	return error;
 404}
 405
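/*
 * Lock the inode and attach its dquots if quota is on and they are not
 * already attached.  Unlike the _locked variant, this never allocates
 * on-disk dquots (doalloc is false).
 */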
 406int
 407xfs_qm_dqattach(
 408	struct xfs_inode	*ip)
 
 409{
 410	int			error;
 411
 412	if (!xfs_qm_need_dqattach(ip))
 413		return 0;
 414
 415	xfs_ilock(ip, XFS_ILOCK_EXCL);
 416	error = xfs_qm_dqattach_locked(ip, false);
 417	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 418
 419	return error;
 420}
 421
 422/*
 423 * Release dquots (and their references) if any.
 424 * The inode should be locked EXCL except when this is called by
 425 * xfs_ireclaim.
 426 */
 427void
 428xfs_qm_dqdetach(
 429	xfs_inode_t	*ip)
 430{
 431	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 432		return;
 433
 434	trace_xfs_dquot_dqdetach(ip);
 435
 436	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 437	if (ip->i_udquot) {
 438		xfs_qm_dqrele(ip->i_udquot);
 439		ip->i_udquot = NULL;
 440	}
 441	if (ip->i_gdquot) {
 442		xfs_qm_dqrele(ip->i_gdquot);
 443		ip->i_gdquot = NULL;
 444	}
 445	if (ip->i_pdquot) {
 446		xfs_qm_dqrele(ip->i_pdquot);
 447		ip->i_pdquot = NULL;
 448	}
 449}
 450
 451struct xfs_qm_isolate {
 452	struct list_head	buffers;
 453	struct list_head	dispose;
 454};
 455
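/*
 * list_lru walk callback for the dquot shrinker.  Dquots that have regained
 * a reference are simply removed from the LRU, dirty ones are flushed and
 * their buffers queued on the isolate buffer list for writeback, and clean
 * unreferenced ones are moved to the dispose list to be freed by the caller.
 */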
 456static enum lru_status
 457xfs_qm_dquot_isolate(
 458	struct list_head	*item,
 459	struct list_lru_one	*lru,
 460	void			*arg)
 461		__releases(&lru->lock) __acquires(&lru->lock)
 462{
 463	struct xfs_dquot	*dqp = container_of(item,
 464						struct xfs_dquot, q_lru);
 465	struct xfs_qm_isolate	*isol = arg;
 466
 467	if (!xfs_dqlock_nowait(dqp))
 468		goto out_miss_busy;
 469
 470	/*
 471	 * If something else is freeing this dquot and hasn't yet removed it
 472	 * from the LRU, leave it for the freeing task to complete the freeing
 473	 * process rather than risk it being freed out from under us here.
 474	 */
 475	if (dqp->q_flags & XFS_DQFLAG_FREEING)
 476		goto out_miss_unlock;
 477
 478	/*
 479	 * This dquot has acquired a reference in the meantime; remove it from
 480	 * the freelist and try again.
 481	 */
 482	if (dqp->q_nrefs) {
 483		xfs_dqunlock(dqp);
 484		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 485
 486		trace_xfs_dqreclaim_want(dqp);
 487		list_lru_isolate(lru, &dqp->q_lru);
 488		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 489		return LRU_REMOVED;
 490	}
 491
 492	/*
 493	 * If the dquot is dirty, flush it. If it's already being flushed, just
 494	 * skip it so there is time for the IO to complete before we try to
 495	 * reclaim it again on the next LRU pass.
 496	 */
 497	if (!xfs_dqflock_nowait(dqp))
 498		goto out_miss_unlock;
 499
 500	if (XFS_DQ_IS_DIRTY(dqp)) {
 501		struct xfs_buf	*bp = NULL;
 502		int		error;
 503
 504		trace_xfs_dqreclaim_dirty(dqp);
 505
 506		/* we have to drop the LRU lock to flush the dquot */
 507		spin_unlock(&lru->lock);
 508
 509		error = xfs_dquot_use_attached_buf(dqp, &bp);
 510		if (!bp || error == -EAGAIN) {
 511			xfs_dqfunlock(dqp);
 
 512			goto out_unlock_dirty;
 513		}
 514
 515		/*
 516		 * dqflush completes dqflock on error, and the delwri ioend
 517		 * does it on success.
 518		 */
 519		error = xfs_qm_dqflush(dqp, bp);
 520		if (error)
 521			goto out_unlock_dirty;
 522
 523		xfs_buf_delwri_queue(bp, &isol->buffers);
 524		xfs_buf_relse(bp);
 525		goto out_unlock_dirty;
 526	}
 527
 528	xfs_dquot_detach_buf(dqp);
 529	xfs_dqfunlock(dqp);
 530
 531	/*
 532	 * Prevent lookups now that we are past the point of no return.
 533	 */
 534	dqp->q_flags |= XFS_DQFLAG_FREEING;
 535	xfs_dqunlock(dqp);
 536
 537	ASSERT(dqp->q_nrefs == 0);
 538	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 539	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 540	trace_xfs_dqreclaim_done(dqp);
 541	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 542	return LRU_REMOVED;
 543
 544out_miss_unlock:
 545	xfs_dqunlock(dqp);
 546out_miss_busy:
 547	trace_xfs_dqreclaim_busy(dqp);
 548	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 549	return LRU_SKIP;
 550
 551out_unlock_dirty:
 552	trace_xfs_dqreclaim_busy(dqp);
 553	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 554	xfs_dqunlock(dqp);
 
 555	return LRU_RETRY;
 556}
 557
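/*
 * Shrinker scan callback: walk the dquot LRU with xfs_qm_dquot_isolate,
 * submit any delwri buffers it queued, then destroy the dquots that were
 * moved to the dispose list.
 */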
 558static unsigned long
 559xfs_qm_shrink_scan(
 560	struct shrinker		*shrink,
 561	struct shrink_control	*sc)
 562{
 563	struct xfs_quotainfo	*qi = shrink->private_data;
 
 564	struct xfs_qm_isolate	isol;
 565	unsigned long		freed;
 566	int			error;
 
 567
 568	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 569		return 0;
 570
 571	INIT_LIST_HEAD(&isol.buffers);
 572	INIT_LIST_HEAD(&isol.dispose);
 573
 574	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 575				     xfs_qm_dquot_isolate, &isol);
 576
 577	error = xfs_buf_delwri_submit(&isol.buffers);
 578	if (error)
 579		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 580
 581	while (!list_empty(&isol.dispose)) {
 582		struct xfs_dquot	*dqp;
 583
 584		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 585		list_del_init(&dqp->q_lru);
 586		xfs_qm_dqfree_one(dqp);
 587	}
 588
 589	return freed;
 590}
 591
 592static unsigned long
 593xfs_qm_shrink_count(
 594	struct shrinker		*shrink,
 595	struct shrink_control	*sc)
 596{
 597	struct xfs_quotainfo	*qi = shrink->private_data;
 598
 599	return list_lru_shrink_count(&qi->qi_lru, sc);
 600}
 601
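/*
 * Seed the in-memory default limits for this quota type from the id-0
 * (root) dquot, if one can be read off disk.
 */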
 602STATIC void
 603xfs_qm_set_defquota(
 604	struct xfs_mount	*mp,
 605	xfs_dqtype_t		type,
 606	struct xfs_quotainfo	*qinf)
 607{
 608	struct xfs_dquot	*dqp;
 609	struct xfs_def_quota	*defq;
 610	int			error;
 611
 612	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 613	if (error)
 614		return;
 615
 616	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 617
 618	/*
 619	 * Timers and warnings have been already set, let's just set the
 620	 * default limits for this quota type
 621	 */
 622	defq->blk.hard = dqp->q_blk.hardlimit;
 623	defq->blk.soft = dqp->q_blk.softlimit;
 624	defq->ino.hard = dqp->q_ino.hardlimit;
 625	defq->ino.soft = dqp->q_ino.softlimit;
 626	defq->rtb.hard = dqp->q_rtb.hardlimit;
 627	defq->rtb.soft = dqp->q_rtb.softlimit;
 628	xfs_qm_dqdestroy(dqp);
 629}
 630
 631/* Initialize quota time limits from the root dquot. */
 632static void
 633xfs_qm_init_timelimits(
 634	struct xfs_mount	*mp,
 635	xfs_dqtype_t		type)
 636{
 637	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 638	struct xfs_def_quota	*defq;
 639	struct xfs_dquot	*dqp;
 640	int			error;
 641
 642	defq = xfs_get_defquota(qinf, type);
 643
 644	defq->blk.time = XFS_QM_BTIMELIMIT;
 645	defq->ino.time = XFS_QM_ITIMELIMIT;
 646	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 647
 648	/*
 649	 * We try to get the limits from the superuser's limits fields.
 650	 * This is quite hacky, but it is standard quota practice.
 651	 *
 652	 * Since we may not have done a quotacheck by this point, just read
 653	 * the dquot without attaching it to any hashtables or lists.
 654	 */
 655	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 656	if (error)
 657		return;
 658
 659	/*
 660	 * The warnings and timers set the grace period given to
 661	 * a user or group before he or she can not perform any
 662	 * more writing. If it is zero, a default is used.
 663	 */
 664	if (dqp->q_blk.timer)
 665		defq->blk.time = dqp->q_blk.timer;
 666	if (dqp->q_ino.timer)
 667		defq->ino.time = dqp->q_ino.timer;
 668	if (dqp->q_rtb.timer)
 669		defq->rtb.time = dqp->q_rtb.timer;
 670
 671	xfs_qm_dqdestroy(dqp);
 672}
 673
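/*
 * Load the quota directory from the metadata directory tree along with any
 * quota inodes that already exist there.  ENOENT is not fatal here because
 * missing inodes are created later in xfs_qm_create_metadir_qinos.
 */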
 674static int
 675xfs_qm_load_metadir_qinos(
 676	struct xfs_mount	*mp,
 677	struct xfs_quotainfo	*qi)
 678{
 679	struct xfs_trans	*tp;
 680	int			error;
 681
 682	error = xfs_trans_alloc_empty(mp, &tp);
 683	if (error)
 684		return error;
 685
 686	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
 687	if (error == -ENOENT) {
 688		/* no quota dir directory, but we'll create one later */
 689		error = 0;
 690		goto out_trans;
 691	}
 692	if (error)
 693		goto out_trans;
 694
 695	if (XFS_IS_UQUOTA_ON(mp)) {
 696		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
 697				&qi->qi_uquotaip);
 698		if (error && error != -ENOENT)
 699			goto out_trans;
 700	}
 701
 702	if (XFS_IS_GQUOTA_ON(mp)) {
 703		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
 704				&qi->qi_gquotaip);
 705		if (error && error != -ENOENT)
 706			goto out_trans;
 707	}
 708
 709	if (XFS_IS_PQUOTA_ON(mp)) {
 710		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
 711				&qi->qi_pquotaip);
 712		if (error && error != -ENOENT)
 713			goto out_trans;
 714	}
 715
 716	error = 0;
 717out_trans:
 718	xfs_trans_cancel(tp);
 719	return error;
 720}
 721
 722/* Create quota inodes in the metadata directory tree. */
 723STATIC int
 724xfs_qm_create_metadir_qinos(
 725	struct xfs_mount	*mp,
 726	struct xfs_quotainfo	*qi)
 727{
 728	int			error;
 729
 730	if (!qi->qi_dirip) {
 731		error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
 732		if (error && error != -EEXIST)
 733			return error;
 734		/*
 735		 * If the /quotas dirent points to an inode that isn't
 736		 * loadable, qi_dirip will be NULL but mkdir_parent will return
 737		 * -EEXIST.  In this case the metadir is corrupt, so bail out.
 738		 */
 739		if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
 740			return -EFSCORRUPTED;
 741	}
 742
 743	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
 744		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 745				XFS_DQTYPE_USER, &qi->qi_uquotaip);
 746		if (error)
 747			return error;
 748	}
 749
 750	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
 751		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 752				XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
 753		if (error)
 754			return error;
 755	}
 756
 757	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
 758		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 759				XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
 760		if (error)
 761			return error;
 762	}
 763
 764	return 0;
 765}
 766
 767/*
 768 * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
 769 * creating quota files on a metadir filesystem.
 770 */
 771STATIC int
 772xfs_qm_prep_metadir_sb(
 773	struct xfs_mount	*mp)
 774{
 775	struct xfs_trans	*tp;
 776	int			error;
 777
 778	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
 779	if (error)
 780		return error;
 781
 782	spin_lock(&mp->m_sb_lock);
 783
 784	xfs_add_quota(mp);
 785
 786	/* qflags will get updated fully _after_ quotacheck */
 787	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 788
 789	spin_unlock(&mp->m_sb_lock);
 790	xfs_log_sb(tp);
 791
 792	return xfs_trans_commit(tp);
 793}
 794
 795/*
 796 * Load existing quota inodes or create them.  Since this is a V5 filesystem,
 797 * we don't have to deal with the grp/prjquota switcheroo thing from V4.
 798 */
 799STATIC int
 800xfs_qm_init_metadir_qinos(
 801	struct xfs_mount	*mp)
 802{
 803	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 804	int			error;
 805
 806	if (!xfs_has_quota(mp)) {
 807		error = xfs_qm_prep_metadir_sb(mp);
 808		if (error)
 809			return error;
 810	}
 811
 812	error = xfs_qm_load_metadir_qinos(mp, qi);
 813	if (error)
 814		goto out_err;
 815
 816	error = xfs_qm_create_metadir_qinos(mp, qi);
 817	if (error)
 818		goto out_err;
 819
 820	/* The only user of the quota dir inode is online fsck */
 821#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
 822	xfs_irele(qi->qi_dirip);
 823	qi->qi_dirip = NULL;
 824#endif
 825	return 0;
 826out_err:
 827	xfs_qm_destroy_quotainos(mp->m_quotainfo);
 828	return error;
 829}
 830
 831/*
 832 * This initializes all the quota information that's kept in the
 833 * mount structure
 834 */
 835STATIC int
 836xfs_qm_init_quotainfo(
 837	struct xfs_mount	*mp)
 838{
 839	struct xfs_quotainfo	*qinf;
 840	int			error;
 
 841
 842	ASSERT(XFS_IS_QUOTA_ON(mp));
 843
 844	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
 845					GFP_KERNEL | __GFP_NOFAIL);
 846
 847	error = list_lru_init(&qinf->qi_lru);
 848	if (error)
 849		goto out_free_qinf;
 850
 851	/*
 852	 * See if quotainodes are setup, and if not, allocate them,
 853	 * and change the superblock accordingly.
 854	 */
 855	if (xfs_has_metadir(mp))
 856		error = xfs_qm_init_metadir_qinos(mp);
 857	else
 858		error = xfs_qm_init_quotainos(mp);
 859	if (error)
 860		goto out_free_lru;
 861
 862	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
 863	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
 864	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
 865	mutex_init(&qinf->qi_tree_lock);
 866
 867	/* mutex used to serialize quotaoffs */
 868	mutex_init(&qinf->qi_quotaofflock);
 869
 870	/* Precalc some constants */
 871	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 872	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 873	if (xfs_has_bigtime(mp)) {
 874		qinf->qi_expiry_min =
 875			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 876		qinf->qi_expiry_max =
 877			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 878	} else {
 879		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 880		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 881	}
 882	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 883			qinf->qi_expiry_max);
 884
 885	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 886
 887	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 888	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 889	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 890
 891	if (XFS_IS_UQUOTA_ON(mp))
 892		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 893	if (XFS_IS_GQUOTA_ON(mp))
 894		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 895	if (XFS_IS_PQUOTA_ON(mp))
 896		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 897
 898	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
 899					   mp->m_super->s_id);
 900	if (!qinf->qi_shrinker) {
 901		error = -ENOMEM;
 902		goto out_free_inos;
 903	}
 904
 905	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
 906	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
 907	qinf->qi_shrinker->private_data = qinf;
 908
 909	shrinker_register(qinf->qi_shrinker);
 910
 911	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
 912	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
 913
 914	return 0;
 915
 916out_free_inos:
 917	mutex_destroy(&qinf->qi_quotaofflock);
 918	mutex_destroy(&qinf->qi_tree_lock);
 919	xfs_qm_destroy_quotainos(qinf);
 920out_free_lru:
 921	list_lru_destroy(&qinf->qi_lru);
 922out_free_qinf:
 923	kfree(qinf);
 924	mp->m_quotainfo = NULL;
 925	return error;
 926}
 927
 
 928/*
 929 * Gets called when unmounting a filesystem or when all quotas get
 930 * turned off.
 931 * This purges the quota inodes, destroys locks and frees itself.
 932 */
 933void
 934xfs_qm_destroy_quotainfo(
 935	struct xfs_mount	*mp)
 936{
 937	struct xfs_quotainfo	*qi;
 938
 939	qi = mp->m_quotainfo;
 940	ASSERT(qi != NULL);
 941
 942	shrinker_free(qi->qi_shrinker);
 943	list_lru_destroy(&qi->qi_lru);
 944	xfs_qm_destroy_quotainos(qi);
 945	mutex_destroy(&qi->qi_tree_lock);
 946	mutex_destroy(&qi->qi_quotaofflock);
 947	kfree(qi);
 948	mp->m_quotainfo = NULL;
 949}
 950
 951static inline enum xfs_metafile_type
 952xfs_qm_metafile_type(
 953	unsigned int		flags)
 954{
 955	if (flags & XFS_QMOPT_UQUOTA)
 956		return XFS_METAFILE_USRQUOTA;
 957	else if (flags & XFS_QMOPT_GQUOTA)
 958		return XFS_METAFILE_GRPQUOTA;
 959	return XFS_METAFILE_PRJQUOTA;
 960}
 961
 962/*
 963 * Create an inode and return with a reference already taken, but unlocked
 964 * This is how we create quota inodes
 965 */
 966STATIC int
 967xfs_qm_qino_alloc(
 968	struct xfs_mount	*mp,
 969	struct xfs_inode	**ipp,
 970	unsigned int		flags)
 971{
 972	struct xfs_trans	*tp;
 973	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
 974	int			error;
 975	bool			need_alloc = true;
 976
 977	*ipp = NULL;
 978	/*
 979	 * With superblock that doesn't have separate pquotino, we
 980	 * share an inode between gquota and pquota. If the on-disk
 981	 * superblock has GQUOTA and the filesystem is now mounted
 982	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 983	 * vice-versa.
 984	 */
 985	if (!xfs_has_pquotino(mp) &&
 986			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 987		xfs_ino_t ino = NULLFSINO;
 988
 989		if ((flags & XFS_QMOPT_PQUOTA) &&
 990			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 991			ino = mp->m_sb.sb_gquotino;
 992			if (XFS_IS_CORRUPT(mp,
 993					   mp->m_sb.sb_pquotino != NULLFSINO)) {
 994				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
 995				return -EFSCORRUPTED;
 996			}
 997		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 998			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 999			ino = mp->m_sb.sb_pquotino;
1000			if (XFS_IS_CORRUPT(mp,
1001					   mp->m_sb.sb_gquotino != NULLFSINO)) {
1002				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
1003				return -EFSCORRUPTED;
1004			}
1005		}
1006		if (ino != NULLFSINO) {
1007			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
1008			if (error)
1009				return error;
1010
1011			mp->m_sb.sb_gquotino = NULLFSINO;
1012			mp->m_sb.sb_pquotino = NULLFSINO;
1013			need_alloc = false;
1014		}
1015	}
1016
1017	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
1018			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
1019			0, 0, &tp);
1020	if (error)
 
1021		return error;
 
1022
1023	if (need_alloc) {
1024		struct xfs_icreate_args	args = {
1025			.mode		= S_IFREG,
1026			.flags		= XFS_ICREATE_UNLINKABLE,
1027		};
1028		xfs_ino_t	ino;
1029
1030		error = xfs_dialloc(&tp, &args, &ino);
1031		if (!error)
1032			error = xfs_icreate(tp, ino, &args, ipp);
1033		if (error) {
1034			xfs_trans_cancel(tp);
 
1035			return error;
1036		}
1037		if (xfs_has_metadir(mp))
1038			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
1039	}
1040
1041	/*
1042	 * Make the changes in the superblock, and log those too.
1043	 * sbfields arg may contain fields other than *QUOTINO;
1044	 * VERSIONNUM for example.
1045	 */
1046	spin_lock(&mp->m_sb_lock);
1047	if (flags & XFS_QMOPT_SBVERSION) {
1048		ASSERT(!xfs_has_quota(mp));
1049
1050		xfs_add_quota(mp);
1051		mp->m_sb.sb_uquotino = NULLFSINO;
1052		mp->m_sb.sb_gquotino = NULLFSINO;
1053		mp->m_sb.sb_pquotino = NULLFSINO;
1054
1055		/* qflags will get updated fully _after_ quotacheck */
1056		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1057	}
1058	if (flags & XFS_QMOPT_UQUOTA)
1059		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
1060	else if (flags & XFS_QMOPT_GQUOTA)
1061		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
1062	else
1063		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
1064	spin_unlock(&mp->m_sb_lock);
1065	xfs_log_sb(tp);
1066
1067	error = xfs_trans_commit(tp);
1068	if (error) {
1069		ASSERT(xfs_is_shutdown(mp));
1070		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 
1071	}
1072	if (need_alloc) {
1073		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
1074		xfs_finish_inode_setup(*ipp);
1075	}
1076	return error;
1077}
1078
1079
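/*
 * Reset every dquot in this cluster buffer: zero the usage counters and,
 * for all but the id-0 dquot, the timers and warning counts, repairing any
 * dqblk that fails verification or carries the wrong type along the way.
 */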
1080STATIC void
1081xfs_qm_reset_dqcounts(
1082	struct xfs_mount	*mp,
1083	struct xfs_buf		*bp,
1084	xfs_dqid_t		id,
1085	xfs_dqtype_t		type)
1086{
1087	struct xfs_dqblk	*dqb;
1088	int			j;
1089
1090	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1091
1092	/*
1093	 * Reset all counters and timers. They'll be
1094	 * started afresh by xfs_qm_quotacheck.
1095	 */
1096#ifdef DEBUG
1097	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
1098		sizeof(struct xfs_dqblk);
1099	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1100#endif
1101	dqb = bp->b_addr;
1102	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1103		struct xfs_disk_dquot	*ddq;
1104
1105		ddq = (struct xfs_disk_dquot *)&dqb[j];
1106
1107		/*
1108		 * Do a sanity check, and if needed, repair the dqblk. Don't
1109		 * output any warnings because it's perfectly possible to
1110		 * find uninitialised dquot blks. See comment in
1111		 * xfs_dquot_verify.
1112		 */
1113		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
1114		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
1115			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
1116
1117		/*
1118		 * Reset type in case we are reusing group quota file for
1119		 * project quotas or vice versa
1120		 */
1121		ddq->d_type = type;
 
1122		ddq->d_bcount = 0;
1123		ddq->d_icount = 0;
1124		ddq->d_rtbcount = 0;
1125
1126		/*
1127		 * dquot id 0 stores the default grace period and the maximum
1128		 * warning limit that were set by the administrator, so we
1129		 * should not reset them.
1130		 */
1131		if (ddq->d_id != 0) {
1132			ddq->d_btimer = 0;
1133			ddq->d_itimer = 0;
1134			ddq->d_rtbtimer = 0;
1135			ddq->d_bwarns = 0;
1136			ddq->d_iwarns = 0;
1137			ddq->d_rtbwarns = 0;
1138			if (xfs_has_bigtime(mp))
1139				ddq->d_type |= XFS_DQTYPE_BIGTIME;
1140		}
1141
1142		if (xfs_has_crc(mp)) {
1143			xfs_update_cksum((char *)&dqb[j],
1144					 sizeof(struct xfs_dqblk),
1145					 XFS_DQUOT_CRC_OFF);
1146		}
1147	}
1148}
1149
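/*
 * Read each dquot cluster buffer in the given block range, reset the
 * dquots it contains and queue the buffer for delayed write.
 */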
1150STATIC int
1151xfs_qm_reset_dqcounts_all(
1152	struct xfs_mount	*mp,
1153	xfs_dqid_t		firstid,
1154	xfs_fsblock_t		bno,
1155	xfs_filblks_t		blkcnt,
1156	xfs_dqtype_t		type,
1157	struct list_head	*buffer_list)
1158{
1159	struct xfs_buf		*bp;
1160	int			error = 0;
 
1161
1162	ASSERT(blkcnt > 0);
1163
1164	/*
1165	 * Blkcnt arg can be a very big number, and might even be
1166	 * larger than the log itself. So, we have to break it up into
1167	 * manageable-sized transactions.
1168	 * Note that we don't start a permanent transaction here; we might
1169	 * not be able to get a log reservation for the whole thing up front,
1170	 * and we don't really care to either, because we just discard
1171	 * everything if we were to crash in the middle of this loop.
1172	 */
1173	while (blkcnt--) {
1174		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1175			      XFS_FSB_TO_DADDR(mp, bno),
1176			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1177			      &xfs_dquot_buf_ops);
1178
1179		/*
1180		 * CRC and validation errors will return an EFSCORRUPTED here. If
1181		 * this occurs, re-read without CRC validation so that we can
1182		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1183		 * will leave a trace in the log indicating corruption has
1184		 * been detected.
1185		 */
1186		if (error == -EFSCORRUPTED) {
1187			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1188				      XFS_FSB_TO_DADDR(mp, bno),
1189				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1190				      NULL);
1191		}
1192
1193		if (error)
1194			break;
1195
1196		/*
1197		 * A corrupt buffer might not have a verifier attached, so
1198		 * make sure we have the correct one attached before writeback
1199		 * occurs.
1200		 */
1201		bp->b_ops = &xfs_dquot_buf_ops;
1202		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1203		xfs_buf_delwri_queue(bp, buffer_list);
1204		xfs_buf_relse(bp);
1205
1206		/* goto the next block. */
1207		bno++;
1208		firstid += mp->m_quotainfo->qi_dqperchunk;
1209	}
1210
1211	return error;
1212}
1213
1214/*
1215 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
1216 * counters for every chunk of dquots that we find.
1217 */
1218STATIC int
1219xfs_qm_reset_dqcounts_buf(
1220	struct xfs_mount	*mp,
1221	struct xfs_inode	*qip,
1222	xfs_dqtype_t		type,
1223	struct list_head	*buffer_list)
1224{
1225	struct xfs_bmbt_irec	*map;
1226	int			i, nmaps;	/* number of map entries */
1227	int			error;		/* return value */
1228	xfs_fileoff_t		lblkno;
1229	xfs_filblks_t		maxlblkcnt;
1230	xfs_dqid_t		firstid;
1231	xfs_fsblock_t		rablkno;
1232	xfs_filblks_t		rablkcnt;
1233
1234	error = 0;
1235	/*
1236	 * This looks racy, but we can't keep an inode lock across a
1237	 * trans_reserve. But, this gets called during quotacheck, and that
1238	 * happens only at mount time which is single threaded.
1239	 */
1240	if (qip->i_nblocks == 0)
1241		return 0;
1242
1243	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1244			GFP_KERNEL | __GFP_NOFAIL);
1245
1246	lblkno = 0;
1247	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1248	do {
1249		uint		lock_mode;
1250
1251		nmaps = XFS_DQITER_MAP_SIZE;
1252		/*
1253		 * We aren't changing the inode itself. Just changing
1254		 * some of its data. No new blocks are added here, and
1255		 * the inode is never added to the transaction.
1256		 */
1257		lock_mode = xfs_ilock_data_map_shared(qip);
1258		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1259				       map, &nmaps, 0);
1260		xfs_iunlock(qip, lock_mode);
1261		if (error)
1262			break;
1263
1264		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1265		for (i = 0; i < nmaps; i++) {
1266			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1267			ASSERT(map[i].br_blockcount);
1268
1269
1270			lblkno += map[i].br_blockcount;
1271
1272			if (map[i].br_startblock == HOLESTARTBLOCK)
1273				continue;
1274
1275			firstid = (xfs_dqid_t) map[i].br_startoff *
1276				mp->m_quotainfo->qi_dqperchunk;
1277			/*
1278			 * Do a read-ahead on the next extent.
1279			 */
1280			if ((i+1 < nmaps) &&
1281			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1282				rablkcnt =  map[i+1].br_blockcount;
1283				rablkno = map[i+1].br_startblock;
1284				while (rablkcnt--) {
1285					xfs_buf_readahead(mp->m_ddev_targp,
1286					       XFS_FSB_TO_DADDR(mp, rablkno),
1287					       mp->m_quotainfo->qi_dqchunklen,
1288					       &xfs_dquot_buf_ops);
1289					rablkno++;
1290				}
1291			}
1292			/*
1293			 * Iterate thru all the blks in the extent and
1294			 * reset the counters of all the dquots inside them.
1295			 */
1296			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1297						   map[i].br_startblock,
1298						   map[i].br_blockcount,
1299						   type, buffer_list);
1300			if (error)
1301				goto out;
1302		}
1303	} while (nmaps > 0);
1304
1305out:
1306	kfree(map);
1307	return error;
1308}
1309
1310/*
1311 * Called by dqusage_adjust in doing a quotacheck.
1312 *
1313 * Given the inode and a dquot id, this updates both the incore dquot as well
1314 * as the buffer copy. This is so that once the quotacheck is done, we can
1315 * just log all the buffers, as opposed to logging numerous updates to
1316 * individual dquots.
1317 */
1318STATIC int
1319xfs_qm_quotacheck_dqadjust(
1320	struct xfs_inode	*ip,
1321	xfs_dqtype_t		type,
 
1322	xfs_qcnt_t		nblks,
1323	xfs_qcnt_t		rtblks)
1324{
1325	struct xfs_mount	*mp = ip->i_mount;
1326	struct xfs_dquot	*dqp;
1327	xfs_dqid_t		id;
1328	int			error;
1329
1330	id = xfs_qm_id_for_quotatype(ip, type);
1331	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1332	if (error) {
1333		/*
1334		 * Shouldn't be able to turn off quotas here.
1335		 */
1336		ASSERT(error != -ESRCH);
1337		ASSERT(error != -ENOENT);
1338		return error;
1339	}
1340
1341	error = xfs_dquot_attach_buf(NULL, dqp);
1342	if (error)
1343		return error;
1344
1345	trace_xfs_dqadjust(dqp);
1346
1347	/*
1348	 * Adjust the inode count and the block count to reflect this inode's
1349	 * resource usage.
1350	 */
1351	dqp->q_ino.count++;
1352	dqp->q_ino.reserved++;
1353	if (nblks) {
1354		dqp->q_blk.count += nblks;
1355		dqp->q_blk.reserved += nblks;
1356	}
1357	if (rtblks) {
1358		dqp->q_rtb.count += rtblks;
1359		dqp->q_rtb.reserved += rtblks;
1360	}
1361
1362	/*
1363	 * Set default limits, adjust timers (since we changed usages)
1364	 *
1365	 * There are no timers for the default values set in the root dquot.
1366	 */
1367	if (dqp->q_id) {
1368		xfs_qm_adjust_dqlimits(dqp);
1369		xfs_qm_adjust_dqtimers(dqp);
1370	}
1371
1372	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1373	xfs_qm_dqput(dqp);
1374	return 0;
1375}
1376
1377/*
1378 * callback routine supplied to bulkstat(). Given an inumber, find its
1379 * dquots and update them to account for resources taken by that inode.
1380 */
1381/* ARGSUSED */
1382STATIC int
1383xfs_qm_dqusage_adjust(
1384	struct xfs_mount	*mp,
1385	struct xfs_trans	*tp,
1386	xfs_ino_t		ino,
1387	void			*data)
1388{
1389	struct xfs_inode	*ip;
1390	xfs_filblks_t		nblks, rtblks;
1391	unsigned int		lock_mode;
1392	int			error;
 
1393
1394	ASSERT(XFS_IS_QUOTA_ON(mp));
1395
1396	/*
1397	 * rootino must have its resources accounted for, not so with the quota
1398	 * inodes.
1399	 */
1400	if (xfs_is_quota_inode(&mp->m_sb, ino))
1401		return 0;
1402
1403	/*
1404	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1405	 * at mount time and therefore nobody will be racing chown/chproj.
1406	 */
1407	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1408	if (error == -EINVAL || error == -ENOENT)
1409		return 0;
1410	if (error)
1411		return error;
1412
1413	/*
1414	 * Reload the incore unlinked list to avoid failure in inodegc.
1415	 * Use an unlocked check here because unrecovered unlinked inodes
1416	 * should be somewhat rare.
1417	 */
1418	if (xfs_inode_unlinked_incomplete(ip)) {
1419		error = xfs_inode_reload_unlinked(ip);
1420		if (error) {
1421			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1422			goto error0;
1423		}
1424	}
1425
1426	/* Metadata directory files are not accounted to user-visible quotas. */
1427	if (xfs_is_metadir_inode(ip))
1428		goto error0;
1429
1430	ASSERT(ip->i_delayed_blks == 0);
1431
1432	lock_mode = xfs_ilock_data_map_shared(ip);
1433	if (XFS_IS_REALTIME_INODE(ip)) {
1434		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1435		if (error) {
1436			xfs_iunlock(ip, lock_mode);
1437			goto error0;
1438		}
1439	}
1440	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
1441	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1442	xfs_iunlock(ip, lock_mode);
1443
1444	/*
1445	 * Add the (disk blocks and inode) resources occupied by this
1446	 * inode to its dquots. We do this adjustment in the incore dquot,
1447	 * and also copy the changes to its buffer.
1448	 * We don't care about putting these changes in a transaction
1449	 * envelope because if we crash in the middle of a 'quotacheck'
1450	 * we have to start from the beginning anyway.
1451	 * Once we're done, we'll log all the dquot bufs.
1452	 *
1453	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1454	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1455	 */
1456	if (XFS_IS_UQUOTA_ON(mp)) {
1457		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1458				rtblks);
1459		if (error)
1460			goto error0;
1461	}
1462
1463	if (XFS_IS_GQUOTA_ON(mp)) {
1464		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1465				rtblks);
1466		if (error)
1467			goto error0;
1468	}
1469
1470	if (XFS_IS_PQUOTA_ON(mp)) {
1471		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1472				rtblks);
1473		if (error)
1474			goto error0;
1475	}
1476
1477error0:
1478	xfs_irele(ip);
1479	return error;
1480}
1481
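/*
 * Flush one dirty dquot to its backing buffer and queue that buffer on the
 * caller's delwri list.  Used via xfs_qm_dquot_walk at the end of quotacheck
 * to push the adjusted dquots to disk.
 */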
1482STATIC int
1483xfs_qm_flush_one(
1484	struct xfs_dquot	*dqp,
1485	void			*data)
1486{
1487	struct xfs_mount	*mp = dqp->q_mount;
1488	struct list_head	*buffer_list = data;
1489	struct xfs_buf		*bp = NULL;
1490	int			error = 0;
1491
1492	xfs_dqlock(dqp);
1493	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1494		goto out_unlock;
1495	if (!XFS_DQ_IS_DIRTY(dqp))
1496		goto out_unlock;
1497
1498	/*
1499	 * The only way the dquot is already flush locked by the time quotacheck
1500	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1501	 * it for the final time. Quotacheck collects all dquot bufs in the
1502	 * local delwri queue before dquots are dirtied, so reclaim can't have
1503	 * possibly queued it for I/O. The only way out is to push the buffer to
1504	 * cycle the flush lock.
1505	 */
1506	if (!xfs_dqflock_nowait(dqp)) {
1507		/* buf is pinned in-core by delwri list */
1508		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1509				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1510		if (error)
1511			goto out_unlock;
1512
1513		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1514			error = -EAGAIN;
1515			xfs_buf_relse(bp);
1516			goto out_unlock;
1517		}
1518		xfs_buf_unlock(bp);
1519
1520		xfs_buf_delwri_pushbuf(bp, buffer_list);
1521		xfs_buf_rele(bp);
1522
1523		error = -EAGAIN;
1524		goto out_unlock;
1525	}
1526
1527	error = xfs_dquot_use_attached_buf(dqp, &bp);
1528	if (error)
1529		goto out_unlock;
1530	if (!bp) {
1531		error = -EFSCORRUPTED;
1532		goto out_unlock;
1533	}
1534
1535	error = xfs_qm_dqflush(dqp, bp);
1536	if (!error)
1537		xfs_buf_delwri_queue(bp, buffer_list);
1538	xfs_buf_relse(bp);
1539out_unlock:
1540	xfs_dqunlock(dqp);
1541	return error;
1542}
1543
1544/*
1545 * Walk thru all the filesystem inodes and construct a consistent view
1546 * of the disk quota world. If the quotacheck fails, disable quotas.
1547 */
1548STATIC int
1549xfs_qm_quotacheck(
1550	xfs_mount_t	*mp)
1551{
1552	int			error, error2;
1553	uint			flags;
1554	LIST_HEAD		(buffer_list);
1555	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1556	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1557	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1558
1559	flags = 0;
1560
1561	ASSERT(uip || gip || pip);
1562	ASSERT(XFS_IS_QUOTA_ON(mp));
1563
1564	xfs_notice(mp, "Quotacheck needed: Please wait.");
1565
1566	/*
1567	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1568	 * their counters to zero. We need a clean slate.
1569	 * We don't log our changes till later.
1570	 */
1571	if (uip) {
1572		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1573					 &buffer_list);
1574		if (error)
1575			goto error_return;
1576		flags |= XFS_UQUOTA_CHKD;
1577	}
1578
1579	if (gip) {
1580		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1581					 &buffer_list);
1582		if (error)
1583			goto error_return;
1584		flags |= XFS_GQUOTA_CHKD;
1585	}
1586
1587	if (pip) {
1588		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1589					 &buffer_list);
1590		if (error)
1591			goto error_return;
1592		flags |= XFS_PQUOTA_CHKD;
1593	}
1594
1595	xfs_set_quotacheck_running(mp);
1596	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1597			NULL);
1598	xfs_clear_quotacheck_running(mp);
1599
1600	/*
1601	 * On error, the inode walk may have partially populated the dquot
1602	 * caches.  We must purge them before disabling quota and tearing down
1603	 * the quotainfo, or else the dquots will leak.
1604	 */
1605	if (error)
1606		goto error_purge;
1607
1608	/*
1609	 * We've made all the changes that we need to make incore.  Flush them
1610	 * down to disk buffers if everything was updated successfully.
1611	 */
1612	if (XFS_IS_UQUOTA_ON(mp)) {
1613		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1614					  &buffer_list);
1615	}
1616	if (XFS_IS_GQUOTA_ON(mp)) {
1617		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1618					   &buffer_list);
1619		if (!error)
1620			error = error2;
1621	}
1622	if (XFS_IS_PQUOTA_ON(mp)) {
1623		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1624					   &buffer_list);
1625		if (!error)
1626			error = error2;
1627	}
1628
1629	error2 = xfs_buf_delwri_submit(&buffer_list);
1630	if (!error)
1631		error = error2;
1632
1633	/*
1634	 * We can get this error if we couldn't do a dquot allocation inside
1635	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1636	 * dirty dquots that might be cached; we just want to get rid of them
1637	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1638	 * at this point (because we intentionally didn't in dqget_noattach).
1639	 */
1640	if (error)
1641		goto error_purge;
1642
1643	/*
1644	 * If one type of quotas is off, then it will lose its
1645	 * quotachecked status, since we won't be doing accounting for
1646	 * that type anymore.
1647	 */
1648	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1649	mp->m_qflags |= flags;
1650
1651error_return:
1652	xfs_buf_delwri_cancel(&buffer_list);
1653
1654	if (error) {
1655		xfs_warn(mp,
1656	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1657			error);
1658		/*
1659		 * We must turn off quotas.
1660		 */
1661		ASSERT(mp->m_quotainfo != NULL);
1662		xfs_qm_destroy_quotainfo(mp);
1663		if (xfs_mount_reset_sbqflags(mp)) {
1664			xfs_warn(mp,
1665				"Quotacheck: Failed to reset quota flags.");
1666		}
1667		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1668	} else {
1669		xfs_notice(mp, "Quotacheck: Done.");
1670		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1671	}
1672
1673	return error;
1674
1675error_purge:
1676	/*
1677	 * On error, we may have inodes queued for inactivation. This may try
1678	 * to attach dquots to the inode before running cleanup operations on
1679	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1680	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1681	 * pending inodegc operations before we purge the dquots from memory,
1682	 * ensuring that background inactivation is idle whilst we turn off
1683	 * quotas.
1684	 */
1685	xfs_inodegc_flush(mp);
1686	xfs_qm_dqpurge_all(mp);
1687	goto error_return;
1688
1689}
1690
1691/*
1692 * This is called from xfs_mountfs to start quotas and initialize all
1693 * necessary data structures like quotainfo.  This is also responsible for
1694 * running a quotacheck as necessary.  We are guaranteed that the superblock
1695 * is consistently read in at this point.
1696 *
1697 * If we fail here, the mount will continue with quota turned off. We don't
1698 * need to indicate success or failure at all.
1699 */
1700void
1701xfs_qm_mount_quotas(
1702	struct xfs_mount	*mp)
1703{
1704	int			error = 0;
1705	uint			sbf;
1706
1707	/*
1708	 * If quotas on realtime volumes is not supported, disable quotas
1709	 * immediately.  We only support rtquota if rtgroups are enabled to
1710	 * avoid problems with older kernels.
1711	 */
1712	if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
1713		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1714		mp->m_qflags = 0;
1715		goto write_changes;
1716	}
1717
1718	ASSERT(XFS_IS_QUOTA_ON(mp));
1719
1720	/*
1721	 * Allocate the quotainfo structure inside the mount struct, and
1722	 * create quotainode(s), and change/rev superblock if necessary.
1723	 */
1724	error = xfs_qm_init_quotainfo(mp);
1725	if (error) {
1726		/*
1727		 * We must turn off quotas.
1728		 */
1729		ASSERT(mp->m_quotainfo == NULL);
1730		mp->m_qflags = 0;
1731		goto write_changes;
1732	}
1733	/*
1734	 * If any of the quotas are not consistent, do a quotacheck.
1735	 */
1736	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1737		error = xfs_qm_quotacheck(mp);
1738		if (error) {
1739			/* Quotacheck failed and disabled quotas. */
1740			return;
1741		}
1742	}
1743	/*
1744	 * If one type of quotas is off, then it will lose its
1745	 * quotachecked status, since we won't be doing accounting for
1746	 * that type anymore.
1747	 */
1748	if (!XFS_IS_UQUOTA_ON(mp))
1749		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1750	if (!XFS_IS_GQUOTA_ON(mp))
1751		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1752	if (!XFS_IS_PQUOTA_ON(mp))
1753		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1754
1755 write_changes:
1756	/*
1757	 * We actually don't have to acquire the m_sb_lock at all.
1758	 * This can only be called from mount, and that's single threaded. XXX
1759	 */
1760	spin_lock(&mp->m_sb_lock);
1761	sbf = mp->m_sb.sb_qflags;
1762	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1763	spin_unlock(&mp->m_sb_lock);
1764
1765	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1766		if (xfs_sync_sb(mp, false)) {
1767			/*
1768			 * We could only have been turning quotas off.
1769			 * We aren't in very good shape actually because
1770			 * the incore structures are convinced that quotas are
1771			 * off, but the on-disk superblock doesn't know that!
1772			 */
1773			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1774			xfs_alert(mp, "%s: Superblock update failed!",
1775				__func__);
1776		}
1777	}
1778
1779	if (error) {
1780		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
1781		return;
1782	}
1783}
1784
1785/*
1786 * Load the inode for a given type of quota, assuming that the sb fields have
1787 * been sorted out.  This is not true when switching quota types on a V4
1788 * filesystem, so do not use this function for that.
1789 *
1790 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
1791 * success; or a negative errno.
1792 */
1793int
1794xfs_qm_qino_load(
1795	struct xfs_mount	*mp,
1796	xfs_dqtype_t		type,
1797	struct xfs_inode	**ipp)
1798{
1799	struct xfs_trans	*tp;
1800	struct xfs_inode	*dp = NULL;
1801	int			error;
1802
1803	error = xfs_trans_alloc_empty(mp, &tp);
1804	if (error)
1805		return error;
1806
1807	if (xfs_has_metadir(mp)) {
1808		error = xfs_dqinode_load_parent(tp, &dp);
1809		if (error)
1810			goto out_cancel;
1811	}
1812
1813	error = xfs_dqinode_load(tp, dp, type, ipp);
1814	if (dp)
1815		xfs_irele(dp);
1816out_cancel:
1817	xfs_trans_cancel(tp);
1818	return error;
1819}
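
/*
 * Usage sketch (the locals below are placeholders, not taken from this file):
 * a caller that only wants to look at the user quota inode might do roughly
 * the following, treating -ENOENT as "no quota inode allocated yet":
 *
 *	struct xfs_inode	*uip = NULL;
 *	int			error;
 *
 *	error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
 *	if (error == -ENOENT)
 *		return 0;
 *	if (error)
 *		return error;
 *	...
 *	xfs_irele(uip);
 */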
1820
1821/*
1822 * This is called after the superblock has been read in and we're ready to
1823 * iget the quota inodes.
1824 */
1825STATIC int
1826xfs_qm_init_quotainos(
1827	xfs_mount_t	*mp)
1828{
1829	struct xfs_inode	*uip = NULL;
1830	struct xfs_inode	*gip = NULL;
1831	struct xfs_inode	*pip = NULL;
1832	int			error;
1833	uint			flags = 0;
1834
1835	ASSERT(mp->m_quotainfo);
1836
1837	/*
1838	 * Get the uquota, gquota and pquota inodes
1839	 */
1840	if (xfs_has_quota(mp)) {
1841		if (XFS_IS_UQUOTA_ON(mp) &&
1842		    mp->m_sb.sb_uquotino != NULLFSINO) {
1843			ASSERT(mp->m_sb.sb_uquotino > 0);
1844			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
1845			if (error)
1846				return error;
1847		}
1848		if (XFS_IS_GQUOTA_ON(mp) &&
1849		    mp->m_sb.sb_gquotino != NULLFSINO) {
1850			ASSERT(mp->m_sb.sb_gquotino > 0);
1851			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
1852			if (error)
1853				goto error_rele;
1854		}
1855		if (XFS_IS_PQUOTA_ON(mp) &&
1856		    mp->m_sb.sb_pquotino != NULLFSINO) {
1857			ASSERT(mp->m_sb.sb_pquotino > 0);
1858			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
1859			if (error)
1860				goto error_rele;
1861		}
1862	} else {
1863		flags |= XFS_QMOPT_SBVERSION;
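		/*
		 * The superblock has no quota feature yet: ask the first
		 * xfs_qm_qino_alloc() call below to rev the superblock
		 * version; the flag is cleared once that has happened.
		 */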
1864	}
1865
1866	/*
1867	 * Create the three inodes, if they don't exist already. The changes
1868	 * made above will get added to a transaction and logged in one of
1869	 * the qino_alloc calls below.  If the device is readonly,
1870	 * temporarily switch to read-write to do this.
1871	 */
1872	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1873		error = xfs_qm_qino_alloc(mp, &uip,
1874					      flags | XFS_QMOPT_UQUOTA);
1875		if (error)
1876			goto error_rele;
1877
1878		flags &= ~XFS_QMOPT_SBVERSION;
1879	}
1880	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1881		error = xfs_qm_qino_alloc(mp, &gip,
1882					  flags | XFS_QMOPT_GQUOTA);
1883		if (error)
1884			goto error_rele;
1885
1886		flags &= ~XFS_QMOPT_SBVERSION;
1887	}
1888	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1889		error = xfs_qm_qino_alloc(mp, &pip,
1890					  flags | XFS_QMOPT_PQUOTA);
1891		if (error)
1892			goto error_rele;
1893	}
1894
1895	mp->m_quotainfo->qi_uquotaip = uip;
1896	mp->m_quotainfo->qi_gquotaip = gip;
1897	mp->m_quotainfo->qi_pquotaip = pip;
1898
1899	return 0;
1900
1901error_rele:
1902	if (uip)
1903		xfs_irele(uip);
1904	if (gip)
1905		xfs_irele(gip);
1906	if (pip)
1907		xfs_irele(pip);
1908	return error;
1909}
1910
1911STATIC void
1912xfs_qm_dqfree_one(
1913	struct xfs_dquot	*dqp)
1914{
1915	struct xfs_mount	*mp = dqp->q_mount;
1916	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1917
1918	mutex_lock(&qi->qi_tree_lock);
1919	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1920
1921	qi->qi_dquots--;
1922	mutex_unlock(&qi->qi_tree_lock);
1923
1924	xfs_qm_dqdestroy(dqp);
1925}
1926
1927/* --------------- utility functions for vnodeops ---------------- */
1928
1929
1930/*
1931 * Given an inode, a uid, gid and prid, make sure that we have
1932 * allocated the relevant dquot(s) on disk, and that we won't exceed
1933 * inode quotas by creating this file.
1934 * This also attaches dquot(s) to the given inode after locking it,
1935 * and returns the dquots corresponding to the uid, gid and/or prid.
1936 *
1937 * in	: inode (unlocked)
1938 * out	: udquot, gdquot, pdquot with references taken and unlocked
1939 */
1940int
1941xfs_qm_vop_dqalloc(
1942	struct xfs_inode	*ip,
1943	kuid_t			uid,
1944	kgid_t			gid,
1945	prid_t			prid,
1946	uint			flags,
1947	struct xfs_dquot	**O_udqpp,
1948	struct xfs_dquot	**O_gdqpp,
1949	struct xfs_dquot	**O_pdqpp)
1950{
1951	struct xfs_mount	*mp = ip->i_mount;
1952	struct inode		*inode = VFS_I(ip);
1953	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1954	struct xfs_dquot	*uq = NULL;
1955	struct xfs_dquot	*gq = NULL;
1956	struct xfs_dquot	*pq = NULL;
1957	int			error;
1958	uint			lockflags;
1959
1960	if (!XFS_IS_QUOTA_ON(mp))
1961		return 0;
1962
1963	ASSERT(!xfs_is_metadir_inode(ip));
1964
1965	lockflags = XFS_ILOCK_EXCL;
1966	xfs_ilock(ip, lockflags);
1967
1968	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1969		gid = inode->i_gid;
1970
1971	/*
1972	 * Attach the dquot(s) to this inode, doing a dquot allocation
1973	 * if necessary. The dquot(s) will not be locked.
1974	 */
1975	if (XFS_NOT_DQATTACHED(mp, ip)) {
1976		error = xfs_qm_dqattach_locked(ip, true);
1977		if (error) {
1978			xfs_iunlock(ip, lockflags);
1979			return error;
1980		}
1981	}
1982
1983	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1984		ASSERT(O_udqpp);
1985		if (!uid_eq(inode->i_uid, uid)) {
1986			/*
1987			 * What we need is the dquot that has this uid; if we
1988			 * passed the inode to dqget, the inode's uid would
1989			 * take priority over the uid argument.  Since we do
1990			 * not pass the inode, we must unlock it here before
1991			 * calling dqget, because otherwise we would deadlock
1992			 * by doing trans_reserve while holding the ilock
1993			 * (dqget may need to allocate the dquot).
1994			 */
1995			xfs_iunlock(ip, lockflags);
1996			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1997					XFS_DQTYPE_USER, true, &uq);
1998			if (error) {
1999				ASSERT(error != -ENOENT);
2000				return error;
2001			}
2002			/*
2003			 * Get the ilock in the right order.
2004			 */
2005			xfs_dqunlock(uq);
2006			lockflags = XFS_ILOCK_SHARED;
2007			xfs_ilock(ip, lockflags);
2008		} else {
2009			/*
2010			 * Take an extra reference, because we'll return
2011			 * this to the caller.
2012			 */
2013			ASSERT(ip->i_udquot);
2014			uq = xfs_qm_dqhold(ip->i_udquot);
2015		}
2016	}
2017	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
2018		ASSERT(O_gdqpp);
2019		if (!gid_eq(inode->i_gid, gid)) {
2020			xfs_iunlock(ip, lockflags);
2021			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
2022					XFS_DQTYPE_GROUP, true, &gq);
2023			if (error) {
2024				ASSERT(error != -ENOENT);
2025				goto error_rele;
2026			}
2027			xfs_dqunlock(gq);
2028			lockflags = XFS_ILOCK_SHARED;
2029			xfs_ilock(ip, lockflags);
2030		} else {
2031			ASSERT(ip->i_gdquot);
2032			gq = xfs_qm_dqhold(ip->i_gdquot);
2033		}
2034	}
2035	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
2036		ASSERT(O_pdqpp);
2037		if (ip->i_projid != prid) {
2038			xfs_iunlock(ip, lockflags);
2039			error = xfs_qm_dqget(mp, prid,
2040					XFS_DQTYPE_PROJ, true, &pq);
2041			if (error) {
2042				ASSERT(error != -ENOENT);
2043				goto error_rele;
2044			}
2045			xfs_dqunlock(pq);
2046			lockflags = XFS_ILOCK_SHARED;
2047			xfs_ilock(ip, lockflags);
2048		} else {
2049			ASSERT(ip->i_pdquot);
2050			pq = xfs_qm_dqhold(ip->i_pdquot);
2051		}
2052	}
2053	trace_xfs_dquot_dqalloc(ip);
2054
2055	xfs_iunlock(ip, lockflags);
2056	if (O_udqpp)
2057		*O_udqpp = uq;
2058	else
2059		xfs_qm_dqrele(uq);
2060	if (O_gdqpp)
2061		*O_gdqpp = gq;
2062	else
2063		xfs_qm_dqrele(gq);
2064	if (O_pdqpp)
2065		*O_pdqpp = pq;
2066	else
2067		xfs_qm_dqrele(pq);
2068	return 0;
2069
2070error_rele:
2071	xfs_qm_dqrele(gq);
2072	xfs_qm_dqrele(uq);
2073	return error;
2074}
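
/*
 * Usage sketch (the names dp, uid, gid and prid below are placeholders, not
 * taken from this file): a create path might look up the dquots for the new
 * file's owner before reserving a transaction, attach them to the new inode
 * with xfs_qm_vop_create_dqattach(), and finally drop its own references:
 *
 *	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
 *	int			error;
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *			XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_PQUOTA,
 *			&udqp, &gdqp, &pdqp);
 *	if (error)
 *		return error;
 *	...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */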
2075
2076/*
2077 * Actually transfer ownership, and apply the dquot modifications; the
2078 * quota reservations for this were taken earlier.
2079 */
2080struct xfs_dquot *
2081xfs_qm_vop_chown(
2082	struct xfs_trans	*tp,
2083	struct xfs_inode	*ip,
2084	struct xfs_dquot	**IO_olddq,
2085	struct xfs_dquot	*newdq)
2086{
2087	struct xfs_dquot	*prevdq;
2088	xfs_filblks_t		dblocks, rblocks;
2089	bool			isrt = XFS_IS_REALTIME_INODE(ip);
2090
2091	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2092	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
2093	ASSERT(!xfs_is_metadir_inode(ip));
2094
2095	/* old dquot */
2096	prevdq = *IO_olddq;
2097	ASSERT(prevdq);
2098	ASSERT(prevdq != newdq);
2099
2100	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
2101
2102	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
2103			-(xfs_qcnt_t)dblocks);
2104	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
2105			-(xfs_qcnt_t)rblocks);
2106	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2107
2108	/* the sparkling new dquot */
2109	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
2110	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
2111	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2112
2113	/*
2114	 * Back when we made quota reservations for the chown, we reserved the
2115	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
2116	 * switched the dquots, decrease the new dquot's block reservation
2117	 * (having already bumped up the real counter) so that we don't have
2118	 * any reservation to give back when we commit.
2119	 */
2120	xfs_trans_mod_dquot(tp, newdq,
2121			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
2122			-ip->i_delayed_blks);
2123
2124	/*
2125	 * Give the incore reservation for delalloc blocks back to the old
2126	 * dquot.  We don't normally handle delalloc quota reservations
2127	 * transactionally, so just lock the dquot and subtract from the
2128	 * reservation.  Dirty the transaction because it's too late to turn
2129	 * back now.
2130	 */
2131	tp->t_flags |= XFS_TRANS_DIRTY;
2132	xfs_dqlock(prevdq);
2133	if (isrt) {
2134		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
2135		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
2136	} else {
2137		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
2138		prevdq->q_blk.reserved -= ip->i_delayed_blks;
2139	}
2140	xfs_dqunlock(prevdq);
2141
2142	/*
2143	 * Take an extra reference, because the inode is going to keep
2144	 * this dquot pointer even after the trans_commit.
2145	 */
2146	*IO_olddq = xfs_qm_dqhold(newdq);
2147
2148	return prevdq;
2149}
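
/*
 * Worked example (illustrative numbers): suppose the inode has 100 on-disk
 * blocks and 20 delalloc blocks at chown time, so the chown reservation
 * charged 120 blocks to newdq.  Above, the 100 real blocks (and the inode
 * count) move from prevdq to newdq, the transaction's reservation against
 * newdq is trimmed by the 20 delalloc blocks so that portion is not handed
 * back at commit, and prevdq's incore reservation drops by those same 20
 * blocks, which are now accounted to newdq.
 */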
2150
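/*
 * Attach dquots to every inode involved in a rename.  i_tab holds up to
 * four inode pointers, stops at the first NULL entry, and may contain
 * duplicate entries (e.g. when the source and target directories are the
 * same); the duplicate check below only catches duplicates that are
 * adjacent in the table.
 */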
2151int
2152xfs_qm_vop_rename_dqattach(
2153	struct xfs_inode	**i_tab)
2154{
2155	struct xfs_mount	*mp = i_tab[0]->i_mount;
2156	int			i;
2157
2158	if (!XFS_IS_QUOTA_ON(mp))
2159		return 0;
2160
2161	for (i = 0; (i < 4 && i_tab[i]); i++) {
2162		struct xfs_inode	*ip = i_tab[i];
2163		int			error;
2164
2165		/*
2166		 * Watch out for duplicate entries in the table.
2167		 */
2168		if (i == 0 || ip != i_tab[i-1]) {
2169			if (XFS_NOT_DQATTACHED(mp, ip)) {
2170				error = xfs_qm_dqattach(ip);
2171				if (error)
2172					return error;
2173			}
2174		}
2175	}
2176	return 0;
2177}
2178
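/*
 * Attach the dquots that were set up for a newly created inode (typically
 * the ones obtained earlier via xfs_qm_vop_dqalloc()).  Each xfs_qm_dqhold()
 * below takes the reference that the inode itself will keep, so the caller
 * still owns, and must eventually release, the references it passed in.
 */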
2179void
2180xfs_qm_vop_create_dqattach(
2181	struct xfs_trans	*tp,
2182	struct xfs_inode	*ip,
2183	struct xfs_dquot	*udqp,
2184	struct xfs_dquot	*gdqp,
2185	struct xfs_dquot	*pdqp)
2186{
2187	struct xfs_mount	*mp = tp->t_mountp;
2188
2189	if (!XFS_IS_QUOTA_ON(mp))
2190		return;
2191
2192	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2193	ASSERT(!xfs_is_metadir_inode(ip));
2194
2195	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2196		ASSERT(ip->i_udquot == NULL);
2197		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
2198
2199		ip->i_udquot = xfs_qm_dqhold(udqp);
2200	}
2201	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2202		ASSERT(ip->i_gdquot == NULL);
2203		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
2204
2205		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2206	}
2207	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2208		ASSERT(ip->i_pdquot == NULL);
2209		ASSERT(ip->i_projid == pdqp->q_id);
2210
2211		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2212	}
2213
2214	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
2215}
2216
2217/* Decide if this inode's dquot is near an enforcement boundary. */
2218bool
2219xfs_inode_near_dquot_enforcement(
2220	struct xfs_inode	*ip,
2221	xfs_dqtype_t		type)
2222{
2223	struct xfs_dquot	*dqp;
2224	struct xfs_dquot_res	*res;
2225	struct xfs_dquot_pre	*pre;
2226	int64_t			freesp;
2227
2228	/* We only care about quotas that are enabled and enforced. */
2229	dqp = xfs_inode_dquot(ip, type);
2230	if (!dqp || !xfs_dquot_is_enforced(dqp))
2231		return false;
2232
2233	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
2234	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
2235	    xfs_dquot_res_over_limits(&dqp->q_rtb))
2236		return true;
2237
2238	if (XFS_IS_REALTIME_INODE(ip)) {
2239		res = &dqp->q_rtb;
2240		pre = &dqp->q_rtb_prealloc;
2241	} else {
2242		res = &dqp->q_blk;
2243		pre = &dqp->q_blk_prealloc;
2244	}
2245
2246	/* Check the selected space reservation against the prealloc watermarks. */
2247	if (!pre->q_prealloc_hi_wmark)
2248		return false;
2249
2250	if (res->reserved < pre->q_prealloc_lo_wmark)
2251		return false;
2252
2253	if (res->reserved >= pre->q_prealloc_hi_wmark)
2254		return true;
2255
2256	freesp = pre->q_prealloc_hi_wmark - res->reserved;
2257	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
2258		return true;
2259
2260	return false;
2261}
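
/*
 * Worked example (illustrative numbers): with q_prealloc_lo_wmark = 900,
 * q_prealloc_hi_wmark = 1000 and q_low_space[XFS_QLOWSP_5_PCNT] = 50 blocks,
 * a reservation of 800 blocks is not near enforcement (below the low
 * watermark), 1000 or more is, and 960 is as well, because only 40 blocks
 * remain below the high watermark, which is less than the 5% threshold.
 */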