v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
  17#include "xfs_quota.h"
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26
  27/*
  28 * The global quota manager. There is only one of these for the entire
  29 * system, _not_ one per file system. XQM keeps track of the overall
  30 * quota functionality, including maintaining the freelist and hash
  31 * tables of dquots.
  32 */
  33STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  34STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
  35
  36STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
  37STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  38/*
  39 * We use the batch lookup interface to iterate over the dquots as it
  40 * currently is the only interface into the radix tree code that allows
  41 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  42 * operations is fine as all callers run either during mount/umount
  43 * or quotaoff.
  44 */
  45#define XFS_DQ_LOOKUP_BATCH	32
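/*
 * Editorial note (not in the original source): radix_tree_gang_lookup()
 * returns up to XFS_DQ_LOOKUP_BATCH dquots whose tree indices are >= the
 * requested start index -- the "fuzzy" lookup the comment above refers
 * to -- along with how many it found, so the walker below can resume
 * from q_id + 1 of the last dquot it processed.
 */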
  46
  47STATIC int
  48xfs_qm_dquot_walk(
  49	struct xfs_mount	*mp,
  50	xfs_dqtype_t		type,
  51	int			(*execute)(struct xfs_dquot *dqp, void *data),
  52	void			*data)
  53{
  54	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  55	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  56	uint32_t		next_index;
  57	int			last_error = 0;
  58	int			skipped;
  59	int			nr_found;
  60
  61restart:
  62	skipped = 0;
  63	next_index = 0;
  64	nr_found = 0;
  65
  66	while (1) {
  67		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  68		int		error = 0;
  69		int		i;
  70
  71		mutex_lock(&qi->qi_tree_lock);
  72		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  73					next_index, XFS_DQ_LOOKUP_BATCH);
  74		if (!nr_found) {
  75			mutex_unlock(&qi->qi_tree_lock);
  76			break;
  77		}
  78
  79		for (i = 0; i < nr_found; i++) {
  80			struct xfs_dquot *dqp = batch[i];
  81
  82			next_index = dqp->q_id + 1;
  83
  84			error = execute(batch[i], data);
  85			if (error == -EAGAIN) {
  86				skipped++;
  87				continue;
  88			}
  89			if (error && last_error != -EFSCORRUPTED)
  90				last_error = error;
  91		}
  92
  93		mutex_unlock(&qi->qi_tree_lock);
  94
  95		/* bail out if the filesystem is corrupted.  */
  96		if (last_error == -EFSCORRUPTED) {
  97			skipped = 0;
  98			break;
  99		}
 100		/* we're done if id overflows back to zero */
 101		if (!next_index)
 102			break;
 103	}
 104
 105	if (skipped) {
 106		delay(1);
 107		goto restart;
 108	}
 109
 110	return last_error;
 111}
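/*
 * Illustrative sketch (not part of the original file): the @execute
 * callback contract for xfs_qm_dquot_walk(). Returning -EAGAIN counts
 * the dquot as skipped and makes the walker retry the whole pass after
 * a short delay; any other nonzero return is remembered and handed back
 * to the caller. The callback and counter below are hypothetical.
 */
STATIC int
xfs_qm_count_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	uint64_t		*count = data;

	(*count)++;	/* tally every dquot of the walked type */
	return 0;
}

/* usage: xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_count_one, &count); */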
 112
 113
 114/*
 115 * Purge a dquot from all tracking data structures and free it.
 116 */
 117STATIC int
 118xfs_qm_dqpurge(
 119	struct xfs_dquot	*dqp,
 120	void			*data)
 121{
 122	struct xfs_mount	*mp = dqp->q_mount;
 123	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 124	int			error = -EAGAIN;
 125
 126	xfs_dqlock(dqp);
 127	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 128		goto out_unlock;
 129
 130	dqp->q_flags |= XFS_DQFLAG_FREEING;
 131
 132	xfs_dqflock(dqp);
 133
 134	/*
 135	 * If we are turning this type of quotas off, we don't care
 136	 * about the dirty metadata sitting in this dquot. OTOH, if
 137	 * we're unmounting, we do care, so we flush it and wait.
 138	 */
 139	if (XFS_DQ_IS_DIRTY(dqp)) {
 140		struct xfs_buf	*bp = NULL;
 141
 142		/*
 143		 * We don't care about getting disk errors here. We need
 144		 * to purge this dquot anyway, so we go ahead regardless.
 145		 */
 146		error = xfs_qm_dqflush(dqp, &bp);
 147		if (!error) {
 148			error = xfs_bwrite(bp);
 149			xfs_buf_relse(bp);
 150		} else if (error == -EAGAIN) {
 151			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 152			goto out_unlock;
 153		}
 154		xfs_dqflock(dqp);
 155	}
 156
 157	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 158	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 159		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 160
 161	xfs_dqfunlock(dqp);
 162	xfs_dqunlock(dqp);
 163
 164	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 165	qi->qi_dquots--;
 166
 167	/*
 168	 * We move dquots to the freelist as soon as their reference count
 169	 * hits zero, so it really should be on the freelist here.
 170	 */
 171	ASSERT(!list_empty(&dqp->q_lru));
 172	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 173	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 174
 175	xfs_qm_dqdestroy(dqp);
 176	return 0;
 177
 178out_unlock:
 179	xfs_dqunlock(dqp);
 180	return error;
 181}
 182
 183/*
 184 * Purge the dquot cache.
 185 */
 186void
 187xfs_qm_dqpurge_all(
 188	struct xfs_mount	*mp,
 189	uint			flags)
 190{
 191	if (flags & XFS_QMOPT_UQUOTA)
 192		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 193	if (flags & XFS_QMOPT_GQUOTA)
 194		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 195	if (flags & XFS_QMOPT_PQUOTA)
 196		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 197}
 198
 199/*
 200 * Just destroy the quotainfo structure.
 201 */
 202void
 203xfs_qm_unmount(
 204	struct xfs_mount	*mp)
 205{
 206	if (mp->m_quotainfo) {
 207		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 208		xfs_qm_destroy_quotainfo(mp);
 209	}
 210}
 211
 212/*
 213 * Called from the vfsops layer.
 214 */
 215void
 216xfs_qm_unmount_quotas(
 217	xfs_mount_t	*mp)
 218{
 219	/*
 220	 * Release the dquots that the root inode, et al might be holding,
 221	 * before we flush quotas and blow away the quotainfo structure.
 222	 */
 223	ASSERT(mp->m_rootip);
 224	xfs_qm_dqdetach(mp->m_rootip);
 225	if (mp->m_rbmip)
 226		xfs_qm_dqdetach(mp->m_rbmip);
 227	if (mp->m_rsumip)
 228		xfs_qm_dqdetach(mp->m_rsumip);
 229
 230	/*
 231	 * Release the quota inodes.
 232	 */
 233	if (mp->m_quotainfo) {
 234		if (mp->m_quotainfo->qi_uquotaip) {
 235			xfs_irele(mp->m_quotainfo->qi_uquotaip);
 236			mp->m_quotainfo->qi_uquotaip = NULL;
 237		}
 238		if (mp->m_quotainfo->qi_gquotaip) {
 239			xfs_irele(mp->m_quotainfo->qi_gquotaip);
 240			mp->m_quotainfo->qi_gquotaip = NULL;
 241		}
 242		if (mp->m_quotainfo->qi_pquotaip) {
 243			xfs_irele(mp->m_quotainfo->qi_pquotaip);
 244			mp->m_quotainfo->qi_pquotaip = NULL;
 245		}
 246	}
 247}
 248
 249STATIC int
 250xfs_qm_dqattach_one(
 251	struct xfs_inode	*ip,
 252	xfs_dqid_t		id,
 253	xfs_dqtype_t		type,
 254	bool			doalloc,
 255	struct xfs_dquot	**IO_idqpp)
 256{
 257	struct xfs_dquot	*dqp;
 258	int			error;
 259
 260	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 261	error = 0;
 262
 263	/*
 264	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 265	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 266	 * simpler.
 267	 */
 268	dqp = *IO_idqpp;
 269	if (dqp) {
 270		trace_xfs_dqattach_found(dqp);
 271		return 0;
 272	}
 273
 274	/*
 275	 * Find the dquot from somewhere. This bumps the reference count of
 276	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 277	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 278	 * turned off suddenly.
 279	 */
 280	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 281	if (error)
 282		return error;
 283
 284	trace_xfs_dqattach_get(dqp);
 285
 286	/*
 287	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 288	 * that the dquot returned is the one that should go in the inode.
 289	 */
 290	*IO_idqpp = dqp;
 291	xfs_dqunlock(dqp);
 292	return 0;
 293}
 294
 295static bool
 296xfs_qm_need_dqattach(
 297	struct xfs_inode	*ip)
 298{
 299	struct xfs_mount	*mp = ip->i_mount;
 300
 301	if (!XFS_IS_QUOTA_RUNNING(mp))
 302		return false;
 303	if (!XFS_IS_QUOTA_ON(mp))
 304		return false;
 305	if (!XFS_NOT_DQATTACHED(mp, ip))
 306		return false;
 307	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 308		return false;
 309	return true;
 310}
 311
 312/*
 313 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 314 * into account.
 315 * If @doalloc is true, the dquot(s) will be allocated if needed.
 316 * Inode may get unlocked and relocked in here, and the caller must deal with
 317 * the consequences.
 318 */
 319int
 320xfs_qm_dqattach_locked(
 321	xfs_inode_t	*ip,
 322	bool		doalloc)
 323{
 324	xfs_mount_t	*mp = ip->i_mount;
 325	int		error = 0;
 326
 327	if (!xfs_qm_need_dqattach(ip))
 328		return 0;
 329
 330	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 331
 332	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 333		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
 334				XFS_DQTYPE_USER, doalloc, &ip->i_udquot);
 335		if (error)
 336			goto done;
 337		ASSERT(ip->i_udquot);
 338	}
 339
 340	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 341		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
 342				XFS_DQTYPE_GROUP, doalloc, &ip->i_gdquot);
 343		if (error)
 344			goto done;
 345		ASSERT(ip->i_gdquot);
 346	}
 347
 348	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 349		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQTYPE_PROJ,
 350				doalloc, &ip->i_pdquot);
 351		if (error)
 352			goto done;
 353		ASSERT(ip->i_pdquot);
 354	}
 355
 356done:
 357	/*
 358	 * Don't worry about the dquots that we may have attached before any
 359	 * error - they'll get detached later if it has not already been done.
 360	 */
 361	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 362	return error;
 363}
 364
 365int
 366xfs_qm_dqattach(
 367	struct xfs_inode	*ip)
 368{
 369	int			error;
 370
 371	if (!xfs_qm_need_dqattach(ip))
 372		return 0;
 373
 374	xfs_ilock(ip, XFS_ILOCK_EXCL);
 375	error = xfs_qm_dqattach_locked(ip, false);
 376	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 377
 378	return error;
 379}
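/*
 * Illustrative usage note (not from the original file): xfs_qm_dqattach()
 * takes and drops XFS_ILOCK_EXCL itself, so a typical caller attaches
 * dquots before acquiring the inode lock, e.g.:
 *
 *	error = xfs_qm_dqattach(ip);
 *	if (error)
 *		return error;
 *
 * Callers that already hold the ILOCK use xfs_qm_dqattach_locked() above.
 */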
 380
 381/*
 382 * Release dquots (and their references) if any.
 383 * The inode should be locked EXCL except when this is called by
 384 * xfs_ireclaim.
 385 */
 386void
 387xfs_qm_dqdetach(
 388	xfs_inode_t	*ip)
 389{
 390	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 391		return;
 392
 393	trace_xfs_dquot_dqdetach(ip);
 394
 395	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 396	if (ip->i_udquot) {
 397		xfs_qm_dqrele(ip->i_udquot);
 398		ip->i_udquot = NULL;
 399	}
 400	if (ip->i_gdquot) {
 401		xfs_qm_dqrele(ip->i_gdquot);
 402		ip->i_gdquot = NULL;
 403	}
 404	if (ip->i_pdquot) {
 405		xfs_qm_dqrele(ip->i_pdquot);
 406		ip->i_pdquot = NULL;
 407	}
 408}
 409
 410struct xfs_qm_isolate {
 411	struct list_head	buffers;
 412	struct list_head	dispose;
 413};
 414
 415static enum lru_status
 416xfs_qm_dquot_isolate(
 417	struct list_head	*item,
 418	struct list_lru_one	*lru,
 419	spinlock_t		*lru_lock,
 420	void			*arg)
 421		__releases(lru_lock) __acquires(lru_lock)
 422{
 423	struct xfs_dquot	*dqp = container_of(item,
 424						struct xfs_dquot, q_lru);
 425	struct xfs_qm_isolate	*isol = arg;
 426
 427	if (!xfs_dqlock_nowait(dqp))
 428		goto out_miss_busy;
 429
 430	/*
 431	 * This dquot has acquired a reference in the meantime; remove it from
 432	 * the freelist and try again.
 433	 */
 434	if (dqp->q_nrefs) {
 435		xfs_dqunlock(dqp);
 436		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 437
 438		trace_xfs_dqreclaim_want(dqp);
 439		list_lru_isolate(lru, &dqp->q_lru);
 440		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 441		return LRU_REMOVED;
 442	}
 443
 444	/*
 445	 * If the dquot is dirty, flush it. If it's already being flushed, just
 446	 * skip it so there is time for the IO to complete before we try to
 447	 * reclaim it again on the next LRU pass.
 448	 */
 449	if (!xfs_dqflock_nowait(dqp)) {
 450		xfs_dqunlock(dqp);
 451		goto out_miss_busy;
 452	}
 453
 454	if (XFS_DQ_IS_DIRTY(dqp)) {
 455		struct xfs_buf	*bp = NULL;
 456		int		error;
 457
 458		trace_xfs_dqreclaim_dirty(dqp);
 459
 460		/* we have to drop the LRU lock to flush the dquot */
 461		spin_unlock(lru_lock);
 462
 463		error = xfs_qm_dqflush(dqp, &bp);
 464		if (error)
 465			goto out_unlock_dirty;
 466
 467		xfs_buf_delwri_queue(bp, &isol->buffers);
 468		xfs_buf_relse(bp);
 469		goto out_unlock_dirty;
 470	}
 471	xfs_dqfunlock(dqp);
 472
 473	/*
 474	 * Prevent lookups now that we are past the point of no return.
 475	 */
 476	dqp->q_flags |= XFS_DQFLAG_FREEING;
 477	xfs_dqunlock(dqp);
 478
 479	ASSERT(dqp->q_nrefs == 0);
 480	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 481	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 482	trace_xfs_dqreclaim_done(dqp);
 483	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 484	return LRU_REMOVED;
 485
 486out_miss_busy:
 487	trace_xfs_dqreclaim_busy(dqp);
 488	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 489	return LRU_SKIP;
 490
 491out_unlock_dirty:
 492	trace_xfs_dqreclaim_busy(dqp);
 493	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 494	xfs_dqunlock(dqp);
 495	spin_lock(lru_lock);
 496	return LRU_RETRY;
 497}
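/*
 * Editorial note on the lru_status codes returned above, as consumed by
 * list_lru_shrink_walk(): LRU_REMOVED means the item was isolated off
 * the LRU and counts toward the reclaim total; LRU_SKIP leaves the item
 * in place and moves on; LRU_RETRY signals that lru_lock was dropped, so
 * the traversal is no longer valid and the list walk must restart.
 */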
 498
 499static unsigned long
 500xfs_qm_shrink_scan(
 501	struct shrinker		*shrink,
 502	struct shrink_control	*sc)
 503{
 504	struct xfs_quotainfo	*qi = container_of(shrink,
 505					struct xfs_quotainfo, qi_shrinker);
 506	struct xfs_qm_isolate	isol;
 507	unsigned long		freed;
 508	int			error;
 509
 510	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 511		return 0;
 512
 513	INIT_LIST_HEAD(&isol.buffers);
 514	INIT_LIST_HEAD(&isol.dispose);
 515
 516	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 517				     xfs_qm_dquot_isolate, &isol);
 518
 519	error = xfs_buf_delwri_submit(&isol.buffers);
 520	if (error)
 521		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 522
 523	while (!list_empty(&isol.dispose)) {
 524		struct xfs_dquot	*dqp;
 525
 526		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 527		list_del_init(&dqp->q_lru);
 528		xfs_qm_dqfree_one(dqp);
 529	}
 530
 531	return freed;
 532}
 533
 534static unsigned long
 535xfs_qm_shrink_count(
 536	struct shrinker		*shrink,
 537	struct shrink_control	*sc)
 538{
 539	struct xfs_quotainfo	*qi = container_of(shrink,
 540					struct xfs_quotainfo, qi_shrinker);
 541
 542	return list_lru_shrink_count(&qi->qi_lru, sc);
 543}
 544
 545STATIC void
 546xfs_qm_set_defquota(
 547	struct xfs_mount	*mp,
 548	xfs_dqtype_t		type,
 549	struct xfs_quotainfo	*qinf)
 550{
 551	struct xfs_dquot	*dqp;
 552	struct xfs_def_quota	*defq;
 553	int			error;
 554
 555	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 556	if (error)
 557		return;
 558
 559	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 560
 561	/*
 562	 * Timers and warnings have been already set, let's just set the
 563	 * default limits for this quota type
 564	 */
 565	defq->blk.hard = dqp->q_blk.hardlimit;
 566	defq->blk.soft = dqp->q_blk.softlimit;
 567	defq->ino.hard = dqp->q_ino.hardlimit;
 568	defq->ino.soft = dqp->q_ino.softlimit;
 569	defq->rtb.hard = dqp->q_rtb.hardlimit;
 570	defq->rtb.soft = dqp->q_rtb.softlimit;
 571	xfs_qm_dqdestroy(dqp);
 572}
 573
 574/* Initialize quota time limits from the root dquot. */
 575static void
 576xfs_qm_init_timelimits(
 577	struct xfs_mount	*mp,
 578	xfs_dqtype_t		type)
 579{
 580	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 581	struct xfs_def_quota	*defq;
 582	struct xfs_dquot	*dqp;
 583	int			error;
 584
 585	defq = xfs_get_defquota(qinf, type);
 586
 587	defq->blk.time = XFS_QM_BTIMELIMIT;
 588	defq->ino.time = XFS_QM_ITIMELIMIT;
 589	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 590	defq->blk.warn = XFS_QM_BWARNLIMIT;
 591	defq->ino.warn = XFS_QM_IWARNLIMIT;
 592	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
 593
 594	/*
 595	 * We try to get the limits from the superuser's limits fields.
 596	 * This is quite hacky, but it is standard quota practice.
 597	 *
 598	 * Since we may not have done a quotacheck by this point, just read
 599	 * the dquot without attaching it to any hashtables or lists.
 600	 */
 601	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 602	if (error)
 603		return;
 604
 605	/*
 606	 * The warnings and timers set the grace period given to
 607	 * a user or group before he or she cannot perform any
 608	 * more writing. If it is zero, a default is used.
 609	 */
 610	if (dqp->q_blk.timer)
 611		defq->blk.time = dqp->q_blk.timer;
 612	if (dqp->q_ino.timer)
 613		defq->ino.time = dqp->q_ino.timer;
 614	if (dqp->q_rtb.timer)
 615		defq->rtb.time = dqp->q_rtb.timer;
 616	if (dqp->q_blk.warnings)
 617		defq->blk.warn = dqp->q_blk.warnings;
 618	if (dqp->q_ino.warnings)
 619		defq->ino.warn = dqp->q_ino.warnings;
 620	if (dqp->q_rtb.warnings)
 621		defq->rtb.warn = dqp->q_rtb.warnings;
 622
 623	xfs_qm_dqdestroy(dqp);
 624}
 625
 626/*
 627 * This initializes all the quota information that's kept in the
 628 * mount structure
 629 */
 630STATIC int
 631xfs_qm_init_quotainfo(
 632	struct xfs_mount	*mp)
 633{
 634	struct xfs_quotainfo	*qinf;
 635	int			error;
 636
 637	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 638
 639	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
 640
 641	error = list_lru_init(&qinf->qi_lru);
 642	if (error)
 643		goto out_free_qinf;
 644
 645	/*
 646	 * See if quotainodes are setup, and if not, allocate them,
 647	 * and change the superblock accordingly.
 648	 */
 649	error = xfs_qm_init_quotainos(mp);
 650	if (error)
 651		goto out_free_lru;
 652
 653	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 654	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 655	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 656	mutex_init(&qinf->qi_tree_lock);
 657
 658	/* mutex used to serialize quotaoffs */
 659	mutex_init(&qinf->qi_quotaofflock);
 660
 661	/* Precalc some constants */
 662	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 663	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 664
 665	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 666
 667	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 668	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 669	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 670
 671	if (XFS_IS_UQUOTA_RUNNING(mp))
 672		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 673	if (XFS_IS_GQUOTA_RUNNING(mp))
 674		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 675	if (XFS_IS_PQUOTA_RUNNING(mp))
 676		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 677
 678	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 679	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 680	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 681	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 682
 683	error = register_shrinker(&qinf->qi_shrinker);
 684	if (error)
 685		goto out_free_inos;
 686
 687	return 0;
 688
 689out_free_inos:
 690	mutex_destroy(&qinf->qi_quotaofflock);
 691	mutex_destroy(&qinf->qi_tree_lock);
 692	xfs_qm_destroy_quotainos(qinf);
 693out_free_lru:
 694	list_lru_destroy(&qinf->qi_lru);
 695out_free_qinf:
 696	kmem_free(qinf);
 697	mp->m_quotainfo = NULL;
 698	return error;
 699}
 700
 701/*
 702 * Gets called when unmounting a filesystem or when all quotas get
 703 * turned off.
 704 * This purges the quota inodes, destroys locks and frees itself.
 705 */
 706void
 707xfs_qm_destroy_quotainfo(
 708	struct xfs_mount	*mp)
 709{
 710	struct xfs_quotainfo	*qi;
 711
 712	qi = mp->m_quotainfo;
 713	ASSERT(qi != NULL);
 714
 715	unregister_shrinker(&qi->qi_shrinker);
 716	list_lru_destroy(&qi->qi_lru);
 717	xfs_qm_destroy_quotainos(qi);
 718	mutex_destroy(&qi->qi_tree_lock);
 719	mutex_destroy(&qi->qi_quotaofflock);
 720	kmem_free(qi);
 721	mp->m_quotainfo = NULL;
 722}
 723
 724/*
 725 * Create an inode and return with a reference already taken, but unlocked.
 726 * This is how we create quota inodes.
 727 */
 728STATIC int
 729xfs_qm_qino_alloc(
 730	xfs_mount_t	*mp,
 731	xfs_inode_t	**ip,
 732	uint		flags)
 733{
 734	xfs_trans_t	*tp;
 735	int		error;
 736	bool		need_alloc = true;
 737
 738	*ip = NULL;
 739	/*
 740	 * With a superblock that doesn't have separate pquotino, we
 741	 * share an inode between gquota and pquota. If the on-disk
 742	 * superblock has GQUOTA and the filesystem is now mounted
 743	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 744	 * vice-versa.
 745	 */
 746	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 747			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 748		xfs_ino_t ino = NULLFSINO;
 749
 750		if ((flags & XFS_QMOPT_PQUOTA) &&
 751			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 752			ino = mp->m_sb.sb_gquotino;
 753			if (XFS_IS_CORRUPT(mp,
 754					   mp->m_sb.sb_pquotino != NULLFSINO))
 755				return -EFSCORRUPTED;
 756		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 757			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 758			ino = mp->m_sb.sb_pquotino;
 759			if (XFS_IS_CORRUPT(mp,
 760					   mp->m_sb.sb_gquotino != NULLFSINO))
 761				return -EFSCORRUPTED;
 762		}
 763		if (ino != NULLFSINO) {
 764			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
 765			if (error)
 766				return error;
 767			mp->m_sb.sb_gquotino = NULLFSINO;
 768			mp->m_sb.sb_pquotino = NULLFSINO;
 769			need_alloc = false;
 770		}
 771	}
 772
 773	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 774			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
 775			0, 0, &tp);
 776	if (error)
 777		return error;
 778
 779	if (need_alloc) {
 780		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
 781		if (error) {
 782			xfs_trans_cancel(tp);
 783			return error;
 784		}
 785	}
 786
 787	/*
 788	 * Make the changes in the superblock, and log those too.
 789	 * sbfields arg may contain fields other than *QUOTINO;
 790	 * VERSIONNUM for example.
 791	 */
 792	spin_lock(&mp->m_sb_lock);
 793	if (flags & XFS_QMOPT_SBVERSION) {
 794		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 795
 796		xfs_sb_version_addquota(&mp->m_sb);
 797		mp->m_sb.sb_uquotino = NULLFSINO;
 798		mp->m_sb.sb_gquotino = NULLFSINO;
 799		mp->m_sb.sb_pquotino = NULLFSINO;
 800
 801		/* qflags will get updated fully _after_ quotacheck */
 802		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 803	}
 804	if (flags & XFS_QMOPT_UQUOTA)
 805		mp->m_sb.sb_uquotino = (*ip)->i_ino;
 806	else if (flags & XFS_QMOPT_GQUOTA)
 807		mp->m_sb.sb_gquotino = (*ip)->i_ino;
 808	else
 809		mp->m_sb.sb_pquotino = (*ip)->i_ino;
 810	spin_unlock(&mp->m_sb_lock);
 811	xfs_log_sb(tp);
 812
 813	error = xfs_trans_commit(tp);
 814	if (error) {
 815		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 816		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 817	}
 818	if (need_alloc)
 819		xfs_finish_inode_setup(*ip);
 820	return error;
 821}
 822
 823
 824STATIC void
 825xfs_qm_reset_dqcounts(
 826	struct xfs_mount	*mp,
 827	struct xfs_buf		*bp,
 828	xfs_dqid_t		id,
 829	xfs_dqtype_t		type)
 830{
 831	struct xfs_dqblk	*dqb;
 832	int			j;
 833
 834	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 835
 836	/*
 837	 * Reset all counters and timers. They'll be
 838	 * started afresh by xfs_qm_quotacheck.
 839	 */
 840#ifdef DEBUG
 841	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
 842		sizeof(xfs_dqblk_t);
 843	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 844#endif
 845	dqb = bp->b_addr;
 846	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 847		struct xfs_disk_dquot	*ddq;
 848
 849		ddq = (struct xfs_disk_dquot *)&dqb[j];
 850
 851		/*
 852		 * Do a sanity check, and if needed, repair the dqblk. Don't
 853		 * output any warnings because it's perfectly possible to
 854		 * find uninitialised dquot blks. See comment in
 855		 * xfs_dquot_verify.
 856		 */
 857		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
 858		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
 859			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 860
 861		/*
 862		 * Reset type in case we are reusing group quota file for
 863		 * project quotas or vice versa
 864		 */
 865		ddq->d_type = type;
 866		ddq->d_bcount = 0;
 867		ddq->d_icount = 0;
 868		ddq->d_rtbcount = 0;
 869
 870		/*
 871		 * dquot id 0 stores the default grace period and the maximum
 872		 * warning limit that were set by the administrator, so we
 873		 * should not reset them.
 874		 */
 875		if (ddq->d_id != 0) {
 876			ddq->d_btimer = 0;
 877			ddq->d_itimer = 0;
 878			ddq->d_rtbtimer = 0;
 879			ddq->d_bwarns = 0;
 880			ddq->d_iwarns = 0;
 881			ddq->d_rtbwarns = 0;
 882		}
 883
 884		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 885			xfs_update_cksum((char *)&dqb[j],
 886					 sizeof(struct xfs_dqblk),
 887					 XFS_DQUOT_CRC_OFF);
 888		}
 889	}
 890}
 891
 892STATIC int
 893xfs_qm_reset_dqcounts_all(
 894	struct xfs_mount	*mp,
 895	xfs_dqid_t		firstid,
 896	xfs_fsblock_t		bno,
 897	xfs_filblks_t		blkcnt,
 898	xfs_dqtype_t		type,
 899	struct list_head	*buffer_list)
 900{
 901	struct xfs_buf		*bp;
 902	int			error = 0;
 903
 904	ASSERT(blkcnt > 0);
 905
 906	/*
 907	 * Blkcnt arg can be a very big number, and might even be
 908	 * larger than the log itself. So, we have to break it up into
 909	 * manageable-sized transactions.
 910	 * Note that we don't start a permanent transaction here; we might
 911	 * not be able to get a log reservation for the whole thing up front,
 912	 * and we don't really care to either, because we just discard
 913	 * everything if we were to crash in the middle of this loop.
 914	 */
 915	while (blkcnt--) {
 916		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 917			      XFS_FSB_TO_DADDR(mp, bno),
 918			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 919			      &xfs_dquot_buf_ops);
 920
 921		/*
 922		 * CRC and validation errors will return an EFSCORRUPTED here. If
 923		 * this occurs, re-read without CRC validation so that we can
 924		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 925		 * will leave a trace in the log indicating corruption has
 926		 * been detected.
 927		 */
 928		if (error == -EFSCORRUPTED) {
 929			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 930				      XFS_FSB_TO_DADDR(mp, bno),
 931				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 932				      NULL);
 933		}
 934
 935		if (error)
 936			break;
 937
 938		/*
 939		 * A corrupt buffer might not have a verifier attached, so
 940		 * make sure we have the correct one attached before writeback
 941		 * occurs.
 942		 */
 943		bp->b_ops = &xfs_dquot_buf_ops;
 944		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 945		xfs_buf_delwri_queue(bp, buffer_list);
 946		xfs_buf_relse(bp);
 947
 948		/* goto the next block. */
 949		bno++;
 950		firstid += mp->m_quotainfo->qi_dqperchunk;
 951	}
 952
 953	return error;
 954}
 955
 956/*
 957 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 958 * counters for every chunk of dquots that we find.
 959 */
 960STATIC int
 961xfs_qm_reset_dqcounts_buf(
 962	struct xfs_mount	*mp,
 963	struct xfs_inode	*qip,
 964	xfs_dqtype_t		type,
 965	struct list_head	*buffer_list)
 966{
 967	struct xfs_bmbt_irec	*map;
 968	int			i, nmaps;	/* number of map entries */
 969	int			error;		/* return value */
 970	xfs_fileoff_t		lblkno;
 971	xfs_filblks_t		maxlblkcnt;
 972	xfs_dqid_t		firstid;
 973	xfs_fsblock_t		rablkno;
 974	xfs_filblks_t		rablkcnt;
 975
 976	error = 0;
 977	/*
 978	 * This looks racy, but we can't keep an inode lock across a
 979	 * trans_reserve. But, this gets called during quotacheck, and that
 980	 * happens only at mount time which is single threaded.
 981	 */
 982	if (qip->i_d.di_nblocks == 0)
 983		return 0;
 984
 985	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
 986
 987	lblkno = 0;
 988	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 989	do {
 990		uint		lock_mode;
 991
 992		nmaps = XFS_DQITER_MAP_SIZE;
 993		/*
 994		 * We aren't changing the inode itself. Just changing
 995		 * some of its data. No new blocks are added here, and
 996		 * the inode is never added to the transaction.
 997		 */
 998		lock_mode = xfs_ilock_data_map_shared(qip);
 999		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1000				       map, &nmaps, 0);
1001		xfs_iunlock(qip, lock_mode);
1002		if (error)
1003			break;
1004
1005		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1006		for (i = 0; i < nmaps; i++) {
1007			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1008			ASSERT(map[i].br_blockcount);
1009
1010
1011			lblkno += map[i].br_blockcount;
1012
1013			if (map[i].br_startblock == HOLESTARTBLOCK)
1014				continue;
1015
1016			firstid = (xfs_dqid_t) map[i].br_startoff *
1017				mp->m_quotainfo->qi_dqperchunk;
1018			/*
1019			 * Do a read-ahead on the next extent.
1020			 */
1021			if ((i+1 < nmaps) &&
1022			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1023				rablkcnt =  map[i+1].br_blockcount;
1024				rablkno = map[i+1].br_startblock;
1025				while (rablkcnt--) {
1026					xfs_buf_readahead(mp->m_ddev_targp,
1027					       XFS_FSB_TO_DADDR(mp, rablkno),
1028					       mp->m_quotainfo->qi_dqchunklen,
1029					       &xfs_dquot_buf_ops);
1030					rablkno++;
1031				}
1032			}
1033			/*
1034			 * Iterate thru all the blks in the extent and
1035			 * reset the counters of all the dquots inside them.
1036			 */
1037			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1038						   map[i].br_startblock,
1039						   map[i].br_blockcount,
1040						   type, buffer_list);
1041			if (error)
1042				goto out;
1043		}
1044	} while (nmaps > 0);
1045
1046out:
1047	kmem_free(map);
1048	return error;
1049}
1050
1051/*
1052 * Called by dqusage_adjust in doing a quotacheck.
1053 *
1054 * Given the inode, and a dquot id, this updates both the incore dquot as well
1055 * as the buffer copy. This is so that once the quotacheck is done, we can
1056 * just log all the buffers, as opposed to logging numerous updates to
1057 * individual dquots.
1058 */
1059STATIC int
1060xfs_qm_quotacheck_dqadjust(
1061	struct xfs_inode	*ip,
1062	xfs_dqtype_t		type,
1063	xfs_qcnt_t		nblks,
1064	xfs_qcnt_t		rtblks)
1065{
1066	struct xfs_mount	*mp = ip->i_mount;
1067	struct xfs_dquot	*dqp;
1068	xfs_dqid_t		id;
1069	int			error;
1070
1071	id = xfs_qm_id_for_quotatype(ip, type);
1072	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1073	if (error) {
1074		/*
1075		 * Shouldn't be able to turn off quotas here.
1076		 */
1077		ASSERT(error != -ESRCH);
1078		ASSERT(error != -ENOENT);
1079		return error;
1080	}
1081
1082	trace_xfs_dqadjust(dqp);
1083
1084	/*
1085	 * Adjust the inode count and the block count to reflect this inode's
1086	 * resource usage.
1087	 */
1088	dqp->q_ino.count++;
1089	dqp->q_ino.reserved++;
1090	if (nblks) {
1091		dqp->q_blk.count += nblks;
1092		dqp->q_blk.reserved += nblks;
1093	}
1094	if (rtblks) {
1095		dqp->q_rtb.count += rtblks;
1096		dqp->q_rtb.reserved += rtblks;
1097	}
1098
1099	/*
1100	 * Set default limits, adjust timers (since we changed usages)
1101	 *
1102	 * There are no timers for the default values set in the root dquot.
1103	 */
1104	if (dqp->q_id) {
1105		xfs_qm_adjust_dqlimits(dqp);
1106		xfs_qm_adjust_dqtimers(dqp);
1107	}
1108
1109	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1110	xfs_qm_dqput(dqp);
1111	return 0;
1112}
1113
1114/*
1115 * callback routine supplied to bulkstat(). Given an inumber, find its
1116 * dquots and update them to account for resources taken by that inode.
1117 */
1118/* ARGSUSED */
1119STATIC int
1120xfs_qm_dqusage_adjust(
1121	struct xfs_mount	*mp,
1122	struct xfs_trans	*tp,
1123	xfs_ino_t		ino,
1124	void			*data)
1125{
1126	struct xfs_inode	*ip;
1127	xfs_qcnt_t		nblks;
1128	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1129	int			error;
1130
1131	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1132
1133	/*
1134	 * rootino must have its resources accounted for, not so with the quota
1135	 * inodes.
1136	 */
1137	if (xfs_is_quota_inode(&mp->m_sb, ino))
1138		return 0;
1139
1140	/*
1141	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1142	 * at mount time and therefore nobody will be racing chown/chproj.
1143	 */
1144	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1145	if (error == -EINVAL || error == -ENOENT)
1146		return 0;
1147	if (error)
1148		return error;
1149
1150	ASSERT(ip->i_delayed_blks == 0);
1151
1152	if (XFS_IS_REALTIME_INODE(ip)) {
1153		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1154
1155		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1156			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1157			if (error)
1158				goto error0;
1159		}
1160
1161		xfs_bmap_count_leaves(ifp, &rtblks);
1162	}
1163
1164	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1165
1166	/*
1167	 * Add the (disk blocks and inode) resources occupied by this
1168	 * inode to its dquots. We do this adjustment in the incore dquot,
1169	 * and also copy the changes to its buffer.
1170	 * We don't care about putting these changes in a transaction
1171	 * envelope because if we crash in the middle of a 'quotacheck'
1172	 * we have to start from the beginning anyway.
1173	 * Once we're done, we'll log all the dquot bufs.
1174	 *
1175	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1176	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1177	 */
1178	if (XFS_IS_UQUOTA_ON(mp)) {
1179		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1180				rtblks);
1181		if (error)
1182			goto error0;
1183	}
1184
1185	if (XFS_IS_GQUOTA_ON(mp)) {
1186		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1187				rtblks);
1188		if (error)
1189			goto error0;
1190	}
1191
1192	if (XFS_IS_PQUOTA_ON(mp)) {
1193		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1194				rtblks);
1195		if (error)
1196			goto error0;
1197	}
1198
1199error0:
1200	xfs_irele(ip);
1201	return error;
1202}
1203
1204STATIC int
1205xfs_qm_flush_one(
1206	struct xfs_dquot	*dqp,
1207	void			*data)
1208{
1209	struct xfs_mount	*mp = dqp->q_mount;
1210	struct list_head	*buffer_list = data;
1211	struct xfs_buf		*bp = NULL;
1212	int			error = 0;
1213
1214	xfs_dqlock(dqp);
1215	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1216		goto out_unlock;
1217	if (!XFS_DQ_IS_DIRTY(dqp))
1218		goto out_unlock;
1219
1220	/*
1221	 * The only way the dquot is already flush locked by the time quotacheck
1222	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1223	 * it for the final time. Quotacheck collects all dquot bufs in the
1224	 * local delwri queue before dquots are dirtied, so reclaim can't have
1225	 * possibly queued it for I/O. The only way out is to push the buffer to
1226	 * cycle the flush lock.
1227	 */
1228	if (!xfs_dqflock_nowait(dqp)) {
1229		/* buf is pinned in-core by delwri list */
1230		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1231				mp->m_quotainfo->qi_dqchunklen, 0);
1232		if (!bp) {
1233			error = -EINVAL;
1234			goto out_unlock;
1235		}
1236		xfs_buf_unlock(bp);
1237
1238		xfs_buf_delwri_pushbuf(bp, buffer_list);
1239		xfs_buf_rele(bp);
1240
1241		error = -EAGAIN;
1242		goto out_unlock;
1243	}
1244
1245	error = xfs_qm_dqflush(dqp, &bp);
1246	if (error)
1247		goto out_unlock;
1248
1249	xfs_buf_delwri_queue(bp, buffer_list);
1250	xfs_buf_relse(bp);
1251out_unlock:
1252	xfs_dqunlock(dqp);
1253	return error;
1254}
1255
1256/*
1257 * Walk thru all the filesystem inodes and construct a consistent view
1258 * of the disk quota world. If the quotacheck fails, disable quotas.
1259 */
1260STATIC int
1261xfs_qm_quotacheck(
1262	xfs_mount_t	*mp)
1263{
1264	int			error, error2;
1265	uint			flags;
1266	LIST_HEAD		(buffer_list);
1267	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1268	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1269	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1270
1271	flags = 0;
1272
1273	ASSERT(uip || gip || pip);
1274	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1275
1276	xfs_notice(mp, "Quotacheck needed: Please wait.");
1277
1278	/*
1279	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1280	 * their counters to zero. We need a clean slate.
1281	 * We don't log our changes till later.
1282	 */
1283	if (uip) {
1284		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1285					 &buffer_list);
1286		if (error)
1287			goto error_return;
1288		flags |= XFS_UQUOTA_CHKD;
1289	}
1290
1291	if (gip) {
1292		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1293					 &buffer_list);
1294		if (error)
1295			goto error_return;
1296		flags |= XFS_GQUOTA_CHKD;
1297	}
1298
1299	if (pip) {
1300		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1301					 &buffer_list);
1302		if (error)
1303			goto error_return;
1304		flags |= XFS_PQUOTA_CHKD;
1305	}
1306
1307	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1308			NULL);
1309	if (error)
1310		goto error_return;
1311
1312	/*
1313	 * We've made all the changes that we need to make incore.  Flush them
1314	 * down to disk buffers if everything was updated successfully.
1315	 */
1316	if (XFS_IS_UQUOTA_ON(mp)) {
1317		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1318					  &buffer_list);
1319	}
1320	if (XFS_IS_GQUOTA_ON(mp)) {
1321		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1322					   &buffer_list);
1323		if (!error)
1324			error = error2;
1325	}
1326	if (XFS_IS_PQUOTA_ON(mp)) {
1327		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1328					   &buffer_list);
1329		if (!error)
1330			error = error2;
1331	}
1332
1333	error2 = xfs_buf_delwri_submit(&buffer_list);
1334	if (!error)
1335		error = error2;
1336
1337	/*
1338	 * We can get this error if we couldn't do a dquot allocation inside
1339	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1340	 * dirty dquots that might be cached, we just want to get rid of them
1341	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1342	 * at this point (because we intentionally didn't in dqget_noattach).
1343	 */
1344	if (error) {
1345		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1346		goto error_return;
1347	}
1348
1349	/*
1350	 * If one type of quotas is off, then it will lose its
1351	 * quotachecked status, since we won't be doing accounting for
1352	 * that type anymore.
1353	 */
1354	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1355	mp->m_qflags |= flags;
1356
1357 error_return:
1358	xfs_buf_delwri_cancel(&buffer_list);
1359
1360	if (error) {
1361		xfs_warn(mp,
1362	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1363			error);
1364		/*
1365		 * We must turn off quotas.
1366		 */
1367		ASSERT(mp->m_quotainfo != NULL);
1368		xfs_qm_destroy_quotainfo(mp);
1369		if (xfs_mount_reset_sbqflags(mp)) {
1370			xfs_warn(mp,
1371				"Quotacheck: Failed to reset quota flags.");
1372		}
1373	} else
1374		xfs_notice(mp, "Quotacheck: Done.");
1375	return error;
1376}
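/*
 * Editorial summary (not from the original file): quotacheck runs in
 * three phases -- zero every on-disk dquot's counters, iwalk all inodes
 * to re-add their block and inode usage to the incore dquots, then flush
 * the dirtied dquots to their buffers through a local delwri list. A
 * failure in any phase disables quota accounting entirely rather than
 * leaving partially rebuilt counts behind.
 */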
1377
1378/*
1379 * This is called from xfs_mountfs to start quotas and initialize all
1380 * necessary data structures like quotainfo.  This is also responsible for
1381 * running a quotacheck as necessary.  We are guaranteed that the superblock
1382 * is consistently read in at this point.
1383 *
1384 * If we fail here, the mount will continue with quota turned off. We don't
1385 * need to indicate success or failure at all.
1386 */
1387void
1388xfs_qm_mount_quotas(
1389	struct xfs_mount	*mp)
1390{
1391	int			error = 0;
1392	uint			sbf;
1393
1394	/*
1395	 * If quotas on realtime volumes are not supported, we disable
1396	 * quotas immediately.
1397	 */
1398	if (mp->m_sb.sb_rextents) {
1399		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1400		mp->m_qflags = 0;
1401		goto write_changes;
1402	}
1403
1404	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1405
1406	/*
1407	 * Allocate the quotainfo structure inside the mount struct, and
1408	 * create quotainode(s), and change/rev superblock if necessary.
1409	 */
1410	error = xfs_qm_init_quotainfo(mp);
1411	if (error) {
1412		/*
1413		 * We must turn off quotas.
1414		 */
1415		ASSERT(mp->m_quotainfo == NULL);
1416		mp->m_qflags = 0;
1417		goto write_changes;
1418	}
1419	/*
1420	 * If any of the quotas are not consistent, do a quotacheck.
1421	 */
1422	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1423		error = xfs_qm_quotacheck(mp);
1424		if (error) {
1425			/* Quotacheck failed and disabled quotas. */
1426			return;
1427		}
1428	}
1429	/*
1430	 * If one type of quotas is off, then it will lose its
1431	 * quotachecked status, since we won't be doing accounting for
1432	 * that type anymore.
1433	 */
1434	if (!XFS_IS_UQUOTA_ON(mp))
1435		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1436	if (!XFS_IS_GQUOTA_ON(mp))
1437		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1438	if (!XFS_IS_PQUOTA_ON(mp))
1439		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1440
1441 write_changes:
1442	/*
1443	 * We actually don't have to acquire the m_sb_lock at all.
1444	 * This can only be called from mount, and that's single threaded. XXX
1445	 */
1446	spin_lock(&mp->m_sb_lock);
1447	sbf = mp->m_sb.sb_qflags;
1448	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1449	spin_unlock(&mp->m_sb_lock);
1450
1451	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1452		if (xfs_sync_sb(mp, false)) {
1453			/*
1454			 * We could only have been turning quotas off.
1455			 * We aren't in very good shape actually because
1456			 * the incore structures are convinced that quotas are
1457			 * off, but the on-disk superblock doesn't know that!
1458			 */
1459			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1460			xfs_alert(mp, "%s: Superblock update failed!",
1461				__func__);
1462		}
1463	}
1464
1465	if (error) {
1466		xfs_warn(mp, "Failed to initialize disk quotas.");
1467		return;
1468	}
1469}
1470
1471/*
1472 * This is called after the superblock has been read in and we're ready to
1473 * iget the quota inodes.
1474 */
1475STATIC int
1476xfs_qm_init_quotainos(
1477	xfs_mount_t	*mp)
1478{
1479	struct xfs_inode	*uip = NULL;
1480	struct xfs_inode	*gip = NULL;
1481	struct xfs_inode	*pip = NULL;
1482	int			error;
1483	uint			flags = 0;
1484
1485	ASSERT(mp->m_quotainfo);
1486
1487	/*
1488	 * Get the uquota and gquota inodes
1489	 */
1490	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1491		if (XFS_IS_UQUOTA_ON(mp) &&
1492		    mp->m_sb.sb_uquotino != NULLFSINO) {
1493			ASSERT(mp->m_sb.sb_uquotino > 0);
1494			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1495					     0, 0, &uip);
1496			if (error)
1497				return error;
1498		}
1499		if (XFS_IS_GQUOTA_ON(mp) &&
1500		    mp->m_sb.sb_gquotino != NULLFSINO) {
1501			ASSERT(mp->m_sb.sb_gquotino > 0);
1502			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1503					     0, 0, &gip);
1504			if (error)
1505				goto error_rele;
1506		}
1507		if (XFS_IS_PQUOTA_ON(mp) &&
1508		    mp->m_sb.sb_pquotino != NULLFSINO) {
1509			ASSERT(mp->m_sb.sb_pquotino > 0);
1510			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1511					     0, 0, &pip);
1512			if (error)
1513				goto error_rele;
1514		}
1515	} else {
1516		flags |= XFS_QMOPT_SBVERSION;
1517	}
1518
1519	/*
1520	 * Create the three inodes, if they don't exist already. The changes
1521	 * made above will get added to a transaction and logged in one of
1522	 * the qino_alloc calls below.  If the device is readonly,
1523	 * temporarily switch to read-write to do this.
1524	 */
1525	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1526		error = xfs_qm_qino_alloc(mp, &uip,
1527					      flags | XFS_QMOPT_UQUOTA);
1528		if (error)
1529			goto error_rele;
1530
1531		flags &= ~XFS_QMOPT_SBVERSION;
1532	}
1533	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1534		error = xfs_qm_qino_alloc(mp, &gip,
1535					  flags | XFS_QMOPT_GQUOTA);
1536		if (error)
1537			goto error_rele;
1538
1539		flags &= ~XFS_QMOPT_SBVERSION;
1540	}
1541	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1542		error = xfs_qm_qino_alloc(mp, &pip,
1543					  flags | XFS_QMOPT_PQUOTA);
1544		if (error)
1545			goto error_rele;
1546	}
1547
1548	mp->m_quotainfo->qi_uquotaip = uip;
1549	mp->m_quotainfo->qi_gquotaip = gip;
1550	mp->m_quotainfo->qi_pquotaip = pip;
1551
1552	return 0;
1553
1554error_rele:
1555	if (uip)
1556		xfs_irele(uip);
1557	if (gip)
1558		xfs_irele(gip);
1559	if (pip)
1560		xfs_irele(pip);
1561	return error;
1562}
1563
1564STATIC void
1565xfs_qm_destroy_quotainos(
1566	struct xfs_quotainfo	*qi)
1567{
1568	if (qi->qi_uquotaip) {
1569		xfs_irele(qi->qi_uquotaip);
1570		qi->qi_uquotaip = NULL; /* paranoia */
1571	}
1572	if (qi->qi_gquotaip) {
1573		xfs_irele(qi->qi_gquotaip);
1574		qi->qi_gquotaip = NULL;
1575	}
1576	if (qi->qi_pquotaip) {
1577		xfs_irele(qi->qi_pquotaip);
1578		qi->qi_pquotaip = NULL;
1579	}
1580}
1581
1582STATIC void
1583xfs_qm_dqfree_one(
1584	struct xfs_dquot	*dqp)
1585{
1586	struct xfs_mount	*mp = dqp->q_mount;
1587	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1588
1589	mutex_lock(&qi->qi_tree_lock);
1590	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1591
1592	qi->qi_dquots--;
1593	mutex_unlock(&qi->qi_tree_lock);
1594
1595	xfs_qm_dqdestroy(dqp);
1596}
1597
1598/* --------------- utility functions for vnodeops ---------------- */
1599
1600
1601/*
1602 * Given an inode, a uid, gid and prid, make sure that we have
1603 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1604 * quotas by creating this file.
1605 * This also attaches dquot(s) to the given inode after locking it,
1606 * and returns the dquots corresponding to the uid and/or gid.
1607 *
1608 * in	: inode (unlocked)
1609 * out	: udquot, gdquot with references taken and unlocked
1610 */
1611int
1612xfs_qm_vop_dqalloc(
1613	struct xfs_inode	*ip,
1614	kuid_t			uid,
1615	kgid_t			gid,
1616	prid_t			prid,
1617	uint			flags,
1618	struct xfs_dquot	**O_udqpp,
1619	struct xfs_dquot	**O_gdqpp,
1620	struct xfs_dquot	**O_pdqpp)
1621{
1622	struct xfs_mount	*mp = ip->i_mount;
1623	struct inode		*inode = VFS_I(ip);
1624	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1625	struct xfs_dquot	*uq = NULL;
1626	struct xfs_dquot	*gq = NULL;
1627	struct xfs_dquot	*pq = NULL;
1628	int			error;
1629	uint			lockflags;
1630
1631	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1632		return 0;
1633
1634	lockflags = XFS_ILOCK_EXCL;
1635	xfs_ilock(ip, lockflags);
1636
1637	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1638		gid = inode->i_gid;
1639
1640	/*
1641	 * Attach the dquot(s) to this inode, doing a dquot allocation
1642	 * if necessary. The dquot(s) will not be locked.
1643	 */
1644	if (XFS_NOT_DQATTACHED(mp, ip)) {
1645		error = xfs_qm_dqattach_locked(ip, true);
1646		if (error) {
1647			xfs_iunlock(ip, lockflags);
1648			return error;
1649		}
1650	}
1651
1652	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1653		if (!uid_eq(inode->i_uid, uid)) {
1654			/*
1655			 * What we need is the dquot that has this uid, and
1656			 * if we send the inode to dqget, the uid of the inode
1657			 * takes priority over what's sent in the uid argument.
1658			 * We must unlock inode here before calling dqget if
1659			 * we're not sending the inode, because otherwise
1660			 * we'll deadlock by doing trans_reserve while
1661			 * holding ilock.
1662			 */
1663			xfs_iunlock(ip, lockflags);
1664			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1665					XFS_DQTYPE_USER, true, &uq);
1666			if (error) {
1667				ASSERT(error != -ENOENT);
1668				return error;
1669			}
1670			/*
1671			 * Get the ilock in the right order.
1672			 */
1673			xfs_dqunlock(uq);
1674			lockflags = XFS_ILOCK_SHARED;
1675			xfs_ilock(ip, lockflags);
1676		} else {
1677			/*
1678			 * Take an extra reference, because we'll return
1679			 * this to caller
1680			 */
1681			ASSERT(ip->i_udquot);
1682			uq = xfs_qm_dqhold(ip->i_udquot);
1683		}
1684	}
1685	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1686		if (!gid_eq(inode->i_gid, gid)) {
1687			xfs_iunlock(ip, lockflags);
1688			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1689					XFS_DQTYPE_GROUP, true, &gq);
1690			if (error) {
1691				ASSERT(error != -ENOENT);
1692				goto error_rele;
1693			}
1694			xfs_dqunlock(gq);
1695			lockflags = XFS_ILOCK_SHARED;
1696			xfs_ilock(ip, lockflags);
1697		} else {
1698			ASSERT(ip->i_gdquot);
1699			gq = xfs_qm_dqhold(ip->i_gdquot);
1700		}
1701	}
1702	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1703		if (ip->i_d.di_projid != prid) {
1704			xfs_iunlock(ip, lockflags);
1705			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid,
1706					XFS_DQTYPE_PROJ, true, &pq);
1707			if (error) {
1708				ASSERT(error != -ENOENT);
1709				goto error_rele;
1710			}
1711			xfs_dqunlock(pq);
1712			lockflags = XFS_ILOCK_SHARED;
1713			xfs_ilock(ip, lockflags);
1714		} else {
1715			ASSERT(ip->i_pdquot);
1716			pq = xfs_qm_dqhold(ip->i_pdquot);
1717		}
1718	}
1719	trace_xfs_dquot_dqalloc(ip);
1720
1721	xfs_iunlock(ip, lockflags);
1722	if (O_udqpp)
1723		*O_udqpp = uq;
1724	else
1725		xfs_qm_dqrele(uq);
1726	if (O_gdqpp)
1727		*O_gdqpp = gq;
1728	else
1729		xfs_qm_dqrele(gq);
1730	if (O_pdqpp)
1731		*O_pdqpp = pq;
1732	else
1733		xfs_qm_dqrele(pq);
1734	return 0;
1735
1736error_rele:
1737	xfs_qm_dqrele(gq);
1738	xfs_qm_dqrele(uq);
1739	return error;
1740}
1741
1742/*
1743 * Actually transfer ownership, and do dquot modifications.
1744 * These were already reserved.
1745 */
1746struct xfs_dquot *
1747xfs_qm_vop_chown(
1748	struct xfs_trans	*tp,
1749	struct xfs_inode	*ip,
1750	struct xfs_dquot	**IO_olddq,
1751	struct xfs_dquot	*newdq)
1752{
1753	struct xfs_dquot	*prevdq;
1754	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1755				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1756
1757
1758	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1759	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1760
1761	/* old dquot */
1762	prevdq = *IO_olddq;
1763	ASSERT(prevdq);
1764	ASSERT(prevdq != newdq);
1765
1766	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1767	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1768
1769	/* the sparkling new dquot */
1770	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1771	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1772
1773	/*
1774	 * Take an extra reference, because the inode is going to keep
1775	 * this dquot pointer even after the trans_commit.
1776	 */
1777	*IO_olddq = xfs_qm_dqhold(newdq);
1778
1779	return prevdq;
1780}
1781
1782/*
1783 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1784 */
1785int
1786xfs_qm_vop_chown_reserve(
1787	struct xfs_trans	*tp,
1788	struct xfs_inode	*ip,
1789	struct xfs_dquot	*udqp,
1790	struct xfs_dquot	*gdqp,
1791	struct xfs_dquot	*pdqp,
1792	uint			flags)
1793{
1794	struct xfs_mount	*mp = ip->i_mount;
1795	uint64_t		delblks;
1796	unsigned int		blkflags;
1797	struct xfs_dquot	*udq_unres = NULL;
1798	struct xfs_dquot	*gdq_unres = NULL;
1799	struct xfs_dquot	*pdq_unres = NULL;
1800	struct xfs_dquot	*udq_delblks = NULL;
1801	struct xfs_dquot	*gdq_delblks = NULL;
1802	struct xfs_dquot	*pdq_delblks = NULL;
1803	int			error;
1804
1805
1806	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1807	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1808
1809	delblks = ip->i_delayed_blks;
1810	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1811			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1812
1813	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1814	    i_uid_read(VFS_I(ip)) != udqp->q_id) {
1815		udq_delblks = udqp;
1816		/*
1817		 * If there are delayed allocation blocks, then we have to
1818		 * unreserve those from the old dquot, and add them to the
1819		 * new dquot.
1820		 */
1821		if (delblks) {
1822			ASSERT(ip->i_udquot);
1823			udq_unres = ip->i_udquot;
1824		}
1825	}
1826	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1827	    i_gid_read(VFS_I(ip)) != gdqp->q_id) {
1828		gdq_delblks = gdqp;
1829		if (delblks) {
1830			ASSERT(ip->i_gdquot);
1831			gdq_unres = ip->i_gdquot;
1832		}
1833	}
1834
1835	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1836	    ip->i_d.di_projid != pdqp->q_id) {
1837		pdq_delblks = pdqp;
1838		if (delblks) {
1839			ASSERT(ip->i_pdquot);
1840			pdq_unres = ip->i_pdquot;
1841		}
1842	}
1843
1844	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1845				udq_delblks, gdq_delblks, pdq_delblks,
1846				ip->i_d.di_nblocks, 1, flags | blkflags);
1847	if (error)
1848		return error;
1849
1850	/*
1851	 * Do the delayed blks reservations/unreservations now. Since, these
1852	 * are done without the help of a transaction, if a reservation fails
1853	 * its previous reservations won't be automatically undone by trans
1854	 * code. So, we have to do it manually here.
1855	 */
1856	if (delblks) {
1857		/*
1858		 * Do the reservations first. Unreservation can't fail.
1859		 */
1860		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1861		ASSERT(udq_unres || gdq_unres || pdq_unres);
1862		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1863			    udq_delblks, gdq_delblks, pdq_delblks,
1864			    (xfs_qcnt_t)delblks, 0, flags | blkflags);
1865		if (error)
1866			return error;
1867		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1868				udq_unres, gdq_unres, pdq_unres,
1869				-((xfs_qcnt_t)delblks), 0, blkflags);
1870	}
1871
1872	return 0;
1873}
1874
1875int
1876xfs_qm_vop_rename_dqattach(
1877	struct xfs_inode	**i_tab)
1878{
1879	struct xfs_mount	*mp = i_tab[0]->i_mount;
1880	int			i;
1881
1882	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1883		return 0;
1884
1885	for (i = 0; (i < 4 && i_tab[i]); i++) {
1886		struct xfs_inode	*ip = i_tab[i];
1887		int			error;
1888
1889		/*
1890		 * Watch out for duplicate entries in the table.
1891		 */
1892		if (i == 0 || ip != i_tab[i-1]) {
1893			if (XFS_NOT_DQATTACHED(mp, ip)) {
1894				error = xfs_qm_dqattach(ip);
1895				if (error)
1896					return error;
1897			}
1898		}
1899	}
1900	return 0;
1901}
1902
1903void
1904xfs_qm_vop_create_dqattach(
1905	struct xfs_trans	*tp,
1906	struct xfs_inode	*ip,
1907	struct xfs_dquot	*udqp,
1908	struct xfs_dquot	*gdqp,
1909	struct xfs_dquot	*pdqp)
1910{
1911	struct xfs_mount	*mp = tp->t_mountp;
1912
1913	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1914		return;
1915
1916	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1917
1918	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1919		ASSERT(ip->i_udquot == NULL);
1920		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1921
1922		ip->i_udquot = xfs_qm_dqhold(udqp);
1923		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1924	}
1925	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1926		ASSERT(ip->i_gdquot == NULL);
1927		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1928
1929		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1930		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1931	}
1932	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1933		ASSERT(ip->i_pdquot == NULL);
1934		ASSERT(ip->i_d.di_projid == pdqp->q_id);
1935
1936		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1937		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1938	}
1939}
1940
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
  17#include "xfs_quota.h"
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28
  29/*
  30 * The global quota manager. There is only one of these for the entire
  31 * system, _not_ one per file system. XQM keeps track of the overall
  32 * quota functionality, including maintaining the freelist and hash
  33 * tables of dquots.
  34 */
  35STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  36STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
  37
  38STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
  39STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  40/*
  41 * We use the batch lookup interface to iterate over the dquots as it
  42 * currently is the only interface into the radix tree code that allows
  43 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  44 * operations is fine as all callers are used either during mount/umount
  45 * or quotaoff.
  46 */
  47#define XFS_DQ_LOOKUP_BATCH	32
  48
  49STATIC int
  50xfs_qm_dquot_walk(
  51	struct xfs_mount	*mp,
  52	xfs_dqtype_t		type,
  53	int			(*execute)(struct xfs_dquot *dqp, void *data),
  54	void			*data)
  55{
  56	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  57	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  58	uint32_t		next_index;
  59	int			last_error = 0;
  60	int			skipped;
  61	int			nr_found;
  62
  63restart:
  64	skipped = 0;
  65	next_index = 0;
  66	nr_found = 0;
  67
  68	while (1) {
  69		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  70		int		error = 0;
  71		int		i;
  72
  73		mutex_lock(&qi->qi_tree_lock);
  74		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  75					next_index, XFS_DQ_LOOKUP_BATCH);
  76		if (!nr_found) {
  77			mutex_unlock(&qi->qi_tree_lock);
  78			break;
  79		}
  80
  81		for (i = 0; i < nr_found; i++) {
  82			struct xfs_dquot *dqp = batch[i];
  83
  84			next_index = dqp->q_id + 1;
  85
  86			error = execute(batch[i], data);
  87			if (error == -EAGAIN) {
  88				skipped++;
  89				continue;
  90			}
  91			if (error && last_error != -EFSCORRUPTED)
  92				last_error = error;
  93		}
  94
  95		mutex_unlock(&qi->qi_tree_lock);
  96
  97		/* bail out if the filesystem is corrupted.  */
  98		if (last_error == -EFSCORRUPTED) {
  99			skipped = 0;
 100			break;
 101		}
 102		/* we're done if id overflows back to zero */
 103		if (!next_index)
 104			break;
 105	}
 106
 107	if (skipped) {
 108		delay(1);
 109		goto restart;
 110	}
 111
 112	return last_error;
 113}
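/*
 * Illustrative sketch (not part of xfs_qm.c): the walk above terminates
 * either when the gang lookup finds nothing, or when the 32-bit dquot id
 * wraps.  Because q_id is a uint32_t, stepping past the largest id
 * (0xFFFFFFFF) leaves next_index at zero, which the "if (!next_index)"
 * test catches.  A minimal userspace analogue of that wrap check:
 */
#include <stdint.h>
#include <stdio.h>

static int next_batch_start(uint32_t last_id, uint32_t *next_index)
{
	*next_index = last_id + 1;	/* unsigned wraparound is well-defined */
	return *next_index != 0;	/* zero means the id space is exhausted */
}

int main(void)
{
	uint32_t next;

	if (!next_batch_start(UINT32_MAX, &next))
		printf("walked past the last possible id; stop\n");
	return 0;
}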
 114
 115
 116/*
 117 * Purge a dquot from all tracking data structures and free it.
 118 */
 119STATIC int
 120xfs_qm_dqpurge(
 121	struct xfs_dquot	*dqp,
 122	void			*data)
 123{
 124	struct xfs_mount	*mp = dqp->q_mount;
 125	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 126	int			error = -EAGAIN;
 127
 128	xfs_dqlock(dqp);
 129	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 130		goto out_unlock;
 131
 132	dqp->q_flags |= XFS_DQFLAG_FREEING;
 133
 134	xfs_dqflock(dqp);
 135
 136	/*
 137	 * If we are turning this type of quota off, we don't care
 138	 * about the dirty metadata sitting in this dquot. OTOH, if
 139	 * we're unmounting, we do care, so we flush it and wait.
 140	 */
 141	if (XFS_DQ_IS_DIRTY(dqp)) {
 142		struct xfs_buf	*bp = NULL;
 143
 144		/*
 145		 * We don't care about getting disk errors here. We need
 146		 * to purge this dquot anyway, so we go ahead regardless.
 147		 */
 148		error = xfs_qm_dqflush(dqp, &bp);
 149		if (!error) {
 150			error = xfs_bwrite(bp);
 151			xfs_buf_relse(bp);
 152		} else if (error == -EAGAIN) {
 153			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 154			goto out_unlock;
 155		}
 156		xfs_dqflock(dqp);
 157	}
 158
 159	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 160	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 161		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 162
 163	xfs_dqfunlock(dqp);
 164	xfs_dqunlock(dqp);
 165
 166	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 167	qi->qi_dquots--;
 168
 169	/*
 170	 * We move dquots to the freelist as soon as their reference count
 171	 * hits zero, so this one really should be on the freelist here.
 172	 */
 173	ASSERT(!list_empty(&dqp->q_lru));
 174	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 175	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 176
 177	xfs_qm_dqdestroy(dqp);
 178	return 0;
 179
 180out_unlock:
 181	xfs_dqunlock(dqp);
 182	return error;
 183}
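/*
 * Note on XFS_DQFLAG_FREEING as used above: it is only ever set under the
 * dquot lock once q_nrefs has been observed to be zero (here and in
 * xfs_qm_dquot_isolate() below), and both the purge path above and
 * xfs_qm_flush_one() bail out when they find it already set, so each dquot
 * is torn down by exactly one party.
 */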
 184
 185/*
 186 * Purge the dquot cache.
 187 */
 188void
 189xfs_qm_dqpurge_all(
 190	struct xfs_mount	*mp,
 191	uint			flags)
 192{
 193	if (flags & XFS_QMOPT_UQUOTA)
 194		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 195	if (flags & XFS_QMOPT_GQUOTA)
 196		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 197	if (flags & XFS_QMOPT_PQUOTA)
 198		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 199}
 200
 201/*
 202 * Just destroy the quotainfo structure.
 203 */
 204void
 205xfs_qm_unmount(
 206	struct xfs_mount	*mp)
 207{
 208	if (mp->m_quotainfo) {
 209		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 210		xfs_qm_destroy_quotainfo(mp);
 211	}
 212}
 213
 214/*
 215 * Called from the vfsops layer.
 216 */
 217void
 218xfs_qm_unmount_quotas(
 219	xfs_mount_t	*mp)
 220{
 221	/*
 222	 * Release the dquots that the root inode, et al. might be holding,
 223	 * before we flush quotas and blow away the quotainfo structure.
 224	 */
 225	ASSERT(mp->m_rootip);
 226	xfs_qm_dqdetach(mp->m_rootip);
 227	if (mp->m_rbmip)
 228		xfs_qm_dqdetach(mp->m_rbmip);
 229	if (mp->m_rsumip)
 230		xfs_qm_dqdetach(mp->m_rsumip);
 231
 232	/*
 233	 * Release the quota inodes.
 234	 */
 235	if (mp->m_quotainfo) {
 236		if (mp->m_quotainfo->qi_uquotaip) {
 237			xfs_irele(mp->m_quotainfo->qi_uquotaip);
 238			mp->m_quotainfo->qi_uquotaip = NULL;
 239		}
 240		if (mp->m_quotainfo->qi_gquotaip) {
 241			xfs_irele(mp->m_quotainfo->qi_gquotaip);
 242			mp->m_quotainfo->qi_gquotaip = NULL;
 243		}
 244		if (mp->m_quotainfo->qi_pquotaip) {
 245			xfs_irele(mp->m_quotainfo->qi_pquotaip);
 246			mp->m_quotainfo->qi_pquotaip = NULL;
 247		}
 248	}
 249}
 250
 251STATIC int
 252xfs_qm_dqattach_one(
 253	struct xfs_inode	*ip,
 254	xfs_dqtype_t		type,
 255	bool			doalloc,
 256	struct xfs_dquot	**IO_idqpp)
 257{
 258	struct xfs_dquot	*dqp;
 259	int			error;
 260
 261	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 262	error = 0;
 263
 264	/*
 265	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 266	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 267	 * simpler.
 268	 */
 269	dqp = *IO_idqpp;
 270	if (dqp) {
 271		trace_xfs_dqattach_found(dqp);
 272		return 0;
 273	}
 274
 275	/*
 276	 * Find the dquot from somewhere. This bumps the reference count of
 277	 * the dquot and returns it locked.  This can return ENOENT if the
 278	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
 279	 * if quotas got turned off suddenly.
 280	 */
 281	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 282	if (error)
 283		return error;
 284
 285	trace_xfs_dqattach_get(dqp);
 286
 287	/*
 288	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 289	 * that the dquot returned is the one that should go in the inode.
 290	 */
 291	*IO_idqpp = dqp;
 292	xfs_dqunlock(dqp);
 293	return 0;
 294}
 295
 296static bool
 297xfs_qm_need_dqattach(
 298	struct xfs_inode	*ip)
 299{
 300	struct xfs_mount	*mp = ip->i_mount;
 301
 302	if (!XFS_IS_QUOTA_RUNNING(mp))
 303		return false;
 304	if (!XFS_IS_QUOTA_ON(mp))
 305		return false;
 306	if (!XFS_NOT_DQATTACHED(mp, ip))
 307		return false;
 308	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 309		return false;
 310	return true;
 311}
 312
 313/*
 314 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 315 * into account.
 316 * If @doalloc is true, the dquot(s) will be allocated if needed.
 317 * Inode may get unlocked and relocked in here, and the caller must deal with
 318 * the consequences.
 319 */
 320int
 321xfs_qm_dqattach_locked(
 322	xfs_inode_t	*ip,
 323	bool		doalloc)
 324{
 325	xfs_mount_t	*mp = ip->i_mount;
 326	int		error = 0;
 327
 328	if (!xfs_qm_need_dqattach(ip))
 329		return 0;
 330
 331	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 332
 333	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 334		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 335				doalloc, &ip->i_udquot);
 336		if (error)
 337			goto done;
 338		ASSERT(ip->i_udquot);
 339	}
 340
 341	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 342		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 343				doalloc, &ip->i_gdquot);
 344		if (error)
 345			goto done;
 346		ASSERT(ip->i_gdquot);
 347	}
 348
 349	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 350		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 351				doalloc, &ip->i_pdquot);
 352		if (error)
 353			goto done;
 354		ASSERT(ip->i_pdquot);
 355	}
 356
 357done:
 358	/*
 359	 * Don't worry about the dquots that we may have attached before any
 360	 * error - they'll get detached later if it has not already been done.
 361	 */
 362	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 363	return error;
 364}
 365
 366int
 367xfs_qm_dqattach(
 368	struct xfs_inode	*ip)
 369{
 370	int			error;
 371
 372	if (!xfs_qm_need_dqattach(ip))
 373		return 0;
 374
 375	xfs_ilock(ip, XFS_ILOCK_EXCL);
 376	error = xfs_qm_dqattach_locked(ip, false);
 377	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 378
 379	return error;
 380}
 381
 382/*
 383 * Release dquots (and their references) if any.
 384 * The inode should be locked EXCL except when this is called by
 385 * xfs_ireclaim.
 386 */
 387void
 388xfs_qm_dqdetach(
 389	xfs_inode_t	*ip)
 390{
 391	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 392		return;
 393
 394	trace_xfs_dquot_dqdetach(ip);
 395
 396	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 397	if (ip->i_udquot) {
 398		xfs_qm_dqrele(ip->i_udquot);
 399		ip->i_udquot = NULL;
 400	}
 401	if (ip->i_gdquot) {
 402		xfs_qm_dqrele(ip->i_gdquot);
 403		ip->i_gdquot = NULL;
 404	}
 405	if (ip->i_pdquot) {
 406		xfs_qm_dqrele(ip->i_pdquot);
 407		ip->i_pdquot = NULL;
 408	}
 409}
 410
 411struct xfs_qm_isolate {
 412	struct list_head	buffers;
 413	struct list_head	dispose;
 414};
 415
 416static enum lru_status
 417xfs_qm_dquot_isolate(
 418	struct list_head	*item,
 419	struct list_lru_one	*lru,
 420	spinlock_t		*lru_lock,
 421	void			*arg)
 422		__releases(lru_lock) __acquires(lru_lock)
 423{
 424	struct xfs_dquot	*dqp = container_of(item,
 425						struct xfs_dquot, q_lru);
 426	struct xfs_qm_isolate	*isol = arg;
 427
 428	if (!xfs_dqlock_nowait(dqp))
 429		goto out_miss_busy;
 430
 431	/*
 432	 * This dquot has acquired a reference in the meantime; remove it from
 433	 * the freelist and try again.
 434	 */
 435	if (dqp->q_nrefs) {
 436		xfs_dqunlock(dqp);
 437		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 438
 439		trace_xfs_dqreclaim_want(dqp);
 440		list_lru_isolate(lru, &dqp->q_lru);
 441		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 442		return LRU_REMOVED;
 443	}
 444
 445	/*
 446	 * If the dquot is dirty, flush it. If it's already being flushed, just
 447	 * skip it so there is time for the IO to complete before we try to
 448	 * reclaim it again on the next LRU pass.
 449	 */
 450	if (!xfs_dqflock_nowait(dqp)) {
 451		xfs_dqunlock(dqp);
 452		goto out_miss_busy;
 453	}
 454
 455	if (XFS_DQ_IS_DIRTY(dqp)) {
 456		struct xfs_buf	*bp = NULL;
 457		int		error;
 458
 459		trace_xfs_dqreclaim_dirty(dqp);
 460
 461		/* we have to drop the LRU lock to flush the dquot */
 462		spin_unlock(lru_lock);
 463
 464		error = xfs_qm_dqflush(dqp, &bp);
 465		if (error)
 466			goto out_unlock_dirty;
 467
 468		xfs_buf_delwri_queue(bp, &isol->buffers);
 469		xfs_buf_relse(bp);
 470		goto out_unlock_dirty;
 471	}
 472	xfs_dqfunlock(dqp);
 473
 474	/*
 475	 * Prevent lookups now that we are past the point of no return.
 476	 */
 477	dqp->q_flags |= XFS_DQFLAG_FREEING;
 478	xfs_dqunlock(dqp);
 479
 480	ASSERT(dqp->q_nrefs == 0);
 481	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 482	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 483	trace_xfs_dqreclaim_done(dqp);
 484	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 485	return LRU_REMOVED;
 486
 487out_miss_busy:
 488	trace_xfs_dqreclaim_busy(dqp);
 489	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 490	return LRU_SKIP;
 491
 492out_unlock_dirty:
 493	trace_xfs_dqreclaim_busy(dqp);
 494	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 495	xfs_dqunlock(dqp);
 496	spin_lock(lru_lock);
 497	return LRU_RETRY;
 498}
 499
 500static unsigned long
 501xfs_qm_shrink_scan(
 502	struct shrinker		*shrink,
 503	struct shrink_control	*sc)
 504{
 505	struct xfs_quotainfo	*qi = container_of(shrink,
 506					struct xfs_quotainfo, qi_shrinker);
 507	struct xfs_qm_isolate	isol;
 508	unsigned long		freed;
 509	int			error;
 510
 511	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 512		return 0;
 513
 514	INIT_LIST_HEAD(&isol.buffers);
 515	INIT_LIST_HEAD(&isol.dispose);
 516
 517	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 518				     xfs_qm_dquot_isolate, &isol);
 519
 520	error = xfs_buf_delwri_submit(&isol.buffers);
 521	if (error)
 522		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 523
 524	while (!list_empty(&isol.dispose)) {
 525		struct xfs_dquot	*dqp;
 526
 527		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 528		list_del_init(&dqp->q_lru);
 529		xfs_qm_dqfree_one(dqp);
 530	}
 531
 532	return freed;
 533}
 534
 535static unsigned long
 536xfs_qm_shrink_count(
 537	struct shrinker		*shrink,
 538	struct shrink_control	*sc)
 539{
 540	struct xfs_quotainfo	*qi = container_of(shrink,
 541					struct xfs_quotainfo, qi_shrinker);
 542
 543	return list_lru_shrink_count(&qi->qi_lru, sc);
 544}
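/*
 * A hypothetical minimal shrinker, sketched for illustration only, showing
 * the contract the two callbacks above implement: ->count_objects() reports
 * how many objects could be freed, and ->scan_objects() frees up to
 * sc->nr_to_scan of them, returning the number actually freed or
 * SHRINK_STOP when no progress can be made.  example_nr_cached and
 * example_evict() are stand-ins, not real kernel symbols.
 */
static unsigned long example_nr_cached;			/* hypothetical counter */
static unsigned long example_evict(unsigned long nr);	/* hypothetical helper */

static unsigned long
example_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return example_nr_cached;
}

static unsigned long
example_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = example_evict(sc->nr_to_scan);

	return freed ? freed : SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};
/* paired with register_shrinker(&example_shrinker) at init time */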
 545
 546STATIC void
 547xfs_qm_set_defquota(
 548	struct xfs_mount	*mp,
 549	xfs_dqtype_t		type,
 550	struct xfs_quotainfo	*qinf)
 551{
 552	struct xfs_dquot	*dqp;
 553	struct xfs_def_quota	*defq;
 554	int			error;
 555
 556	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 557	if (error)
 558		return;
 559
 560	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 561
 562	/*
 563	 * Timers and warnings have already been set; let's just set the
 564	 * default limits for this quota type.
 565	 */
 566	defq->blk.hard = dqp->q_blk.hardlimit;
 567	defq->blk.soft = dqp->q_blk.softlimit;
 568	defq->ino.hard = dqp->q_ino.hardlimit;
 569	defq->ino.soft = dqp->q_ino.softlimit;
 570	defq->rtb.hard = dqp->q_rtb.hardlimit;
 571	defq->rtb.soft = dqp->q_rtb.softlimit;
 572	xfs_qm_dqdestroy(dqp);
 573}
 574
 575/* Initialize quota time limits from the root dquot. */
 576static void
 577xfs_qm_init_timelimits(
 578	struct xfs_mount	*mp,
 579	xfs_dqtype_t		type)
 580{
 581	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 582	struct xfs_def_quota	*defq;
 583	struct xfs_dquot	*dqp;
 584	int			error;
 585
 586	defq = xfs_get_defquota(qinf, type);
 587
 588	defq->blk.time = XFS_QM_BTIMELIMIT;
 589	defq->ino.time = XFS_QM_ITIMELIMIT;
 590	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 591	defq->blk.warn = XFS_QM_BWARNLIMIT;
 592	defq->ino.warn = XFS_QM_IWARNLIMIT;
 593	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
 594
 595	/*
 596	 * We try to get the limits from the superuser's limits fields.
 597	 * This is quite hacky, but it is standard quota practice.
 598	 *
 599	 * Since we may not have done a quotacheck by this point, just read
 600	 * the dquot without attaching it to any hashtables or lists.
 601	 */
 602	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 603	if (error)
 604		return;
 605
 606	/*
 607	 * The warnings and timers set the grace period given to
 608	 * a user or group before they can no longer write. If a
 609	 * value is zero, the default is used.
 610	 */
 611	if (dqp->q_blk.timer)
 612		defq->blk.time = dqp->q_blk.timer;
 613	if (dqp->q_ino.timer)
 614		defq->ino.time = dqp->q_ino.timer;
 615	if (dqp->q_rtb.timer)
 616		defq->rtb.time = dqp->q_rtb.timer;
 617	if (dqp->q_blk.warnings)
 618		defq->blk.warn = dqp->q_blk.warnings;
 619	if (dqp->q_ino.warnings)
 620		defq->ino.warn = dqp->q_ino.warnings;
 621	if (dqp->q_rtb.warnings)
 622		defq->rtb.warn = dqp->q_rtb.warnings;
 623
 624	xfs_qm_dqdestroy(dqp);
 625}
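/*
 * The pattern above is "compiled-in default unless the id-0 dquot says
 * otherwise".  In sketch form (illustrative only):
 *
 *	grace = dqp->q_blk.timer ? dqp->q_blk.timer : XFS_QM_BTIMELIMIT;
 *
 * i.e. a grace period the administrator stored in the root dquot overrides
 * the default for that quota type.
 */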
 626
 627/*
 628 * This initializes all the quota information that's kept in the
 629 * mount structure
 630 */
 631STATIC int
 632xfs_qm_init_quotainfo(
 633	struct xfs_mount	*mp)
 634{
 635	struct xfs_quotainfo	*qinf;
 636	int			error;
 637
 638	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 639
 640	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
 641
 642	error = list_lru_init(&qinf->qi_lru);
 643	if (error)
 644		goto out_free_qinf;
 645
 646	/*
 647	 * See if the quota inodes are set up, and if not, allocate them,
 648	 * and change the superblock accordingly.
 649	 */
 650	error = xfs_qm_init_quotainos(mp);
 651	if (error)
 652		goto out_free_lru;
 653
 654	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 655	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 656	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 657	mutex_init(&qinf->qi_tree_lock);
 658
 659	/* mutex used to serialize quotaoffs */
 660	mutex_init(&qinf->qi_quotaofflock);
 661
 662	/* Precalc some constants */
 663	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 664	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 665	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
 666		qinf->qi_expiry_min =
 667			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 668		qinf->qi_expiry_max =
 669			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 670	} else {
 671		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 672		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 673	}
 674	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 675			qinf->qi_expiry_max);
 676
 677	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 678
 679	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 680	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 681	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 682
 683	if (XFS_IS_UQUOTA_RUNNING(mp))
 684		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 685	if (XFS_IS_GQUOTA_RUNNING(mp))
 686		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 687	if (XFS_IS_PQUOTA_RUNNING(mp))
 688		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 689
 690	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 691	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 692	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 693	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 694
 695	error = register_shrinker(&qinf->qi_shrinker);
 696	if (error)
 697		goto out_free_inos;
 698
 699	return 0;
 700
 701out_free_inos:
 702	mutex_destroy(&qinf->qi_quotaofflock);
 703	mutex_destroy(&qinf->qi_tree_lock);
 704	xfs_qm_destroy_quotainos(qinf);
 705out_free_lru:
 706	list_lru_destroy(&qinf->qi_lru);
 707out_free_qinf:
 708	kmem_free(qinf);
 709	mp->m_quotainfo = NULL;
 710	return error;
 711}
 712
 713/*
 714 * Gets called when unmounting a filesystem or when all quotas get
 715 * turned off.
 716 * This purges the quota inodes, destroys locks and frees itself.
 717 */
 718void
 719xfs_qm_destroy_quotainfo(
 720	struct xfs_mount	*mp)
 721{
 722	struct xfs_quotainfo	*qi;
 723
 724	qi = mp->m_quotainfo;
 725	ASSERT(qi != NULL);
 726
 727	unregister_shrinker(&qi->qi_shrinker);
 728	list_lru_destroy(&qi->qi_lru);
 729	xfs_qm_destroy_quotainos(qi);
 730	mutex_destroy(&qi->qi_tree_lock);
 731	mutex_destroy(&qi->qi_quotaofflock);
 732	kmem_free(qi);
 733	mp->m_quotainfo = NULL;
 734}
 735
 736/*
 737 * Create an inode and return with a reference already taken, but unlocked.
 738 * This is how we create the quota inodes.
 739 */
 740STATIC int
 741xfs_qm_qino_alloc(
 742	struct xfs_mount	*mp,
 743	struct xfs_inode	**ipp,
 744	unsigned int		flags)
 745{
 746	struct xfs_trans	*tp;
 747	int			error;
 748	bool			need_alloc = true;
 749
 750	*ipp = NULL;
 751	/*
 752	 * With a superblock that doesn't have a separate pquotino, we
 753	 * share an inode between gquota and pquota. If the on-disk
 754	 * superblock has GQUOTA and the filesystem is now mounted
 755	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 756	 * vice-versa.
 757	 */
 758	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 759			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 760		xfs_ino_t ino = NULLFSINO;
 761
 762		if ((flags & XFS_QMOPT_PQUOTA) &&
 763			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 764			ino = mp->m_sb.sb_gquotino;
 765			if (XFS_IS_CORRUPT(mp,
 766					   mp->m_sb.sb_pquotino != NULLFSINO))
 767				return -EFSCORRUPTED;
 768		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 769			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 770			ino = mp->m_sb.sb_pquotino;
 771			if (XFS_IS_CORRUPT(mp,
 772					   mp->m_sb.sb_gquotino != NULLFSINO))
 773				return -EFSCORRUPTED;
 774		}
 775		if (ino != NULLFSINO) {
 776			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
 777			if (error)
 778				return error;
 779			mp->m_sb.sb_gquotino = NULLFSINO;
 780			mp->m_sb.sb_pquotino = NULLFSINO;
 781			need_alloc = false;
 782		}
 783	}
 784
 785	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 786			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
 787			0, 0, &tp);
 788	if (error)
 789		return error;
 790
 791	if (need_alloc) {
 792		xfs_ino_t	ino;
 793
 794		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
 795		if (!error)
 796			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
 797					S_IFREG, 1, 0, 0, false, ipp);
 798		if (error) {
 799			xfs_trans_cancel(tp);
 800			return error;
 801		}
 802	}
 803
 804	/*
 805	 * Make the changes in the superblock, and log those too.
 806	 * sbfields arg may contain fields other than *QUOTINO;
 807	 * VERSIONNUM for example.
 808	 */
 809	spin_lock(&mp->m_sb_lock);
 810	if (flags & XFS_QMOPT_SBVERSION) {
 811		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 812
 813		xfs_sb_version_addquota(&mp->m_sb);
 814		mp->m_sb.sb_uquotino = NULLFSINO;
 815		mp->m_sb.sb_gquotino = NULLFSINO;
 816		mp->m_sb.sb_pquotino = NULLFSINO;
 817
 818		/* qflags will get updated fully _after_ quotacheck */
 819		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 820	}
 821	if (flags & XFS_QMOPT_UQUOTA)
 822		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
 823	else if (flags & XFS_QMOPT_GQUOTA)
 824		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
 825	else
 826		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
 827	spin_unlock(&mp->m_sb_lock);
 828	xfs_log_sb(tp);
 829
 830	error = xfs_trans_commit(tp);
 831	if (error) {
 832		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 833		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 834	}
 835	if (need_alloc)
 836		xfs_finish_inode_setup(*ipp);
 837	return error;
 838}
 839
 840
 841STATIC void
 842xfs_qm_reset_dqcounts(
 843	struct xfs_mount	*mp,
 844	struct xfs_buf		*bp,
 845	xfs_dqid_t		id,
 846	xfs_dqtype_t		type)
 847{
 848	struct xfs_dqblk	*dqb;
 849	int			j;
 850
 851	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 852
 853	/*
 854	 * Reset all counters and timers. They'll be
 855	 * started afresh by xfs_qm_quotacheck.
 856	 */
 857#ifdef DEBUG
 858	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
 859		sizeof(xfs_dqblk_t);
 860	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 861#endif
 862	dqb = bp->b_addr;
 863	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 864		struct xfs_disk_dquot	*ddq;
 865
 866		ddq = (struct xfs_disk_dquot *)&dqb[j];
 867
 868		/*
 869		 * Do a sanity check, and if needed, repair the dqblk. Don't
 870		 * output any warnings because it's perfectly possible to
 871		 * find uninitialised dquot blks. See comment in
 872		 * xfs_dquot_verify.
 873		 */
 874		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
 875		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
 876			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 877
 878		/*
 879		 * Reset type in case we are reusing group quota file for
 880		 * project quotas or vice versa
 881		 */
 882		ddq->d_type = type;
 883		ddq->d_bcount = 0;
 884		ddq->d_icount = 0;
 885		ddq->d_rtbcount = 0;
 886
 887		/*
 888		 * dquot id 0 stores the default grace period and the maximum
 889		 * warning limit that were set by the administrator, so we
 890		 * should not reset them.
 891		 */
 892		if (ddq->d_id != 0) {
 893			ddq->d_btimer = 0;
 894			ddq->d_itimer = 0;
 895			ddq->d_rtbtimer = 0;
 896			ddq->d_bwarns = 0;
 897			ddq->d_iwarns = 0;
 898			ddq->d_rtbwarns = 0;
 899			if (xfs_sb_version_hasbigtime(&mp->m_sb))
 900				ddq->d_type |= XFS_DQTYPE_BIGTIME;
 901		}
 902
 903		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 904			xfs_update_cksum((char *)&dqb[j],
 905					 sizeof(struct xfs_dqblk),
 906					 XFS_DQUOT_CRC_OFF);
 907		}
 908	}
 909}
 910
 911STATIC int
 912xfs_qm_reset_dqcounts_all(
 913	struct xfs_mount	*mp,
 914	xfs_dqid_t		firstid,
 915	xfs_fsblock_t		bno,
 916	xfs_filblks_t		blkcnt,
 917	xfs_dqtype_t		type,
 918	struct list_head	*buffer_list)
 919{
 920	struct xfs_buf		*bp;
 921	int			error = 0;
 922
 923	ASSERT(blkcnt > 0);
 924
 925	/*
 926	 * Blkcnt arg can be a very big number, and might even be
 927	 * larger than the log itself. So, we have to break it up into
 928	 * manageable-sized transactions.
 929	 * Note that we don't start a permanent transaction here; we might
 930	 * not be able to get a log reservation for the whole thing up front,
 931	 * and we don't really care to either, because we just discard
 932	 * everything if we were to crash in the middle of this loop.
 933	 */
 934	while (blkcnt--) {
 935		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 936			      XFS_FSB_TO_DADDR(mp, bno),
 937			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 938			      &xfs_dquot_buf_ops);
 939
 940		/*
 941		 * CRC and validation errors will return an EFSCORRUPTED error here. If
 942		 * this occurs, re-read without CRC validation so that we can
 943		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 944		 * will leave a trace in the log indicating corruption has
 945		 * been detected.
 946		 */
 947		if (error == -EFSCORRUPTED) {
 948			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 949				      XFS_FSB_TO_DADDR(mp, bno),
 950				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 951				      NULL);
 952		}
 953
 954		if (error)
 955			break;
 956
 957		/*
 958		 * A corrupt buffer might not have a verifier attached, so
 959		 * make sure we have the correct one attached before writeback
 960		 * occurs.
 961		 */
 962		bp->b_ops = &xfs_dquot_buf_ops;
 963		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 964		xfs_buf_delwri_queue(bp, buffer_list);
 965		xfs_buf_relse(bp);
 966
 967		/* go to the next block. */
 968		bno++;
 969		firstid += mp->m_quotainfo->qi_dqperchunk;
 970	}
 971
 972	return error;
 973}
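/*
 * The two-step read above is deliberate: the first read runs the dquot
 * buffer verifier, and only if that fails with -EFSCORRUPTED is the buffer
 * re-read with no verifier attached, so that xfs_qm_reset_dqcounts() can
 * repair the contents in place before the buffer is queued for writeback
 * with the proper verifier reattached.
 */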
 974
 975/*
 976 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 977 * counters for every chunk of dquots that we find.
 978 */
 979STATIC int
 980xfs_qm_reset_dqcounts_buf(
 981	struct xfs_mount	*mp,
 982	struct xfs_inode	*qip,
 983	xfs_dqtype_t		type,
 984	struct list_head	*buffer_list)
 985{
 986	struct xfs_bmbt_irec	*map;
 987	int			i, nmaps;	/* number of map entries */
 988	int			error;		/* return value */
 989	xfs_fileoff_t		lblkno;
 990	xfs_filblks_t		maxlblkcnt;
 991	xfs_dqid_t		firstid;
 992	xfs_fsblock_t		rablkno;
 993	xfs_filblks_t		rablkcnt;
 994
 995	error = 0;
 996	/*
 997	 * This looks racy, but we can't keep an inode lock across a
 998	 * trans_reserve. But, this gets called during quotacheck, and that
 999	 * happens only at mount time, which is single threaded.
1000	 */
1001	if (qip->i_nblocks == 0)
1002		return 0;
1003
1004	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1005
1006	lblkno = 0;
1007	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1008	do {
1009		uint		lock_mode;
1010
1011		nmaps = XFS_DQITER_MAP_SIZE;
1012		/*
1013		 * We aren't changing the inode itself. Just changing
1014		 * some of its data. No new blocks are added here, and
1015		 * the inode is never added to the transaction.
1016		 */
1017		lock_mode = xfs_ilock_data_map_shared(qip);
1018		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1019				       map, &nmaps, 0);
1020		xfs_iunlock(qip, lock_mode);
1021		if (error)
1022			break;
1023
1024		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1025		for (i = 0; i < nmaps; i++) {
1026			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1027			ASSERT(map[i].br_blockcount);
1028
1029
1030			lblkno += map[i].br_blockcount;
1031
1032			if (map[i].br_startblock == HOLESTARTBLOCK)
1033				continue;
1034
1035			firstid = (xfs_dqid_t) map[i].br_startoff *
1036				mp->m_quotainfo->qi_dqperchunk;
1037			/*
1038			 * Do a read-ahead on the next extent.
1039			 */
1040			if ((i+1 < nmaps) &&
1041			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1042				rablkcnt =  map[i+1].br_blockcount;
1043				rablkno = map[i+1].br_startblock;
1044				while (rablkcnt--) {
1045					xfs_buf_readahead(mp->m_ddev_targp,
1046					       XFS_FSB_TO_DADDR(mp, rablkno),
1047					       mp->m_quotainfo->qi_dqchunklen,
1048					       &xfs_dquot_buf_ops);
1049					rablkno++;
1050				}
1051			}
1052			/*
1053			 * Iterate through all the blocks in the extent and
1054			 * reset the counters of all the dquots inside them.
1055			 */
1056			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1057						   map[i].br_startblock,
1058						   map[i].br_blockcount,
1059						   type, buffer_list);
1060			if (error)
1061				goto out;
1062		}
1063	} while (nmaps > 0);
1064
1065out:
1066	kmem_free(map);
1067	return error;
1068}
1069
1070/*
1071 * Called by dqusage_adjust in doing a quotacheck.
1072 *
1073 * Given the inode and a dquot id, this updates both the incore dquot as well
1074 * as the buffer copy. This is so that once the quotacheck is done, we can
1075 * just log all the buffers, as opposed to logging numerous updates to
1076 * individual dquots.
1077 */
1078STATIC int
1079xfs_qm_quotacheck_dqadjust(
1080	struct xfs_inode	*ip,
1081	xfs_dqtype_t		type,
1082	xfs_qcnt_t		nblks,
1083	xfs_qcnt_t		rtblks)
1084{
1085	struct xfs_mount	*mp = ip->i_mount;
1086	struct xfs_dquot	*dqp;
1087	xfs_dqid_t		id;
1088	int			error;
1089
1090	id = xfs_qm_id_for_quotatype(ip, type);
1091	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1092	if (error) {
1093		/*
1094		 * Shouldn't be able to turn off quotas here.
1095		 */
1096		ASSERT(error != -ESRCH);
1097		ASSERT(error != -ENOENT);
1098		return error;
1099	}
1100
1101	trace_xfs_dqadjust(dqp);
1102
1103	/*
1104	 * Adjust the inode count and the block count to reflect this inode's
1105	 * resource usage.
1106	 */
1107	dqp->q_ino.count++;
1108	dqp->q_ino.reserved++;
1109	if (nblks) {
1110		dqp->q_blk.count += nblks;
1111		dqp->q_blk.reserved += nblks;
1112	}
1113	if (rtblks) {
1114		dqp->q_rtb.count += rtblks;
1115		dqp->q_rtb.reserved += rtblks;
1116	}
1117
1118	/*
1119	 * Set default limits, adjust timers (since we changed usages)
1120	 *
1121	 * There are no timers for the default values set in the root dquot.
1122	 */
1123	if (dqp->q_id) {
1124		xfs_qm_adjust_dqlimits(dqp);
1125		xfs_qm_adjust_dqtimers(dqp);
1126	}
1127
1128	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1129	xfs_qm_dqput(dqp);
1130	return 0;
1131}
1132
1133/*
1134 * callback routine supplied to bulkstat(). Given an inumber, find its
1135 * dquots and update them to account for resources taken by that inode.
1136 */
1137/* ARGSUSED */
1138STATIC int
1139xfs_qm_dqusage_adjust(
1140	struct xfs_mount	*mp,
1141	struct xfs_trans	*tp,
1142	xfs_ino_t		ino,
1143	void			*data)
1144{
1145	struct xfs_inode	*ip;
1146	xfs_qcnt_t		nblks;
1147	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1148	int			error;
1149
1150	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1151
1152	/*
1153	 * rootino must have its resources accounted for, not so with the quota
1154	 * inodes.
1155	 */
1156	if (xfs_is_quota_inode(&mp->m_sb, ino))
1157		return 0;
1158
1159	/*
1160	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1161	 * at mount time and therefore nobody will be racing chown/chproj.
1162	 */
1163	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1164	if (error == -EINVAL || error == -ENOENT)
1165		return 0;
1166	if (error)
1167		return error;
1168
1169	ASSERT(ip->i_delayed_blks == 0);
1170
1171	if (XFS_IS_REALTIME_INODE(ip)) {
1172		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1173
1174		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1175		if (error)
1176			goto error0;
1177
1178		xfs_bmap_count_leaves(ifp, &rtblks);
1179	}
1180
1181	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1182
1183	/*
1184	 * Add the (disk blocks and inode) resources occupied by this
1185	 * inode to its dquots. We do this adjustment in the incore dquot,
1186	 * and also copy the changes to its buffer.
1187	 * We don't care about putting these changes in a transaction
1188	 * envelope because if we crash in the middle of a 'quotacheck'
1189	 * we have to start from the beginning anyway.
1190	 * Once we're done, we'll log all the dquot bufs.
1191	 *
1192	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1193	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1194	 */
1195	if (XFS_IS_UQUOTA_ON(mp)) {
1196		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1197				rtblks);
1198		if (error)
1199			goto error0;
1200	}
1201
1202	if (XFS_IS_GQUOTA_ON(mp)) {
1203		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1204				rtblks);
1205		if (error)
1206			goto error0;
1207	}
1208
1209	if (XFS_IS_PQUOTA_ON(mp)) {
1210		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1211				rtblks);
1212		if (error)
1213			goto error0;
1214	}
1215
1216error0:
1217	xfs_irele(ip);
1218	return error;
1219}
1220
1221STATIC int
1222xfs_qm_flush_one(
1223	struct xfs_dquot	*dqp,
1224	void			*data)
1225{
1226	struct xfs_mount	*mp = dqp->q_mount;
1227	struct list_head	*buffer_list = data;
1228	struct xfs_buf		*bp = NULL;
1229	int			error = 0;
1230
1231	xfs_dqlock(dqp);
1232	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1233		goto out_unlock;
1234	if (!XFS_DQ_IS_DIRTY(dqp))
1235		goto out_unlock;
1236
1237	/*
1238	 * The only way the dquot is already flush locked by the time quotacheck
1239	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1240	 * it for the final time. Quotacheck collects all dquot bufs in the
1241	 * local delwri queue before dquots are dirtied, so reclaim can't have
1242	 * possibly queued it for I/O. The only way out is to push the buffer to
1243	 * cycle the flush lock.
1244	 */
1245	if (!xfs_dqflock_nowait(dqp)) {
1246		/* buf is pinned in-core by delwri list */
1247		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1248				mp->m_quotainfo->qi_dqchunklen, 0);
1249		if (!bp) {
1250			error = -EINVAL;
1251			goto out_unlock;
1252		}
1253		xfs_buf_unlock(bp);
1254
1255		xfs_buf_delwri_pushbuf(bp, buffer_list);
1256		xfs_buf_rele(bp);
1257
1258		error = -EAGAIN;
1259		goto out_unlock;
1260	}
1261
1262	error = xfs_qm_dqflush(dqp, &bp);
1263	if (error)
1264		goto out_unlock;
1265
1266	xfs_buf_delwri_queue(bp, buffer_list);
1267	xfs_buf_relse(bp);
1268out_unlock:
1269	xfs_dqunlock(dqp);
1270	return error;
1271}
1272
1273/*
1274 * Walk through all the filesystem inodes and construct a consistent view
1275 * of the disk quota world. If the quotacheck fails, disable quotas.
1276 */
1277STATIC int
1278xfs_qm_quotacheck(
1279	xfs_mount_t	*mp)
1280{
1281	int			error, error2;
1282	uint			flags;
1283	LIST_HEAD		(buffer_list);
1284	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1285	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1286	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1287
1288	flags = 0;
1289
1290	ASSERT(uip || gip || pip);
1291	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1292
1293	xfs_notice(mp, "Quotacheck needed: Please wait.");
1294
1295	/*
1296	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1297	 * their counters to zero. We need a clean slate.
1298	 * We don't log our changes till later.
1299	 */
1300	if (uip) {
1301		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1302					 &buffer_list);
1303		if (error)
1304			goto error_return;
1305		flags |= XFS_UQUOTA_CHKD;
1306	}
1307
1308	if (gip) {
1309		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1310					 &buffer_list);
1311		if (error)
1312			goto error_return;
1313		flags |= XFS_GQUOTA_CHKD;
1314	}
1315
1316	if (pip) {
1317		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1318					 &buffer_list);
1319		if (error)
1320			goto error_return;
1321		flags |= XFS_PQUOTA_CHKD;
1322	}
1323
1324	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1325			NULL);
1326	if (error)
1327		goto error_return;
1328
1329	/*
1330	 * We've made all the changes that we need to make incore.  Flush them
1331	 * down to disk buffers if everything was updated successfully.
1332	 */
1333	if (XFS_IS_UQUOTA_ON(mp)) {
1334		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1335					  &buffer_list);
1336	}
1337	if (XFS_IS_GQUOTA_ON(mp)) {
1338		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1339					   &buffer_list);
1340		if (!error)
1341			error = error2;
1342	}
1343	if (XFS_IS_PQUOTA_ON(mp)) {
1344		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1345					   &buffer_list);
1346		if (!error)
1347			error = error2;
1348	}
1349
1350	error2 = xfs_buf_delwri_submit(&buffer_list);
1351	if (!error)
1352		error = error2;
1353
1354	/*
1355	 * We can get this error if we couldn't do a dquot allocation inside
1356	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1357	 * dirty dquots that might be cached, we just want to get rid of them
1358	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1359	 * at this point (because we intentionally didn't in dqget_noattach).
1360	 */
1361	if (error) {
1362		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1363		goto error_return;
1364	}
1365
1366	/*
1367	 * If one type of quota is off, then it will lose its
1368	 * quotachecked status, since we won't be doing accounting for
1369	 * that type anymore.
1370	 */
1371	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1372	mp->m_qflags |= flags;
1373
1374 error_return:
1375	xfs_buf_delwri_cancel(&buffer_list);
1376
1377	if (error) {
1378		xfs_warn(mp,
1379	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1380			error);
1381		/*
1382		 * We must turn off quotas.
1383		 */
1384		ASSERT(mp->m_quotainfo != NULL);
1385		xfs_qm_destroy_quotainfo(mp);
1386		if (xfs_mount_reset_sbqflags(mp)) {
1387			xfs_warn(mp,
1388				"Quotacheck: Failed to reset quota flags.");
1389		}
1390	} else
1391		xfs_notice(mp, "Quotacheck: Done.");
1392	return error;
1393}
1394
1395/*
1396 * This is called from xfs_mountfs to start quotas and initialize all
1397 * necessary data structures like quotainfo.  This is also responsible for
1398 * running a quotacheck as necessary.  We are guaranteed that the superblock
1399 * is consistently read in at this point.
1400 *
1401 * If we fail here, the mount will continue with quota turned off. We don't
1402 * need to indicate success or failure at all.
1403 */
1404void
1405xfs_qm_mount_quotas(
1406	struct xfs_mount	*mp)
1407{
1408	int			error = 0;
1409	uint			sbf;
1410
1411	/*
1412	 * If quotas on realtime volumes are not supported, we disable
1413	 * quotas immediately.
1414	 */
1415	if (mp->m_sb.sb_rextents) {
1416		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1417		mp->m_qflags = 0;
1418		goto write_changes;
1419	}
1420
1421	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1422
1423	/*
1424	 * Allocate the quotainfo structure inside the mount struct, and
1425	 * create the quota inode(s), and change/rev the superblock if necessary.
1426	 */
1427	error = xfs_qm_init_quotainfo(mp);
1428	if (error) {
1429		/*
1430		 * We must turn off quotas.
1431		 */
1432		ASSERT(mp->m_quotainfo == NULL);
1433		mp->m_qflags = 0;
1434		goto write_changes;
1435	}
1436	/*
1437	 * If any of the quotas are not consistent, do a quotacheck.
1438	 */
1439	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1440		error = xfs_qm_quotacheck(mp);
1441		if (error) {
1442			/* Quotacheck failed and disabled quotas. */
1443			return;
1444		}
1445	}
1446	/*
1447	 * If one type of quota is off, then it will lose its
1448	 * quotachecked status, since we won't be doing accounting for
1449	 * that type anymore.
1450	 */
1451	if (!XFS_IS_UQUOTA_ON(mp))
1452		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1453	if (!XFS_IS_GQUOTA_ON(mp))
1454		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1455	if (!XFS_IS_PQUOTA_ON(mp))
1456		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1457
1458 write_changes:
1459	/*
1460	 * We actually don't have to acquire the m_sb_lock at all.
1461	 * This can only be called from mount, and that's single threaded. XXX
1462	 */
1463	spin_lock(&mp->m_sb_lock);
1464	sbf = mp->m_sb.sb_qflags;
1465	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1466	spin_unlock(&mp->m_sb_lock);
1467
1468	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1469		if (xfs_sync_sb(mp, false)) {
1470			/*
1471			 * We could only have been turning quotas off.
1472			 * We aren't in very good shape actually because
1473			 * the incore structures are convinced that quotas are
1474			 * off, but the on-disk superblock doesn't know that!
1475			 */
1476			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1477			xfs_alert(mp, "%s: Superblock update failed!",
1478				__func__);
1479		}
1480	}
1481
1482	if (error) {
1483		xfs_warn(mp, "Failed to initialize disk quotas.");
1484		return;
1485	}
1486}
1487
1488/*
1489 * This is called after the superblock has been read in and we're ready to
1490 * iget the quota inodes.
1491 */
1492STATIC int
1493xfs_qm_init_quotainos(
1494	xfs_mount_t	*mp)
1495{
1496	struct xfs_inode	*uip = NULL;
1497	struct xfs_inode	*gip = NULL;
1498	struct xfs_inode	*pip = NULL;
1499	int			error;
1500	uint			flags = 0;
1501
1502	ASSERT(mp->m_quotainfo);
1503
1504	/*
1505	 * Get the uquota, gquota and pquota inodes
1506	 */
1507	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1508		if (XFS_IS_UQUOTA_ON(mp) &&
1509		    mp->m_sb.sb_uquotino != NULLFSINO) {
1510			ASSERT(mp->m_sb.sb_uquotino > 0);
1511			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1512					     0, 0, &uip);
1513			if (error)
1514				return error;
1515		}
1516		if (XFS_IS_GQUOTA_ON(mp) &&
1517		    mp->m_sb.sb_gquotino != NULLFSINO) {
1518			ASSERT(mp->m_sb.sb_gquotino > 0);
1519			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1520					     0, 0, &gip);
1521			if (error)
1522				goto error_rele;
1523		}
1524		if (XFS_IS_PQUOTA_ON(mp) &&
1525		    mp->m_sb.sb_pquotino != NULLFSINO) {
1526			ASSERT(mp->m_sb.sb_pquotino > 0);
1527			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1528					     0, 0, &pip);
1529			if (error)
1530				goto error_rele;
1531		}
1532	} else {
1533		flags |= XFS_QMOPT_SBVERSION;
1534	}
1535
1536	/*
1537	 * Create the three inodes, if they don't exist already. The changes
1538	 * made above will get added to a transaction and logged in one of
1539	 * the qino_alloc calls below.  If the device is readonly,
1540	 * temporarily switch to read-write to do this.
1541	 */
1542	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1543		error = xfs_qm_qino_alloc(mp, &uip,
1544					      flags | XFS_QMOPT_UQUOTA);
1545		if (error)
1546			goto error_rele;
1547
1548		flags &= ~XFS_QMOPT_SBVERSION;
1549	}
1550	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1551		error = xfs_qm_qino_alloc(mp, &gip,
1552					  flags | XFS_QMOPT_GQUOTA);
1553		if (error)
1554			goto error_rele;
1555
1556		flags &= ~XFS_QMOPT_SBVERSION;
1557	}
1558	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1559		error = xfs_qm_qino_alloc(mp, &pip,
1560					  flags | XFS_QMOPT_PQUOTA);
1561		if (error)
1562			goto error_rele;
1563	}
1564
1565	mp->m_quotainfo->qi_uquotaip = uip;
1566	mp->m_quotainfo->qi_gquotaip = gip;
1567	mp->m_quotainfo->qi_pquotaip = pip;
1568
1569	return 0;
1570
1571error_rele:
1572	if (uip)
1573		xfs_irele(uip);
1574	if (gip)
1575		xfs_irele(gip);
1576	if (pip)
1577		xfs_irele(pip);
1578	return error;
1579}
1580
1581STATIC void
1582xfs_qm_destroy_quotainos(
1583	struct xfs_quotainfo	*qi)
1584{
1585	if (qi->qi_uquotaip) {
1586		xfs_irele(qi->qi_uquotaip);
1587		qi->qi_uquotaip = NULL; /* paranoia */
1588	}
1589	if (qi->qi_gquotaip) {
1590		xfs_irele(qi->qi_gquotaip);
1591		qi->qi_gquotaip = NULL;
1592	}
1593	if (qi->qi_pquotaip) {
1594		xfs_irele(qi->qi_pquotaip);
1595		qi->qi_pquotaip = NULL;
1596	}
1597}
1598
1599STATIC void
1600xfs_qm_dqfree_one(
1601	struct xfs_dquot	*dqp)
1602{
1603	struct xfs_mount	*mp = dqp->q_mount;
1604	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1605
1606	mutex_lock(&qi->qi_tree_lock);
1607	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1608
1609	qi->qi_dquots--;
1610	mutex_unlock(&qi->qi_tree_lock);
1611
1612	xfs_qm_dqdestroy(dqp);
1613}
1614
1615/* --------------- utility functions for vnodeops ---------------- */
1616
1617
1618/*
1619 * Given an inode, a uid, gid and prid, make sure that we have
1620 * allocated the relevant dquot(s) on disk, and that we won't exceed inode
1621 * quotas by creating this file.
1622 * This also attaches dquot(s) to the given inode after locking it,
1623 * and returns the dquots corresponding to the uid and/or gid.
1624 *
1625 * in	: inode (unlocked)
1626 * out	: udquot, gdquot with references taken and unlocked
1627 */
1628int
1629xfs_qm_vop_dqalloc(
1630	struct xfs_inode	*ip,
1631	kuid_t			uid,
1632	kgid_t			gid,
1633	prid_t			prid,
1634	uint			flags,
1635	struct xfs_dquot	**O_udqpp,
1636	struct xfs_dquot	**O_gdqpp,
1637	struct xfs_dquot	**O_pdqpp)
1638{
1639	struct xfs_mount	*mp = ip->i_mount;
1640	struct inode		*inode = VFS_I(ip);
1641	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1642	struct xfs_dquot	*uq = NULL;
1643	struct xfs_dquot	*gq = NULL;
1644	struct xfs_dquot	*pq = NULL;
1645	int			error;
1646	uint			lockflags;
1647
1648	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1649		return 0;
1650
1651	lockflags = XFS_ILOCK_EXCL;
1652	xfs_ilock(ip, lockflags);
1653
1654	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1655		gid = inode->i_gid;
1656
1657	/*
1658	 * Attach the dquot(s) to this inode, doing a dquot allocation
1659	 * if necessary. The dquot(s) will not be locked.
1660	 */
1661	if (XFS_NOT_DQATTACHED(mp, ip)) {
1662		error = xfs_qm_dqattach_locked(ip, true);
1663		if (error) {
1664			xfs_iunlock(ip, lockflags);
1665			return error;
1666		}
1667	}
1668
1669	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1670		ASSERT(O_udqpp);
1671		if (!uid_eq(inode->i_uid, uid)) {
1672			/*
1673			 * What we need is the dquot that has this uid, and
1674			 * if we send the inode to dqget, the uid of the inode
1675			 * takes priority over what's sent in the uid argument.
1676			 * We must unlock inode here before calling dqget if
1677			 * we're not sending the inode, because otherwise
1678			 * we'll deadlock by doing trans_reserve while
1679			 * holding ilock.
1680			 */
1681			xfs_iunlock(ip, lockflags);
1682			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1683					XFS_DQTYPE_USER, true, &uq);
1684			if (error) {
1685				ASSERT(error != -ENOENT);
1686				return error;
1687			}
1688			/*
1689			 * Get the ilock in the right order.
1690			 */
1691			xfs_dqunlock(uq);
1692			lockflags = XFS_ILOCK_SHARED;
1693			xfs_ilock(ip, lockflags);
1694		} else {
1695			/*
1696			 * Take an extra reference, because we'll return
1697			 * this to caller
1698			 */
1699			ASSERT(ip->i_udquot);
1700			uq = xfs_qm_dqhold(ip->i_udquot);
1701		}
1702	}
1703	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1704		ASSERT(O_gdqpp);
1705		if (!gid_eq(inode->i_gid, gid)) {
1706			xfs_iunlock(ip, lockflags);
1707			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1708					XFS_DQTYPE_GROUP, true, &gq);
1709			if (error) {
1710				ASSERT(error != -ENOENT);
1711				goto error_rele;
1712			}
1713			xfs_dqunlock(gq);
1714			lockflags = XFS_ILOCK_SHARED;
1715			xfs_ilock(ip, lockflags);
1716		} else {
1717			ASSERT(ip->i_gdquot);
1718			gq = xfs_qm_dqhold(ip->i_gdquot);
1719		}
1720	}
1721	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722		ASSERT(O_pdqpp);
1723		if (ip->i_projid != prid) {
1724			xfs_iunlock(ip, lockflags);
1725			error = xfs_qm_dqget(mp, prid,
1726					XFS_DQTYPE_PROJ, true, &pq);
1727			if (error) {
1728				ASSERT(error != -ENOENT);
1729				goto error_rele;
1730			}
1731			xfs_dqunlock(pq);
1732			lockflags = XFS_ILOCK_SHARED;
1733			xfs_ilock(ip, lockflags);
1734		} else {
1735			ASSERT(ip->i_pdquot);
1736			pq = xfs_qm_dqhold(ip->i_pdquot);
1737		}
1738	}
1739	trace_xfs_dquot_dqalloc(ip);
1740
1741	xfs_iunlock(ip, lockflags);
1742	if (O_udqpp)
1743		*O_udqpp = uq;
1744	else
1745		xfs_qm_dqrele(uq);
1746	if (O_gdqpp)
1747		*O_gdqpp = gq;
1748	else
1749		xfs_qm_dqrele(gq);
1750	if (O_pdqpp)
1751		*O_pdqpp = pq;
1752	else
1753		xfs_qm_dqrele(pq);
1754	return 0;
1755
1756error_rele:
1757	xfs_qm_dqrele(gq);
1758	xfs_qm_dqrele(uq);
1759	return error;
1760}
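/*
 * Lock ordering note for the function above: the ilock must not be held
 * across xfs_qm_dqget(), which may reserve transaction space and allocate,
 * so each branch follows the same outline (illustrative only):
 *
 *	xfs_iunlock(ip, lockflags);
 *	error = xfs_qm_dqget(...);	(may block and allocate)
 *	xfs_dqunlock(dq);
 *	lockflags = XFS_ILOCK_SHARED;
 *	xfs_ilock(ip, lockflags);
 */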
1761
1762/*
1763 * Actually transfer ownership, and do dquot modifications.
1764 * These were already reserved.
1765 */
1766struct xfs_dquot *
1767xfs_qm_vop_chown(
1768	struct xfs_trans	*tp,
1769	struct xfs_inode	*ip,
1770	struct xfs_dquot	**IO_olddq,
1771	struct xfs_dquot	*newdq)
1772{
1773	struct xfs_dquot	*prevdq;
1774	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1775				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776
1777
1778	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780
1781	/* old dquot */
1782	prevdq = *IO_olddq;
1783	ASSERT(prevdq);
1784	ASSERT(prevdq != newdq);
1785
1786	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1787	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789	/* the sparkling new dquot */
1790	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1791	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792
1793	/*
1794	 * Back when we made quota reservations for the chown, we reserved the
1795	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
1796	 * switched the dquots, decrease the new dquot's block reservation
1797	 * (having already bumped up the real counter) so that we don't have
1798	 * any reservation to give back when we commit.
1799	 */
1800	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1801			-ip->i_delayed_blks);
1802
1803	/*
1804	 * Give the incore reservation for delalloc blocks back to the old
1805	 * dquot.  We don't normally handle delalloc quota reservations
1806	 * transactionally, so just lock the dquot and subtract from the
1807	 * reservation.  Dirty the transaction because it's too late to turn
1808	 * back now.
1809	 */
1810	tp->t_flags |= XFS_TRANS_DIRTY;
1811	xfs_dqlock(prevdq);
1812	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1813	prevdq->q_blk.reserved -= ip->i_delayed_blks;
1814	xfs_dqunlock(prevdq);
1815
1816	/*
1817	 * Take an extra reference, because the inode is going to keep
1818	 * this dquot pointer even after the trans_commit.
1819	 */
1820	*IO_olddq = xfs_qm_dqhold(newdq);
1821
1822	return prevdq;
1823}
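/*
 * A worked example of the reservation arithmetic above, with illustrative
 * numbers: suppose the inode has i_nblocks == 100 and i_delayed_blks == 20,
 * so the chown reserved 120 blocks against newdq.  At commit, newdq's real
 * counter grows by 100; its transaction reservation is trimmed by 20 so
 * those 20 blocks remain behind as the standing incore reservation for the
 * still-unwritten delalloc extents.  prevdq's real counter drops by 100,
 * and its now-stale 20-block delalloc reservation is released directly
 * under the dquot lock.
 */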
1824
1825int
1826xfs_qm_vop_rename_dqattach(
1827	struct xfs_inode	**i_tab)
1828{
1829	struct xfs_mount	*mp = i_tab[0]->i_mount;
1830	int			i;
1831
1832	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1833		return 0;
1834
1835	for (i = 0; (i < 4 && i_tab[i]); i++) {
1836		struct xfs_inode	*ip = i_tab[i];
1837		int			error;
1838
1839		/*
1840		 * Watch out for duplicate entries in the table.
1841		 */
1842		if (i == 0 || ip != i_tab[i-1]) {
1843			if (XFS_NOT_DQATTACHED(mp, ip)) {
1844				error = xfs_qm_dqattach(ip);
1845				if (error)
1846					return error;
1847			}
1848		}
1849	}
1850	return 0;
1851}
1852
1853void
1854xfs_qm_vop_create_dqattach(
1855	struct xfs_trans	*tp,
1856	struct xfs_inode	*ip,
1857	struct xfs_dquot	*udqp,
1858	struct xfs_dquot	*gdqp,
1859	struct xfs_dquot	*pdqp)
1860{
1861	struct xfs_mount	*mp = tp->t_mountp;
1862
1863	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1864		return;
1865
1866	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1867
1868	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1869		ASSERT(ip->i_udquot == NULL);
1870		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1871
1872		ip->i_udquot = xfs_qm_dqhold(udqp);
1873		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1874	}
1875	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1876		ASSERT(ip->i_gdquot == NULL);
1877		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1878
1879		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1880		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1881	}
1882	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1883		ASSERT(ip->i_pdquot == NULL);
1884		ASSERT(ip->i_projid == pdqp->q_id);
1885
1886		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1887		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1888	}
1889}
1890