fs/xfs/xfs_qm.c (Linux v4.10.11)

   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_sb.h"
  26#include "xfs_mount.h"
  27#include "xfs_inode.h"
  28#include "xfs_ialloc.h"
  29#include "xfs_itable.h"
  30#include "xfs_quota.h"
  31#include "xfs_error.h"
  32#include "xfs_bmap.h"
  33#include "xfs_bmap_btree.h"
  34#include "xfs_trans.h"
  35#include "xfs_trans_space.h"
  36#include "xfs_qm.h"
  37#include "xfs_trace.h"
  38#include "xfs_icache.h"
  39#include "xfs_cksum.h"
  40
  41/*
  42 * The global quota manager. There is only one of these for the entire
  43 * system, _not_ one per file system. XQM keeps track of the overall
  44 * quota functionality, including maintaining the freelist and hash
  45 * tables of dquots.
  46 */
  47STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
  48STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
  49
  50
  51STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  52/*
  53 * We use the batch lookup interface to iterate over the dquots as it
  54 * currently is the only interface into the radix tree code that allows
  55 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
   56 * operations is fine as all callers run either during mount/umount
  57 * or quotaoff.
  58 */
  59#define XFS_DQ_LOOKUP_BATCH	32
  60
  61STATIC int
  62xfs_qm_dquot_walk(
  63	struct xfs_mount	*mp,
  64	int			type,
  65	int			(*execute)(struct xfs_dquot *dqp, void *data),
  66	void			*data)
  67{
  68	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  69	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  70	uint32_t		next_index;
  71	int			last_error = 0;
  72	int			skipped;
  73	int			nr_found;
  74
  75restart:
  76	skipped = 0;
  77	next_index = 0;
  78	nr_found = 0;
  79
  80	while (1) {
  81		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  82		int		error = 0;
  83		int		i;
  84
  85		mutex_lock(&qi->qi_tree_lock);
  86		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  87					next_index, XFS_DQ_LOOKUP_BATCH);
  88		if (!nr_found) {
  89			mutex_unlock(&qi->qi_tree_lock);
  90			break;
  91		}
  92
  93		for (i = 0; i < nr_found; i++) {
  94			struct xfs_dquot *dqp = batch[i];
  95
  96			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
  97
  98			error = execute(batch[i], data);
  99			if (error == -EAGAIN) {
 100				skipped++;
 101				continue;
 102			}
 103			if (error && last_error != -EFSCORRUPTED)
 104				last_error = error;
 105		}
 106
 107		mutex_unlock(&qi->qi_tree_lock);
 108
 109		/* bail out if the filesystem is corrupted.  */
 110		if (last_error == -EFSCORRUPTED) {
 111			skipped = 0;
 112			break;
 113		}
 114	}
 115
 116	if (skipped) {
 117		delay(1);
 118		goto restart;
 119	}
 120
 121	return last_error;
 122}
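/*
 * For example, xfs_qm_dqpurge_all() below drives this walk with a
 * per-dquot callback:
 *
 *	error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
 *
 * A callback returning -EAGAIN is counted as skipped; the walk then
 * delays briefly and restarts until no dquot is skipped.
 */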
 123
 124
 125/*
 126 * Purge a dquot from all tracking data structures and free it.
 127 */
 128STATIC int
 129xfs_qm_dqpurge(
 130	struct xfs_dquot	*dqp,
 131	void			*data)
 132{
 133	struct xfs_mount	*mp = dqp->q_mount;
 134	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 135
 136	xfs_dqlock(dqp);
 137	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
 138		xfs_dqunlock(dqp);
 139		return -EAGAIN;
 140	}
 141
 142	dqp->dq_flags |= XFS_DQ_FREEING;
 143
 144	xfs_dqflock(dqp);
 145
 146	/*
 147	 * If we are turning this type of quotas off, we don't care
 148	 * about the dirty metadata sitting in this dquot. OTOH, if
 149	 * we're unmounting, we do care, so we flush it and wait.
 150	 */
 151	if (XFS_DQ_IS_DIRTY(dqp)) {
 152		struct xfs_buf	*bp = NULL;
 153		int		error;
 154
 155		/*
 156		 * We don't care about getting disk errors here. We need
 157		 * to purge this dquot anyway, so we go ahead regardless.
 158		 */
 159		error = xfs_qm_dqflush(dqp, &bp);
 160		if (error) {
 161			xfs_warn(mp, "%s: dquot %p flush failed",
 162				__func__, dqp);
 163		} else {
 164			error = xfs_bwrite(bp);
 165			xfs_buf_relse(bp);
 166		}
 167		xfs_dqflock(dqp);
 168	}
 169
 170	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 171	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 172	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
 173
 174	xfs_dqfunlock(dqp);
 175	xfs_dqunlock(dqp);
 176
 177	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
 178			  be32_to_cpu(dqp->q_core.d_id));
 179	qi->qi_dquots--;
 180
 181	/*
 182	 * We move dquots to the freelist as soon as their reference count
 183	 * hits zero, so it really should be on the freelist here.
 184	 */
 185	ASSERT(!list_empty(&dqp->q_lru));
 186	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 187	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 188
 189	xfs_qm_dqdestroy(dqp);
 190	return 0;
 191}
 192
 193/*
 194 * Purge the dquot cache.
 195 */
 196void
 197xfs_qm_dqpurge_all(
 198	struct xfs_mount	*mp,
 199	uint			flags)
 200{
 201	if (flags & XFS_QMOPT_UQUOTA)
 202		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
 203	if (flags & XFS_QMOPT_GQUOTA)
 204		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 205	if (flags & XFS_QMOPT_PQUOTA)
 206		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
 207}
 208
 209/*
 210 * Just destroy the quotainfo structure.
 211 */
 212void
 213xfs_qm_unmount(
 214	struct xfs_mount	*mp)
 215{
 216	if (mp->m_quotainfo) {
 217		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 218		xfs_qm_destroy_quotainfo(mp);
 219	}
 220}
 221
 222/*
 223 * Called from the vfsops layer.
 224 */
 225void
 226xfs_qm_unmount_quotas(
 227	xfs_mount_t	*mp)
 228{
 229	/*
 230	 * Release the dquots that root inode, et al might be holding,
 231	 * before we flush quotas and blow away the quotainfo structure.
 232	 */
 233	ASSERT(mp->m_rootip);
 234	xfs_qm_dqdetach(mp->m_rootip);
 235	if (mp->m_rbmip)
 236		xfs_qm_dqdetach(mp->m_rbmip);
 237	if (mp->m_rsumip)
 238		xfs_qm_dqdetach(mp->m_rsumip);
 239
 240	/*
 241	 * Release the quota inodes.
 242	 */
 243	if (mp->m_quotainfo) {
 244		if (mp->m_quotainfo->qi_uquotaip) {
 245			IRELE(mp->m_quotainfo->qi_uquotaip);
 246			mp->m_quotainfo->qi_uquotaip = NULL;
 247		}
 248		if (mp->m_quotainfo->qi_gquotaip) {
 249			IRELE(mp->m_quotainfo->qi_gquotaip);
 250			mp->m_quotainfo->qi_gquotaip = NULL;
 251		}
 252		if (mp->m_quotainfo->qi_pquotaip) {
 253			IRELE(mp->m_quotainfo->qi_pquotaip);
 254			mp->m_quotainfo->qi_pquotaip = NULL;
 255		}
 256	}
 257}
 258
 259STATIC int
 260xfs_qm_dqattach_one(
 261	xfs_inode_t	*ip,
 262	xfs_dqid_t	id,
 263	uint		type,
 264	uint		doalloc,
 265	xfs_dquot_t	**IO_idqpp)
 266{
 267	xfs_dquot_t	*dqp;
 268	int		error;
 269
 270	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 271	error = 0;
 272
 273	/*
 274	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 275	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 276	 * simpler.
 277	 */
 278	dqp = *IO_idqpp;
 279	if (dqp) {
 280		trace_xfs_dqattach_found(dqp);
 281		return 0;
 282	}
 283
 284	/*
 285	 * Find the dquot from somewhere. This bumps the reference count of
 286	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 287	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 288	 * turned off suddenly.
 289	 */
 290	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
 291			     doalloc | XFS_QMOPT_DOWARN, &dqp);
 292	if (error)
 293		return error;
 294
 295	trace_xfs_dqattach_get(dqp);
 296
 297	/*
 298	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 299	 * that the dquot returned is the one that should go in the inode.
 300	 */
 301	*IO_idqpp = dqp;
 302	xfs_dqunlock(dqp);
 303	return 0;
 304}
 305
 306static bool
 307xfs_qm_need_dqattach(
 308	struct xfs_inode	*ip)
 309{
 310	struct xfs_mount	*mp = ip->i_mount;
 311
 312	if (!XFS_IS_QUOTA_RUNNING(mp))
 313		return false;
 314	if (!XFS_IS_QUOTA_ON(mp))
 315		return false;
 316	if (!XFS_NOT_DQATTACHED(mp, ip))
 317		return false;
 318	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 319		return false;
 320	return true;
 321}
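/*
 * Note: this is a cheap, unlocked pre-check. Both xfs_qm_dqattach() and
 * xfs_qm_dqattach_locked() use it to bail out early when quotas are not
 * running or not on, when dquots are already attached, or when the inode
 * is itself a quota inode.
 */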
 322
 323/*
 324 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 325 * into account.
 326 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 327 * Inode may get unlocked and relocked in here, and the caller must deal with
 328 * the consequences.
 329 */
 330int
 331xfs_qm_dqattach_locked(
 332	xfs_inode_t	*ip,
 333	uint		flags)
 334{
 335	xfs_mount_t	*mp = ip->i_mount;
 336	int		error = 0;
 337
 338	if (!xfs_qm_need_dqattach(ip))
 339		return 0;
 340
 341	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 342
 343	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 344		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
 345						flags & XFS_QMOPT_DQALLOC,
 346						&ip->i_udquot);
 347		if (error)
 348			goto done;
 349		ASSERT(ip->i_udquot);
 350	}
 351
 352	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 353		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
 354						flags & XFS_QMOPT_DQALLOC,
 355						&ip->i_gdquot);
 356		if (error)
 357			goto done;
 358		ASSERT(ip->i_gdquot);
 359	}
 360
 361	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 362		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
 363						flags & XFS_QMOPT_DQALLOC,
 364						&ip->i_pdquot);
 365		if (error)
 366			goto done;
 367		ASSERT(ip->i_pdquot);
 368	}
 369
 370done:
 371	/*
 372	 * Don't worry about the dquots that we may have attached before any
 373	 * error - they'll get detached later if it has not already been done.
 374	 */
 375	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 376	return error;
 377}
 378
 379int
 380xfs_qm_dqattach(
 381	struct xfs_inode	*ip,
 382	uint			flags)
 383{
 384	int			error;
 385
 386	if (!xfs_qm_need_dqattach(ip))
 387		return 0;
 388
 389	xfs_ilock(ip, XFS_ILOCK_EXCL);
 390	error = xfs_qm_dqattach_locked(ip, flags);
 391	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 392
 393	return error;
 394}
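/*
 * Typical call site (illustrative sketch): paths that may allocate
 * blocks attach dquots first, e.g.
 *
 *	error = xfs_qm_dqattach(ip, 0);
 *	if (error)
 *		return error;
 *
 * Passing XFS_QMOPT_DQALLOC in flags additionally allocates on-disk
 * dquots that do not exist yet.
 */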
 395
 396/*
 397 * Release dquots (and their references) if any.
  398 * The inode should be locked EXCL except when this is called by
 399 * xfs_ireclaim.
 400 */
 401void
 402xfs_qm_dqdetach(
 403	xfs_inode_t	*ip)
 404{
 405	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 406		return;
 407
 408	trace_xfs_dquot_dqdetach(ip);
 409
 410	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 411	if (ip->i_udquot) {
 412		xfs_qm_dqrele(ip->i_udquot);
 413		ip->i_udquot = NULL;
 414	}
 415	if (ip->i_gdquot) {
 416		xfs_qm_dqrele(ip->i_gdquot);
 417		ip->i_gdquot = NULL;
 418	}
 419	if (ip->i_pdquot) {
 420		xfs_qm_dqrele(ip->i_pdquot);
 421		ip->i_pdquot = NULL;
 422	}
 423}
 424
 425struct xfs_qm_isolate {
 426	struct list_head	buffers;
 427	struct list_head	dispose;
 428};
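/*
 * The shrinker scan below fills these two lists: "buffers" collects the
 * delwri buffers produced by flushing dirty dquots, while "dispose"
 * collects clean, unreferenced dquots that can be freed once those
 * buffers have been submitted.
 */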
 429
 430static enum lru_status
 431xfs_qm_dquot_isolate(
 432	struct list_head	*item,
 433	struct list_lru_one	*lru,
 434	spinlock_t		*lru_lock,
 435	void			*arg)
 436		__releases(lru_lock) __acquires(lru_lock)
 437{
 438	struct xfs_dquot	*dqp = container_of(item,
 439						struct xfs_dquot, q_lru);
 440	struct xfs_qm_isolate	*isol = arg;
 441
 442	if (!xfs_dqlock_nowait(dqp))
 443		goto out_miss_busy;
 444
  445	/*
  446	 * This dquot has acquired a reference in the meantime; remove it from
  447	 * the freelist and try again.
  448	 */
 449	if (dqp->q_nrefs) {
 450		xfs_dqunlock(dqp);
 451		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 452
 453		trace_xfs_dqreclaim_want(dqp);
 454		list_lru_isolate(lru, &dqp->q_lru);
 455		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 456		return LRU_REMOVED;
 457	}
 458
 459	/*
 460	 * If the dquot is dirty, flush it. If it's already being flushed, just
 461	 * skip it so there is time for the IO to complete before we try to
 462	 * reclaim it again on the next LRU pass.
 463	 */
 464	if (!xfs_dqflock_nowait(dqp)) {
 465		xfs_dqunlock(dqp);
 466		goto out_miss_busy;
 467	}
 468
 469	if (XFS_DQ_IS_DIRTY(dqp)) {
 470		struct xfs_buf	*bp = NULL;
 471		int		error;
 472
 473		trace_xfs_dqreclaim_dirty(dqp);
 474
 475		/* we have to drop the LRU lock to flush the dquot */
 476		spin_unlock(lru_lock);
 477
 478		error = xfs_qm_dqflush(dqp, &bp);
 479		if (error) {
 480			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
 481				 __func__, dqp);
 482			goto out_unlock_dirty;
 483		}
 484
 485		xfs_buf_delwri_queue(bp, &isol->buffers);
 486		xfs_buf_relse(bp);
 487		goto out_unlock_dirty;
 488	}
 489	xfs_dqfunlock(dqp);
 490
 491	/*
 492	 * Prevent lookups now that we are past the point of no return.
 493	 */
 494	dqp->dq_flags |= XFS_DQ_FREEING;
 495	xfs_dqunlock(dqp);
 496
 497	ASSERT(dqp->q_nrefs == 0);
 498	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 499	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 500	trace_xfs_dqreclaim_done(dqp);
 501	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 502	return LRU_REMOVED;
 503
 504out_miss_busy:
 505	trace_xfs_dqreclaim_busy(dqp);
 506	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 507	return LRU_SKIP;
 508
 509out_unlock_dirty:
 510	trace_xfs_dqreclaim_busy(dqp);
 511	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 512	xfs_dqunlock(dqp);
 513	spin_lock(lru_lock);
 514	return LRU_RETRY;
 515}
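/*
 * Summary of the isolate contract above: LRU_REMOVED is returned both
 * for dquots that regained a reference (simply dropped from the LRU)
 * and for clean, unreferenced dquots moved to the dispose list;
 * LRU_SKIP leaves a busy dquot for the next pass; LRU_RETRY is used
 * after the LRU lock was dropped to flush a dirty dquot.
 */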
 516
 517static unsigned long
 518xfs_qm_shrink_scan(
 519	struct shrinker		*shrink,
 520	struct shrink_control	*sc)
 521{
 522	struct xfs_quotainfo	*qi = container_of(shrink,
 523					struct xfs_quotainfo, qi_shrinker);
 524	struct xfs_qm_isolate	isol;
 525	unsigned long		freed;
 526	int			error;
 527
 528	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 529		return 0;
 530
 531	INIT_LIST_HEAD(&isol.buffers);
 532	INIT_LIST_HEAD(&isol.dispose);
 533
 534	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 535				     xfs_qm_dquot_isolate, &isol);
 536
 537	error = xfs_buf_delwri_submit(&isol.buffers);
 538	if (error)
 539		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 540
 541	while (!list_empty(&isol.dispose)) {
 542		struct xfs_dquot	*dqp;
 543
 544		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 545		list_del_init(&dqp->q_lru);
 546		xfs_qm_dqfree_one(dqp);
 547	}
 548
 549	return freed;
 550}
 551
 552static unsigned long
 553xfs_qm_shrink_count(
 554	struct shrinker		*shrink,
 555	struct shrink_control	*sc)
 556{
 557	struct xfs_quotainfo	*qi = container_of(shrink,
 558					struct xfs_quotainfo, qi_shrinker);
 559
 560	return list_lru_shrink_count(&qi->qi_lru, sc);
 561}
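/*
 * These two callbacks form the count/scan pair that
 * xfs_qm_init_quotainfo() registers via register_shrinker() below.
 */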
 562
 563STATIC void
 564xfs_qm_set_defquota(
 565	xfs_mount_t	*mp,
 566	uint		type,
 567	xfs_quotainfo_t	*qinf)
 568{
 569	xfs_dquot_t		*dqp;
 570	struct xfs_def_quota    *defq;
 571	int			error;
 572
 573	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
 574
 575	if (!error) {
 576		xfs_disk_dquot_t        *ddqp = &dqp->q_core;
 577
 578		defq = xfs_get_defquota(dqp, qinf);
 579
 580		/*
 581		 * Timers and warnings have been already set, let's just set the
 582		 * default limits for this quota type
 583		 */
 584		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
 585		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
 586		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
 587		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
 588		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
 589		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
 590		xfs_qm_dqdestroy(dqp);
 591	}
 592}
 593
 594/*
  595 * This initializes all the quota information that's kept in the
  596 * mount structure.
 597 */
 598STATIC int
 599xfs_qm_init_quotainfo(
 600	xfs_mount_t	*mp)
 601{
 602	xfs_quotainfo_t *qinf;
 603	int		error;
 604	xfs_dquot_t	*dqp;
 605
 606	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 607
 608	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 609
 610	error = list_lru_init(&qinf->qi_lru);
 611	if (error)
 612		goto out_free_qinf;
 613
 614	/*
 615	 * See if quotainodes are setup, and if not, allocate them,
 616	 * and change the superblock accordingly.
 617	 */
 618	error = xfs_qm_init_quotainos(mp);
 619	if (error)
 620		goto out_free_lru;
 621
 622	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 623	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 624	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 625	mutex_init(&qinf->qi_tree_lock);
 626
 627	/* mutex used to serialize quotaoffs */
 628	mutex_init(&qinf->qi_quotaofflock);
 629
 630	/* Precalc some constants */
 631	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 632	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 633
 634	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 635
 636	/*
 637	 * We try to get the limits from the superuser's limits fields.
 638	 * This is quite hacky, but it is standard quota practice.
 639	 *
 640	 * Since we may not have done a quotacheck by this point, just read
 641	 * the dquot without attaching it to any hashtables or lists.
 642	 *
 643	 * Timers and warnings are globally set by the first timer found in
 644	 * user/group/proj quota types, otherwise a default value is used.
 645	 * This should be split into different fields per quota type.
 646	 */
 647	error = xfs_qm_dqread(mp, 0,
 648			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
 649			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
 650			  XFS_DQ_PROJ),
 651			XFS_QMOPT_DOWARN, &dqp);
 652
 653	if (!error) {
 654		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
 655
 656		/*
 657		 * The warnings and timers set the grace period given to
  658		 * a user or group before he or she can no longer perform
  659		 * any writes. If it is zero, a default is used.
 660		 */
 661		qinf->qi_btimelimit = ddqp->d_btimer ?
 662			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
 663		qinf->qi_itimelimit = ddqp->d_itimer ?
 664			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
 665		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
 666			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
 667		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
 668			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
 669		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
 670			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
 671		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
 672			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
 673		xfs_qm_dqdestroy(dqp);
 674	} else {
 675		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
 676		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
 677		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
 678		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
 679		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
 680		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
 681	}
 682
 683	if (XFS_IS_UQUOTA_RUNNING(mp))
 684		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
 685	if (XFS_IS_GQUOTA_RUNNING(mp))
 686		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
 687	if (XFS_IS_PQUOTA_RUNNING(mp))
 688		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
 689
 690	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 691	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 692	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 693	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 694	register_shrinker(&qinf->qi_shrinker);
 695	return 0;
 696
 697out_free_lru:
 698	list_lru_destroy(&qinf->qi_lru);
 699out_free_qinf:
 700	kmem_free(qinf);
 701	mp->m_quotainfo = NULL;
 702	return error;
 703}
 704
 705
 706/*
 707 * Gets called when unmounting a filesystem or when all quotas get
 708 * turned off.
 709 * This purges the quota inodes, destroys locks and frees itself.
 710 */
 711void
 712xfs_qm_destroy_quotainfo(
 713	xfs_mount_t	*mp)
 714{
 715	xfs_quotainfo_t *qi;
 716
 717	qi = mp->m_quotainfo;
 718	ASSERT(qi != NULL);
 719
 720	unregister_shrinker(&qi->qi_shrinker);
 721	list_lru_destroy(&qi->qi_lru);
 722
 723	if (qi->qi_uquotaip) {
 724		IRELE(qi->qi_uquotaip);
 725		qi->qi_uquotaip = NULL; /* paranoia */
 726	}
 727	if (qi->qi_gquotaip) {
 728		IRELE(qi->qi_gquotaip);
 729		qi->qi_gquotaip = NULL;
 730	}
 731	if (qi->qi_pquotaip) {
 732		IRELE(qi->qi_pquotaip);
 733		qi->qi_pquotaip = NULL;
 734	}
 735	mutex_destroy(&qi->qi_quotaofflock);
 736	kmem_free(qi);
 737	mp->m_quotainfo = NULL;
 738}
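/*
 * Note that teardown mirrors xfs_qm_init_quotainfo(): the shrinker is
 * unregistered and the LRU destroyed before the quota inodes are
 * released and the structure itself is freed.
 */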
 739
 740/*
  741 * Create an inode and return with a reference already taken, but
  742 * unlocked. This is how we create quota inodes.
 743 */
 744STATIC int
 745xfs_qm_qino_alloc(
 746	xfs_mount_t	*mp,
 747	xfs_inode_t	**ip,
 748	uint		flags)
 749{
 750	xfs_trans_t	*tp;
 751	int		error;
 752	int		committed;
 753	bool		need_alloc = true;
 754
 755	*ip = NULL;
 756	/*
 757	 * With superblock that doesn't have separate pquotino, we
 758	 * share an inode between gquota and pquota. If the on-disk
 759	 * superblock has GQUOTA and the filesystem is now mounted
 760	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 761	 * vice-versa.
 762	 */
 763	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 764			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 765		xfs_ino_t ino = NULLFSINO;
 766
 767		if ((flags & XFS_QMOPT_PQUOTA) &&
 768			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 769			ino = mp->m_sb.sb_gquotino;
 770			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
 771		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 772			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 773			ino = mp->m_sb.sb_pquotino;
 774			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
 775		}
 776		if (ino != NULLFSINO) {
 777			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
 778			if (error)
 779				return error;
 780			mp->m_sb.sb_gquotino = NULLFSINO;
 781			mp->m_sb.sb_pquotino = NULLFSINO;
 782			need_alloc = false;
 783		}
 784	}
 785
 786	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 787			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
 788	if (error)
 789		return error;
 790
 791	if (need_alloc) {
 792		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
 793								&committed);
 794		if (error) {
 795			xfs_trans_cancel(tp);
 796			return error;
 797		}
 798	}
 799
 800	/*
 801	 * Make the changes in the superblock, and log those too.
 802	 * sbfields arg may contain fields other than *QUOTINO;
 803	 * VERSIONNUM for example.
 804	 */
 805	spin_lock(&mp->m_sb_lock);
 806	if (flags & XFS_QMOPT_SBVERSION) {
 807		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 808
 809		xfs_sb_version_addquota(&mp->m_sb);
 810		mp->m_sb.sb_uquotino = NULLFSINO;
 811		mp->m_sb.sb_gquotino = NULLFSINO;
 812		mp->m_sb.sb_pquotino = NULLFSINO;
 813
 814		/* qflags will get updated fully _after_ quotacheck */
 815		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 816	}
 817	if (flags & XFS_QMOPT_UQUOTA)
 818		mp->m_sb.sb_uquotino = (*ip)->i_ino;
 819	else if (flags & XFS_QMOPT_GQUOTA)
 820		mp->m_sb.sb_gquotino = (*ip)->i_ino;
 821	else
 822		mp->m_sb.sb_pquotino = (*ip)->i_ino;
 823	spin_unlock(&mp->m_sb_lock);
 824	xfs_log_sb(tp);
 825
 826	error = xfs_trans_commit(tp);
 827	if (error) {
 828		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 829		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 830	}
 831	if (need_alloc)
 832		xfs_finish_inode_setup(*ip);
 833	return error;
 834}
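/*
 * Sketch of how xfs_qm_init_quotainos() below invokes this, one call
 * per missing quota inode:
 *
 *	error = xfs_qm_qino_alloc(mp, &uip, flags | XFS_QMOPT_UQUOTA);
 *
 * XFS_QMOPT_SBVERSION is set in flags on the first call when the
 * superblock does not yet carry the quota feature bit.
 */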
 835
 836
 837STATIC void
 838xfs_qm_reset_dqcounts(
 839	xfs_mount_t	*mp,
 840	xfs_buf_t	*bp,
 841	xfs_dqid_t	id,
 842	uint		type)
 843{
 844	struct xfs_dqblk	*dqb;
 845	int			j;
 846
 847	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 848
 849	/*
 850	 * Reset all counters and timers. They'll be
 851	 * started afresh by xfs_qm_quotacheck.
 852	 */
 853#ifdef DEBUG
 854	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 855	do_div(j, sizeof(xfs_dqblk_t));
 856	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 857#endif
 858	dqb = bp->b_addr;
 859	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 860		struct xfs_disk_dquot	*ddq;
 861
 862		ddq = (struct xfs_disk_dquot *)&dqb[j];
 863
 864		/*
 865		 * Do a sanity check, and if needed, repair the dqblk. Don't
 866		 * output any warnings because it's perfectly possible to
 867		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
 868		 */
 869		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
 870			    "xfs_quotacheck");
 871		/*
 872		 * Reset type in case we are reusing group quota file for
 873		 * project quotas or vice versa
 874		 */
 875		ddq->d_flags = type;
 876		ddq->d_bcount = 0;
 877		ddq->d_icount = 0;
 878		ddq->d_rtbcount = 0;
 879		ddq->d_btimer = 0;
 880		ddq->d_itimer = 0;
 881		ddq->d_rtbtimer = 0;
 882		ddq->d_bwarns = 0;
 883		ddq->d_iwarns = 0;
 884		ddq->d_rtbwarns = 0;
 885
 886		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 887			xfs_update_cksum((char *)&dqb[j],
 888					 sizeof(struct xfs_dqblk),
 889					 XFS_DQUOT_CRC_OFF);
 890		}
 891	}
 892}
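/*
 * Note: the dquot buffer verifier checks the CRC at write time, so after
 * zeroing the counters above, the checksum of each xfs_dqblk must be
 * recomputed on v5 (CRC-enabled) filesystems, as done here with
 * xfs_update_cksum().
 */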
 893
 894STATIC int
 895xfs_qm_dqiter_bufs(
 896	struct xfs_mount	*mp,
 897	xfs_dqid_t		firstid,
 898	xfs_fsblock_t		bno,
 899	xfs_filblks_t		blkcnt,
 900	uint			flags,
 901	struct list_head	*buffer_list)
 902{
 903	struct xfs_buf		*bp;
 904	int			error;
 905	int			type;
 906
 907	ASSERT(blkcnt > 0);
 908	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
 909		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
 910	error = 0;
 911
 912	/*
 913	 * Blkcnt arg can be a very big number, and might even be
 914	 * larger than the log itself. So, we have to break it up into
 915	 * manageable-sized transactions.
 916	 * Note that we don't start a permanent transaction here; we might
 917	 * not be able to get a log reservation for the whole thing up front,
 918	 * and we don't really care to either, because we just discard
 919	 * everything if we were to crash in the middle of this loop.
 920	 */
 921	while (blkcnt--) {
 922		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 923			      XFS_FSB_TO_DADDR(mp, bno),
 924			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 925			      &xfs_dquot_buf_ops);
 926
 927		/*
  928		 * CRC and validation errors will return -EFSCORRUPTED here. If
 929		 * this occurs, re-read without CRC validation so that we can
 930		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 931		 * will leave a trace in the log indicating corruption has
 932		 * been detected.
 933		 */
 934		if (error == -EFSCORRUPTED) {
 935			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 936				      XFS_FSB_TO_DADDR(mp, bno),
 937				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 938				      NULL);
 939		}
 940
 941		if (error)
 942			break;
 943
 944		/*
 945		 * A corrupt buffer might not have a verifier attached, so
 946		 * make sure we have the correct one attached before writeback
 947		 * occurs.
 948		 */
 949		bp->b_ops = &xfs_dquot_buf_ops;
 950		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 951		xfs_buf_delwri_queue(bp, buffer_list);
 952		xfs_buf_relse(bp);
 953
 954		/* goto the next block. */
 955		bno++;
 956		firstid += mp->m_quotainfo->qi_dqperchunk;
 957	}
 958
 959	return error;
 960}
 961
 962/*
 963 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 964 * caller supplied function for every chunk of dquots that we find.
 965 */
 966STATIC int
 967xfs_qm_dqiterate(
 968	struct xfs_mount	*mp,
 969	struct xfs_inode	*qip,
 970	uint			flags,
 971	struct list_head	*buffer_list)
 972{
 973	struct xfs_bmbt_irec	*map;
 974	int			i, nmaps;	/* number of map entries */
 975	int			error;		/* return value */
 976	xfs_fileoff_t		lblkno;
 977	xfs_filblks_t		maxlblkcnt;
 978	xfs_dqid_t		firstid;
 979	xfs_fsblock_t		rablkno;
 980	xfs_filblks_t		rablkcnt;
 981
 982	error = 0;
 983	/*
 984	 * This looks racy, but we can't keep an inode lock across a
 985	 * trans_reserve. But, this gets called during quotacheck, and that
  986	 * happens only at mount time, which is single-threaded.
 987	 */
 988	if (qip->i_d.di_nblocks == 0)
 989		return 0;
 990
 991	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
 992
 993	lblkno = 0;
 994	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 995	do {
 996		uint		lock_mode;
 997
 998		nmaps = XFS_DQITER_MAP_SIZE;
 999		/*
1000		 * We aren't changing the inode itself. Just changing
1001		 * some of its data. No new blocks are added here, and
1002		 * the inode is never added to the transaction.
1003		 */
1004		lock_mode = xfs_ilock_data_map_shared(qip);
1005		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1006				       map, &nmaps, 0);
1007		xfs_iunlock(qip, lock_mode);
1008		if (error)
1009			break;
1010
1011		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1012		for (i = 0; i < nmaps; i++) {
1013			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1014			ASSERT(map[i].br_blockcount);
1015
1016
1017			lblkno += map[i].br_blockcount;
1018
1019			if (map[i].br_startblock == HOLESTARTBLOCK)
1020				continue;
1021
1022			firstid = (xfs_dqid_t) map[i].br_startoff *
1023				mp->m_quotainfo->qi_dqperchunk;
1024			/*
1025			 * Do a read-ahead on the next extent.
1026			 */
1027			if ((i+1 < nmaps) &&
1028			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1029				rablkcnt =  map[i+1].br_blockcount;
1030				rablkno = map[i+1].br_startblock;
1031				while (rablkcnt--) {
1032					xfs_buf_readahead(mp->m_ddev_targp,
1033					       XFS_FSB_TO_DADDR(mp, rablkno),
1034					       mp->m_quotainfo->qi_dqchunklen,
1035					       &xfs_dquot_buf_ops);
1036					rablkno++;
1037				}
1038			}
1039			/*
1040			 * Iterate thru all the blks in the extent and
1041			 * reset the counters of all the dquots inside them.
1042			 */
1043			error = xfs_qm_dqiter_bufs(mp, firstid,
1044						   map[i].br_startblock,
1045						   map[i].br_blockcount,
1046						   flags, buffer_list);
1047			if (error)
1048				goto out;
1049		}
1050	} while (nmaps > 0);
1051
1052out:
1053	kmem_free(map);
1054	return error;
1055}
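/*
 * The loop above walks the quota inode's data fork with
 * xfs_bmapi_read(), issues readahead for the following extent, and
 * resets every dquot buffer in each mapped extent; holes are skipped.
 */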
1056
1057/*
1058 * Called by dqusage_adjust in doing a quotacheck.
1059 *
 1060 * Given the inode and a dquot id, this updates both the incore dquot as well
1061 * as the buffer copy. This is so that once the quotacheck is done, we can
1062 * just log all the buffers, as opposed to logging numerous updates to
1063 * individual dquots.
1064 */
1065STATIC int
1066xfs_qm_quotacheck_dqadjust(
1067	struct xfs_inode	*ip,
1068	xfs_dqid_t		id,
1069	uint			type,
1070	xfs_qcnt_t		nblks,
1071	xfs_qcnt_t		rtblks)
1072{
1073	struct xfs_mount	*mp = ip->i_mount;
1074	struct xfs_dquot	*dqp;
1075	int			error;
1076
1077	error = xfs_qm_dqget(mp, ip, id, type,
1078			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1079	if (error) {
1080		/*
1081		 * Shouldn't be able to turn off quotas here.
1082		 */
1083		ASSERT(error != -ESRCH);
1084		ASSERT(error != -ENOENT);
1085		return error;
1086	}
1087
1088	trace_xfs_dqadjust(dqp);
1089
1090	/*
1091	 * Adjust the inode count and the block count to reflect this inode's
1092	 * resource usage.
1093	 */
1094	be64_add_cpu(&dqp->q_core.d_icount, 1);
1095	dqp->q_res_icount++;
1096	if (nblks) {
1097		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1098		dqp->q_res_bcount += nblks;
1099	}
1100	if (rtblks) {
1101		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1102		dqp->q_res_rtbcount += rtblks;
1103	}
1104
1105	/*
1106	 * Set default limits, adjust timers (since we changed usages)
1107	 *
1108	 * There are no timers for the default values set in the root dquot.
1109	 */
1110	if (dqp->q_core.d_id) {
1111		xfs_qm_adjust_dqlimits(mp, dqp);
1112		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1113	}
1114
1115	dqp->dq_flags |= XFS_DQ_DIRTY;
1116	xfs_qm_dqput(dqp);
1117	return 0;
1118}
1119
1120STATIC int
1121xfs_qm_get_rtblks(
1122	xfs_inode_t	*ip,
1123	xfs_qcnt_t	*O_rtblks)
1124{
1125	xfs_filblks_t	rtblks;			/* total rt blks */
1126	xfs_extnum_t	idx;			/* extent record index */
1127	xfs_ifork_t	*ifp;			/* inode fork pointer */
1128	xfs_extnum_t	nextents;		/* number of extent entries */
1129	int		error;
1130
1131	ASSERT(XFS_IS_REALTIME_INODE(ip));
1132	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1133	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1134		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1135			return error;
1136	}
1137	rtblks = 0;
1138	nextents = xfs_iext_count(ifp);
1139	for (idx = 0; idx < nextents; idx++)
1140		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1141	*O_rtblks = (xfs_qcnt_t)rtblks;
1142	return 0;
1143}
1144
1145/*
 1146 * Callback routine supplied to bulkstat(). Given an inumber, find its
1147 * dquots and update them to account for resources taken by that inode.
1148 */
1149/* ARGSUSED */
1150STATIC int
1151xfs_qm_dqusage_adjust(
1152	xfs_mount_t	*mp,		/* mount point for filesystem */
1153	xfs_ino_t	ino,		/* inode number to get data for */
1154	void		__user *buffer,	/* not used */
1155	int		ubsize,		/* not used */
1156	int		*ubused,	/* not used */
1157	int		*res)		/* result code value */
1158{
1159	xfs_inode_t	*ip;
1160	xfs_qcnt_t	nblks, rtblks = 0;
1161	int		error;
1162
1163	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1164
1165	/*
1166	 * rootino must have its resources accounted for, not so with the quota
1167	 * inodes.
1168	 */
1169	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1170		*res = BULKSTAT_RV_NOTHING;
1171		return -EINVAL;
1172	}
1173
1174	/*
1175	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1176	 * interface expects the inode to be exclusively locked because that's
1177	 * the case in all other instances. It's OK that we do this because
1178	 * quotacheck is done only at mount time.
1179	 */
1180	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
1181			 &ip);
1182	if (error) {
1183		*res = BULKSTAT_RV_NOTHING;
1184		return error;
1185	}
1186
1187	ASSERT(ip->i_delayed_blks == 0);
1188
1189	if (XFS_IS_REALTIME_INODE(ip)) {
1190		/*
1191		 * Walk thru the extent list and count the realtime blocks.
1192		 */
1193		error = xfs_qm_get_rtblks(ip, &rtblks);
1194		if (error)
1195			goto error0;
1196	}
1197
1198	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1199
1200	/*
1201	 * Add the (disk blocks and inode) resources occupied by this
1202	 * inode to its dquots. We do this adjustment in the incore dquot,
1203	 * and also copy the changes to its buffer.
1204	 * We don't care about putting these changes in a transaction
1205	 * envelope because if we crash in the middle of a 'quotacheck'
1206	 * we have to start from the beginning anyway.
1207	 * Once we're done, we'll log all the dquot bufs.
1208	 *
1209	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1210	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1211	 */
1212	if (XFS_IS_UQUOTA_ON(mp)) {
1213		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1214						   XFS_DQ_USER, nblks, rtblks);
1215		if (error)
1216			goto error0;
1217	}
1218
1219	if (XFS_IS_GQUOTA_ON(mp)) {
1220		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1221						   XFS_DQ_GROUP, nblks, rtblks);
1222		if (error)
1223			goto error0;
1224	}
1225
1226	if (XFS_IS_PQUOTA_ON(mp)) {
1227		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1228						   XFS_DQ_PROJ, nblks, rtblks);
1229		if (error)
1230			goto error0;
1231	}
1232
1233	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1234	IRELE(ip);
1235	*res = BULKSTAT_RV_DIDONE;
1236	return 0;
1237
1238error0:
1239	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1240	IRELE(ip);
1241	*res = BULKSTAT_RV_GIVEUP;
1242	return error;
1243}
1244
1245STATIC int
1246xfs_qm_flush_one(
1247	struct xfs_dquot	*dqp,
1248	void			*data)
1249{
1250	struct list_head	*buffer_list = data;
1251	struct xfs_buf		*bp = NULL;
1252	int			error = 0;
1253
1254	xfs_dqlock(dqp);
1255	if (dqp->dq_flags & XFS_DQ_FREEING)
1256		goto out_unlock;
1257	if (!XFS_DQ_IS_DIRTY(dqp))
1258		goto out_unlock;
1259
1260	xfs_dqflock(dqp);
1261	error = xfs_qm_dqflush(dqp, &bp);
1262	if (error)
1263		goto out_unlock;
1264
1265	xfs_buf_delwri_queue(bp, buffer_list);
1266	xfs_buf_relse(bp);
1267out_unlock:
1268	xfs_dqunlock(dqp);
1269	return error;
1270}
1271
1272/*
1273 * Walk thru all the filesystem inodes and construct a consistent view
1274 * of the disk quota world. If the quotacheck fails, disable quotas.
1275 */
1276STATIC int
1277xfs_qm_quotacheck(
1278	xfs_mount_t	*mp)
1279{
1280	int			done, count, error, error2;
1281	xfs_ino_t		lastino;
1282	size_t			structsz;
1283	uint			flags;
1284	LIST_HEAD		(buffer_list);
1285	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1286	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1287	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1288
1289	count = INT_MAX;
1290	structsz = 1;
1291	lastino = 0;
1292	flags = 0;
1293
1294	ASSERT(uip || gip || pip);
1295	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1296
1297	xfs_notice(mp, "Quotacheck needed: Please wait.");
1298
1299	/*
1300	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1301	 * their counters to zero. We need a clean slate.
1302	 * We don't log our changes till later.
1303	 */
1304	if (uip) {
1305		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1306					 &buffer_list);
1307		if (error)
1308			goto error_return;
1309		flags |= XFS_UQUOTA_CHKD;
1310	}
1311
1312	if (gip) {
1313		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1314					 &buffer_list);
1315		if (error)
1316			goto error_return;
1317		flags |= XFS_GQUOTA_CHKD;
1318	}
1319
1320	if (pip) {
1321		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1322					 &buffer_list);
1323		if (error)
1324			goto error_return;
1325		flags |= XFS_PQUOTA_CHKD;
1326	}
1327
1328	do {
1329		/*
1330		 * Iterate thru all the inodes in the file system,
1331		 * adjusting the corresponding dquot counters in core.
1332		 */
1333		error = xfs_bulkstat(mp, &lastino, &count,
1334				     xfs_qm_dqusage_adjust,
1335				     structsz, NULL, &done);
1336		if (error)
1337			break;
1338
1339	} while (!done);
1340
1341	/*
1342	 * We've made all the changes that we need to make incore.  Flush them
1343	 * down to disk buffers if everything was updated successfully.
1344	 */
1345	if (XFS_IS_UQUOTA_ON(mp)) {
1346		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1347					  &buffer_list);
1348	}
1349	if (XFS_IS_GQUOTA_ON(mp)) {
1350		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1351					   &buffer_list);
1352		if (!error)
1353			error = error2;
1354	}
1355	if (XFS_IS_PQUOTA_ON(mp)) {
1356		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1357					   &buffer_list);
1358		if (!error)
1359			error = error2;
1360	}
1361
1362	error2 = xfs_buf_delwri_submit(&buffer_list);
1363	if (!error)
1364		error = error2;
1365
1366	/*
1367	 * We can get this error if we couldn't do a dquot allocation inside
1368	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1369	 * dirty dquots that might be cached, we just want to get rid of them
1370	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1371	 * at this point (because we intentionally didn't in dqget_noattach).
1372	 */
1373	if (error) {
1374		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1375		goto error_return;
1376	}
1377
1378	/*
1379	 * If one type of quotas is off, then it will lose its
1380	 * quotachecked status, since we won't be doing accounting for
1381	 * that type anymore.
1382	 */
1383	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1384	mp->m_qflags |= flags;
1385
1386 error_return:
1387	while (!list_empty(&buffer_list)) {
1388		struct xfs_buf *bp =
1389			list_first_entry(&buffer_list, struct xfs_buf, b_list);
1390		list_del_init(&bp->b_list);
1391		xfs_buf_relse(bp);
1392	}
1393
1394	if (error) {
1395		xfs_warn(mp,
1396	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1397			error);
1398		/*
1399		 * We must turn off quotas.
1400		 */
1401		ASSERT(mp->m_quotainfo != NULL);
1402		xfs_qm_destroy_quotainfo(mp);
1403		if (xfs_mount_reset_sbqflags(mp)) {
1404			xfs_warn(mp,
1405				"Quotacheck: Failed to reset quota flags.");
1406		}
1407	} else
1408		xfs_notice(mp, "Quotacheck: Done.");
1409	return error;
1410}
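/*
 * Quotacheck thus runs in three phases: (1) reset all on-disk dquot
 * counters via xfs_qm_dqiterate(), (2) bulkstat every inode and adjust
 * the incore dquots accordingly, (3) flush the dirty dquots to their
 * buffers and submit the delwri list in one go.
 */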
1411
1412/*
1413 * This is called from xfs_mountfs to start quotas and initialize all
1414 * necessary data structures like quotainfo.  This is also responsible for
1415 * running a quotacheck as necessary.  We are guaranteed that the superblock
1416 * is consistently read in at this point.
1417 *
1418 * If we fail here, the mount will continue with quota turned off. We don't
 1419 * need to indicate success or failure at all.
1420 */
1421void
1422xfs_qm_mount_quotas(
1423	struct xfs_mount	*mp)
1424{
1425	int			error = 0;
1426	uint			sbf;
1427
1428	/*
 1429	 * If quotas on realtime volumes are not supported, we disable
1430	 * quotas immediately.
1431	 */
1432	if (mp->m_sb.sb_rextents) {
1433		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1434		mp->m_qflags = 0;
1435		goto write_changes;
1436	}
1437
1438	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1439
1440	/*
1441	 * Allocate the quotainfo structure inside the mount struct, and
1442	 * create quotainode(s), and change/rev superblock if necessary.
1443	 */
1444	error = xfs_qm_init_quotainfo(mp);
1445	if (error) {
1446		/*
1447		 * We must turn off quotas.
1448		 */
1449		ASSERT(mp->m_quotainfo == NULL);
1450		mp->m_qflags = 0;
1451		goto write_changes;
1452	}
1453	/*
1454	 * If any of the quotas are not consistent, do a quotacheck.
1455	 */
1456	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1457		error = xfs_qm_quotacheck(mp);
1458		if (error) {
1459			/* Quotacheck failed and disabled quotas. */
1460			return;
1461		}
1462	}
1463	/*
1464	 * If one type of quotas is off, then it will lose its
1465	 * quotachecked status, since we won't be doing accounting for
1466	 * that type anymore.
1467	 */
1468	if (!XFS_IS_UQUOTA_ON(mp))
1469		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1470	if (!XFS_IS_GQUOTA_ON(mp))
1471		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1472	if (!XFS_IS_PQUOTA_ON(mp))
1473		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1474
1475 write_changes:
1476	/*
1477	 * We actually don't have to acquire the m_sb_lock at all.
1478	 * This can only be called from mount, and that's single threaded. XXX
1479	 */
1480	spin_lock(&mp->m_sb_lock);
1481	sbf = mp->m_sb.sb_qflags;
1482	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1483	spin_unlock(&mp->m_sb_lock);
1484
1485	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1486		if (xfs_sync_sb(mp, false)) {
1487			/*
1488			 * We could only have been turning quotas off.
1489			 * We aren't in very good shape actually because
1490			 * the incore structures are convinced that quotas are
1491			 * off, but the on disk superblock doesn't know that !
1492			 */
1493			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1494			xfs_alert(mp, "%s: Superblock update failed!",
1495				__func__);
1496		}
1497	}
1498
1499	if (error) {
1500		xfs_warn(mp, "Failed to initialize disk quotas.");
1501		return;
1502	}
1503}
1504
1505/*
1506 * This is called after the superblock has been read in and we're ready to
1507 * iget the quota inodes.
1508 */
1509STATIC int
1510xfs_qm_init_quotainos(
1511	xfs_mount_t	*mp)
1512{
1513	struct xfs_inode	*uip = NULL;
1514	struct xfs_inode	*gip = NULL;
1515	struct xfs_inode	*pip = NULL;
1516	int			error;
1517	uint			flags = 0;
1518
1519	ASSERT(mp->m_quotainfo);
1520
1521	/*
1522	 * Get the uquota and gquota inodes
1523	 */
1524	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1525		if (XFS_IS_UQUOTA_ON(mp) &&
1526		    mp->m_sb.sb_uquotino != NULLFSINO) {
1527			ASSERT(mp->m_sb.sb_uquotino > 0);
1528			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1529					     0, 0, &uip);
1530			if (error)
1531				return error;
1532		}
1533		if (XFS_IS_GQUOTA_ON(mp) &&
1534		    mp->m_sb.sb_gquotino != NULLFSINO) {
1535			ASSERT(mp->m_sb.sb_gquotino > 0);
1536			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1537					     0, 0, &gip);
1538			if (error)
1539				goto error_rele;
1540		}
1541		if (XFS_IS_PQUOTA_ON(mp) &&
1542		    mp->m_sb.sb_pquotino != NULLFSINO) {
1543			ASSERT(mp->m_sb.sb_pquotino > 0);
1544			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1545					     0, 0, &pip);
1546			if (error)
1547				goto error_rele;
1548		}
1549	} else {
1550		flags |= XFS_QMOPT_SBVERSION;
1551	}
1552
1553	/*
1554	 * Create the three inodes, if they don't exist already. The changes
1555	 * made above will get added to a transaction and logged in one of
1556	 * the qino_alloc calls below.  If the device is readonly,
1557	 * temporarily switch to read-write to do this.
1558	 */
1559	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1560		error = xfs_qm_qino_alloc(mp, &uip,
1561					      flags | XFS_QMOPT_UQUOTA);
1562		if (error)
1563			goto error_rele;
1564
1565		flags &= ~XFS_QMOPT_SBVERSION;
1566	}
1567	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1568		error = xfs_qm_qino_alloc(mp, &gip,
1569					  flags | XFS_QMOPT_GQUOTA);
1570		if (error)
1571			goto error_rele;
1572
1573		flags &= ~XFS_QMOPT_SBVERSION;
1574	}
1575	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1576		error = xfs_qm_qino_alloc(mp, &pip,
1577					  flags | XFS_QMOPT_PQUOTA);
1578		if (error)
1579			goto error_rele;
1580	}
1581
1582	mp->m_quotainfo->qi_uquotaip = uip;
1583	mp->m_quotainfo->qi_gquotaip = gip;
1584	mp->m_quotainfo->qi_pquotaip = pip;
1585
1586	return 0;
1587
1588error_rele:
1589	if (uip)
1590		IRELE(uip);
1591	if (gip)
1592		IRELE(gip);
1593	if (pip)
1594		IRELE(pip);
1595	return error;
1596}
1597
1598STATIC void
1599xfs_qm_dqfree_one(
1600	struct xfs_dquot	*dqp)
1601{
1602	struct xfs_mount	*mp = dqp->q_mount;
1603	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1604
1605	mutex_lock(&qi->qi_tree_lock);
1606	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1607			  be32_to_cpu(dqp->q_core.d_id));
1608
1609	qi->qi_dquots--;
1610	mutex_unlock(&qi->qi_tree_lock);
1611
1612	xfs_qm_dqdestroy(dqp);
1613}
1614
1615/* --------------- utility functions for vnodeops ---------------- */
1616
1617
1618/*
 1619 * Given an inode, a uid, gid and prid, make sure that we have
1620 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1621 * quotas by creating this file.
1622 * This also attaches dquot(s) to the given inode after locking it,
1623 * and returns the dquots corresponding to the uid and/or gid.
1624 *
1625 * in	: inode (unlocked)
1626 * out	: udquot, gdquot with references taken and unlocked
1627 */
1628int
1629xfs_qm_vop_dqalloc(
1630	struct xfs_inode	*ip,
1631	xfs_dqid_t		uid,
1632	xfs_dqid_t		gid,
1633	prid_t			prid,
1634	uint			flags,
1635	struct xfs_dquot	**O_udqpp,
1636	struct xfs_dquot	**O_gdqpp,
1637	struct xfs_dquot	**O_pdqpp)
1638{
1639	struct xfs_mount	*mp = ip->i_mount;
1640	struct xfs_dquot	*uq = NULL;
1641	struct xfs_dquot	*gq = NULL;
1642	struct xfs_dquot	*pq = NULL;
1643	int			error;
1644	uint			lockflags;
1645
1646	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1647		return 0;
1648
1649	lockflags = XFS_ILOCK_EXCL;
1650	xfs_ilock(ip, lockflags);
1651
1652	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1653		gid = ip->i_d.di_gid;
1654
1655	/*
1656	 * Attach the dquot(s) to this inode, doing a dquot allocation
1657	 * if necessary. The dquot(s) will not be locked.
1658	 */
1659	if (XFS_NOT_DQATTACHED(mp, ip)) {
1660		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1661		if (error) {
1662			xfs_iunlock(ip, lockflags);
1663			return error;
1664		}
1665	}
1666
1667	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1668		if (ip->i_d.di_uid != uid) {
1669			/*
1670			 * What we need is the dquot that has this uid, and
1671			 * if we send the inode to dqget, the uid of the inode
1672			 * takes priority over what's sent in the uid argument.
1673			 * We must unlock inode here before calling dqget if
1674			 * we're not sending the inode, because otherwise
1675			 * we'll deadlock by doing trans_reserve while
1676			 * holding ilock.
1677			 */
1678			xfs_iunlock(ip, lockflags);
1679			error = xfs_qm_dqget(mp, NULL, uid,
1680						 XFS_DQ_USER,
1681						 XFS_QMOPT_DQALLOC |
1682						 XFS_QMOPT_DOWARN,
1683						 &uq);
1684			if (error) {
1685				ASSERT(error != -ENOENT);
1686				return error;
1687			}
1688			/*
1689			 * Get the ilock in the right order.
1690			 */
1691			xfs_dqunlock(uq);
1692			lockflags = XFS_ILOCK_SHARED;
1693			xfs_ilock(ip, lockflags);
1694		} else {
1695			/*
1696			 * Take an extra reference, because we'll return
1697			 * this to caller
1698			 */
1699			ASSERT(ip->i_udquot);
1700			uq = xfs_qm_dqhold(ip->i_udquot);
1701		}
1702	}
1703	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1704		if (ip->i_d.di_gid != gid) {
1705			xfs_iunlock(ip, lockflags);
1706			error = xfs_qm_dqget(mp, NULL, gid,
1707						 XFS_DQ_GROUP,
1708						 XFS_QMOPT_DQALLOC |
1709						 XFS_QMOPT_DOWARN,
1710						 &gq);
1711			if (error) {
1712				ASSERT(error != -ENOENT);
1713				goto error_rele;
1714			}
1715			xfs_dqunlock(gq);
1716			lockflags = XFS_ILOCK_SHARED;
1717			xfs_ilock(ip, lockflags);
1718		} else {
1719			ASSERT(ip->i_gdquot);
1720			gq = xfs_qm_dqhold(ip->i_gdquot);
1721		}
1722	}
1723	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1724		if (xfs_get_projid(ip) != prid) {
1725			xfs_iunlock(ip, lockflags);
1726			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1727						 XFS_DQ_PROJ,
1728						 XFS_QMOPT_DQALLOC |
1729						 XFS_QMOPT_DOWARN,
1730						 &pq);
1731			if (error) {
1732				ASSERT(error != -ENOENT);
1733				goto error_rele;
1734			}
1735			xfs_dqunlock(pq);
1736			lockflags = XFS_ILOCK_SHARED;
1737			xfs_ilock(ip, lockflags);
1738		} else {
1739			ASSERT(ip->i_pdquot);
1740			pq = xfs_qm_dqhold(ip->i_pdquot);
1741		}
1742	}
1743	if (uq)
1744		trace_xfs_dquot_dqalloc(ip);
1745
1746	xfs_iunlock(ip, lockflags);
1747	if (O_udqpp)
1748		*O_udqpp = uq;
1749	else
1750		xfs_qm_dqrele(uq);
1751	if (O_gdqpp)
1752		*O_gdqpp = gq;
1753	else
1754		xfs_qm_dqrele(gq);
1755	if (O_pdqpp)
1756		*O_pdqpp = pq;
1757	else
1758		xfs_qm_dqrele(pq);
1759	return 0;
1760
1761error_rele:
1762	xfs_qm_dqrele(gq);
1763	xfs_qm_dqrele(uq);
1764	return error;
1765}
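/*
 * Sketch of a typical create-path caller (argument values illustrative):
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp, &pdqp);
 *
 * The returned dquots are later attached to the new inode via
 * xfs_qm_vop_create_dqattach().
 */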
1766
1767/*
1768 * Actually transfer ownership, and do dquot modifications.
1769 * These were already reserved.
1770 */
1771xfs_dquot_t *
1772xfs_qm_vop_chown(
1773	xfs_trans_t	*tp,
1774	xfs_inode_t	*ip,
1775	xfs_dquot_t	**IO_olddq,
1776	xfs_dquot_t	*newdq)
1777{
1778	xfs_dquot_t	*prevdq;
1779	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1780				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1781
1782
1783	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1784	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1785
1786	/* old dquot */
1787	prevdq = *IO_olddq;
1788	ASSERT(prevdq);
1789	ASSERT(prevdq != newdq);
1790
1791	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1792	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1793
1794	/* the sparkling new dquot */
1795	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1796	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1797
1798	/*
1799	 * Take an extra reference, because the inode is going to keep
1800	 * this dquot pointer even after the trans_commit.
1801	 */
1802	*IO_olddq = xfs_qm_dqhold(newdq);
1803
1804	return prevdq;
1805}
1806
1807/*
1808 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1809 */
1810int
1811xfs_qm_vop_chown_reserve(
1812	struct xfs_trans	*tp,
1813	struct xfs_inode	*ip,
1814	struct xfs_dquot	*udqp,
1815	struct xfs_dquot	*gdqp,
1816	struct xfs_dquot	*pdqp,
1817	uint			flags)
1818{
1819	struct xfs_mount	*mp = ip->i_mount;
1820	uint			delblks, blkflags, prjflags = 0;
1821	struct xfs_dquot	*udq_unres = NULL;
1822	struct xfs_dquot	*gdq_unres = NULL;
1823	struct xfs_dquot	*pdq_unres = NULL;
1824	struct xfs_dquot	*udq_delblks = NULL;
1825	struct xfs_dquot	*gdq_delblks = NULL;
1826	struct xfs_dquot	*pdq_delblks = NULL;
1827	int			error;
1828
1829
1830	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1831	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1832
1833	delblks = ip->i_delayed_blks;
1834	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1835			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1836
1837	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1838	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
1839		udq_delblks = udqp;
1840		/*
1841		 * If there are delayed allocation blocks, then we have to
1842		 * unreserve those from the old dquot, and add them to the
1843		 * new dquot.
1844		 */
1845		if (delblks) {
1846			ASSERT(ip->i_udquot);
1847			udq_unres = ip->i_udquot;
1848		}
1849	}
1850	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1851	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
1852		gdq_delblks = gdqp;
1853		if (delblks) {
1854			ASSERT(ip->i_gdquot);
1855			gdq_unres = ip->i_gdquot;
1856		}
1857	}
1858
1859	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1860	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
1861		prjflags = XFS_QMOPT_ENOSPC;
1862		pdq_delblks = pdqp;
1863		if (delblks) {
1864			ASSERT(ip->i_pdquot);
1865			pdq_unres = ip->i_pdquot;
1866		}
1867	}
1868
1869	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1870				udq_delblks, gdq_delblks, pdq_delblks,
1871				ip->i_d.di_nblocks, 1,
1872				flags | blkflags | prjflags);
1873	if (error)
1874		return error;
1875
1876	/*
1877	 * Do the delayed blks reservations/unreservations now. Since, these
1878	 * are done without the help of a transaction, if a reservation fails
1879	 * its previous reservations won't be automatically undone by trans
1880	 * code. So, we have to do it manually here.
1881	 */
1882	if (delblks) {
1883		/*
1884		 * Do the reservations first. Unreservation can't fail.
1885		 */
1886		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1887		ASSERT(udq_unres || gdq_unres || pdq_unres);
1888		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1889			    udq_delblks, gdq_delblks, pdq_delblks,
1890			    (xfs_qcnt_t)delblks, 0,
1891			    flags | blkflags | prjflags);
1892		if (error)
1893			return error;
1894		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1895				udq_unres, gdq_unres, pdq_unres,
1896				-((xfs_qcnt_t)delblks), 0, blkflags);
1897	}
1898
1899	return 0;
1900}
1901
1902int
1903xfs_qm_vop_rename_dqattach(
1904	struct xfs_inode	**i_tab)
1905{
1906	struct xfs_mount	*mp = i_tab[0]->i_mount;
1907	int			i;
1908
1909	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1910		return 0;
1911
1912	for (i = 0; (i < 4 && i_tab[i]); i++) {
1913		struct xfs_inode	*ip = i_tab[i];
1914		int			error;
1915
1916		/*
1917		 * Watch out for duplicate entries in the table.
1918		 */
1919		if (i == 0 || ip != i_tab[i-1]) {
1920			if (XFS_NOT_DQATTACHED(mp, ip)) {
1921				error = xfs_qm_dqattach(ip, 0);
1922				if (error)
1923					return error;
1924			}
1925		}
1926	}
1927	return 0;
1928}
1929
1930void
1931xfs_qm_vop_create_dqattach(
1932	struct xfs_trans	*tp,
1933	struct xfs_inode	*ip,
1934	struct xfs_dquot	*udqp,
1935	struct xfs_dquot	*gdqp,
1936	struct xfs_dquot	*pdqp)
1937{
1938	struct xfs_mount	*mp = tp->t_mountp;
1939
1940	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1941		return;
1942
1943	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1944	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1945
1946	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1947		ASSERT(ip->i_udquot == NULL);
1948		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1949
1950		ip->i_udquot = xfs_qm_dqhold(udqp);
1951		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1952	}
1953	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1954		ASSERT(ip->i_gdquot == NULL);
1955		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
1956		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1957		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1958	}
1959	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1960		ASSERT(ip->i_pdquot == NULL);
1961		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
1962
1963		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1964		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1965	}
1966}
1967
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
 
 
 
 
 
 
 
 
 
 
 
 
 
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
  17#include "xfs_quota.h"
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28#include "xfs_log_priv.h"
  29
  30/*
  31 * The global quota manager. There is only one of these for the entire
  32 * system, _not_ one per file system. XQM keeps track of the overall
  33 * quota functionality, including maintaining the freelist and hash
  34 * tables of dquots.
  35 */
  36STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  37STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
  38
  39STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
  40STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  41/*
  42 * We use the batch lookup interface to iterate over the dquots as it
  43 * currently is the only interface into the radix tree code that allows
  44 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  45 * operations is fine as all callers run only during mount/umount
  46 * or quotaoff.
  47 */
  48#define XFS_DQ_LOOKUP_BATCH	32
  49
  50STATIC int
  51xfs_qm_dquot_walk(
  52	struct xfs_mount	*mp,
  53	xfs_dqtype_t		type,
  54	int			(*execute)(struct xfs_dquot *dqp, void *data),
  55	void			*data)
  56{
  57	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  58	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  59	uint32_t		next_index;
  60	int			last_error = 0;
  61	int			skipped;
  62	int			nr_found;
  63
  64restart:
  65	skipped = 0;
  66	next_index = 0;
  67	nr_found = 0;
  68
  69	while (1) {
  70		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  71		int		error;
  72		int		i;
  73
  74		mutex_lock(&qi->qi_tree_lock);
  75		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  76					next_index, XFS_DQ_LOOKUP_BATCH);
  77		if (!nr_found) {
  78			mutex_unlock(&qi->qi_tree_lock);
  79			break;
  80		}
  81
  82		for (i = 0; i < nr_found; i++) {
  83			struct xfs_dquot *dqp = batch[i];
  84
  85			next_index = dqp->q_id + 1;
  86
  87			error = execute(batch[i], data);
  88			if (error == -EAGAIN) {
  89				skipped++;
  90				continue;
  91			}
  92			if (error && last_error != -EFSCORRUPTED)
  93				last_error = error;
  94		}
  95
  96		mutex_unlock(&qi->qi_tree_lock);
  97
  98		/* bail out if the filesystem is corrupted.  */
  99		if (last_error == -EFSCORRUPTED) {
 100			skipped = 0;
 101			break;
 102		}
 103		/* we're done if id overflows back to zero */
 104		if (!next_index)
 105			break;
 106	}
 107
 108	if (skipped) {
 109		delay(1);
 110		goto restart;
 111	}
 112
 113	return last_error;
 114}
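/*
 * A minimal sketch (illustrative only, not part of this file and not
 * compiled) of an ->execute callback for xfs_qm_dquot_walk() above.
 * The name xfs_qm_count_one and its caller are hypothetical; the
 * signature and the -EAGAIN retry contract are the ones the walker
 * expects.
 */
#if 0
STATIC int
xfs_qm_count_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	uint64_t		*count = data;

	if (!xfs_dqlock_nowait(dqp))
		return -EAGAIN;		/* walker delays and restarts */
	(*count)++;
	xfs_dqunlock(dqp);
	return 0;
}

/* e.g. xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_count_one, &count); */
#endif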
 115
 116
 117/*
 118 * Purge a dquot from all tracking data structures and free it.
 119 */
 120STATIC int
 121xfs_qm_dqpurge(
 122	struct xfs_dquot	*dqp,
 123	void			*data)
 124{
 125	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
 126	int			error = -EAGAIN;
 127
 128	xfs_dqlock(dqp);
 129	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 130		goto out_unlock;
 131
 132	dqp->q_flags |= XFS_DQFLAG_FREEING;
 133
 134	xfs_dqflock(dqp);
 135
 136	/*
 137	 * If we are turning this type of quota off, we don't care
 138	 * about the dirty metadata sitting in this dquot. OTOH, if
 139	 * we're unmounting, we do care, so we flush it and wait.
 140	 */
 141	if (XFS_DQ_IS_DIRTY(dqp)) {
 142		struct xfs_buf	*bp = NULL;
 143
 144		/*
 145		 * We don't care about getting disk errors here. We need
 146		 * to purge this dquot anyway, so we go ahead regardless.
 147		 */
 148		error = xfs_qm_dqflush(dqp, &bp);
 149		if (!error) {
 150			error = xfs_bwrite(bp);
 151			xfs_buf_relse(bp);
 152		} else if (error == -EAGAIN) {
 153			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 154			goto out_unlock;
 155		}
 156		xfs_dqflock(dqp);
 157	}
 158
 159	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 160	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
 161		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 162
 163	xfs_dqfunlock(dqp);
 164	xfs_dqunlock(dqp);
 165
 166	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 167	qi->qi_dquots--;
 168
 169	/*
 170	 * We move dquots to the freelist as soon as their reference count
 171	 * hits zero, so it really should be on the freelist here.
 172	 */
 173	ASSERT(!list_empty(&dqp->q_lru));
 174	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 175	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 176
 177	xfs_qm_dqdestroy(dqp);
 178	return 0;
 179
 180out_unlock:
 181	xfs_dqunlock(dqp);
 182	return error;
 183}
 184
 185/*
 186 * Purge the dquot cache.
 187 */
 188static void
 189xfs_qm_dqpurge_all(
 190	struct xfs_mount	*mp)
 191{
 192	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 193	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 194	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 195}
 196
 197/*
 198 * Just destroy the quotainfo structure.
 199 */
 200void
 201xfs_qm_unmount(
 202	struct xfs_mount	*mp)
 203{
 204	if (mp->m_quotainfo) {
 205		xfs_qm_dqpurge_all(mp);
 206		xfs_qm_destroy_quotainfo(mp);
 207	}
 208}
 209
 210/*
 211 * Called from the vfsops layer.
 212 */
 213void
 214xfs_qm_unmount_quotas(
 215	xfs_mount_t	*mp)
 216{
 217	/*
 218	 * Release the dquots that the root inode, et al. might be holding,
 219	 * before we flush quotas and blow away the quotainfo structure.
 220	 */
 221	ASSERT(mp->m_rootip);
 222	xfs_qm_dqdetach(mp->m_rootip);
 223	if (mp->m_rbmip)
 224		xfs_qm_dqdetach(mp->m_rbmip);
 225	if (mp->m_rsumip)
 226		xfs_qm_dqdetach(mp->m_rsumip);
 227
 228	/*
 229	 * Release the quota inodes.
 230	 */
 231	if (mp->m_quotainfo) {
 232		if (mp->m_quotainfo->qi_uquotaip) {
 233			xfs_irele(mp->m_quotainfo->qi_uquotaip);
 234			mp->m_quotainfo->qi_uquotaip = NULL;
 235		}
 236		if (mp->m_quotainfo->qi_gquotaip) {
 237			xfs_irele(mp->m_quotainfo->qi_gquotaip);
 238			mp->m_quotainfo->qi_gquotaip = NULL;
 239		}
 240		if (mp->m_quotainfo->qi_pquotaip) {
 241			xfs_irele(mp->m_quotainfo->qi_pquotaip);
 242			mp->m_quotainfo->qi_pquotaip = NULL;
 243		}
 244	}
 245}
 246
 247STATIC int
 248xfs_qm_dqattach_one(
 249	struct xfs_inode	*ip,
 250	xfs_dqtype_t		type,
 251	bool			doalloc,
 252	struct xfs_dquot	**IO_idqpp)
 253{
 254	struct xfs_dquot	*dqp;
 255	int			error;
 256
 257	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 258	error = 0;
 259
 260	/*
 261	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 262	 * or &i_gdquot. This makes the code look weird, but it makes the
 263	 * logic a lot simpler.
 264	 */
 265	dqp = *IO_idqpp;
 266	if (dqp) {
 267		trace_xfs_dqattach_found(dqp);
 268		return 0;
 269	}
 270
 271	/*
 272	 * Find the dquot from somewhere. This bumps the reference count of
 273	 * the dquot and returns it locked.  This can return -ENOENT if the
 274	 * dquot didn't exist on disk and we didn't ask it to allocate; -ESRCH
 275	 * if quotas got turned off suddenly.
 276	 */
 277	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 278	if (error)
 279		return error;
 280
 281	trace_xfs_dqattach_get(dqp);
 282
 283	/*
 284	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 285	 * that the dquot returned is the one that should go in the inode.
 286	 */
 287	*IO_idqpp = dqp;
 288	xfs_dqunlock(dqp);
 289	return 0;
 290}
 291
 292static bool
 293xfs_qm_need_dqattach(
 294	struct xfs_inode	*ip)
 295{
 296	struct xfs_mount	*mp = ip->i_mount;
 297
 298	if (!XFS_IS_QUOTA_ON(mp))
 299		return false;
 300	if (!XFS_NOT_DQATTACHED(mp, ip))
 301		return false;
 302	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 303		return false;
 304	return true;
 305}
 306
 307/*
 308 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 309 * into account.
 310 * If @doalloc is true, the dquot(s) will be allocated if needed.
 311 * Inode may get unlocked and relocked in here, and the caller must deal with
 312 * the consequences.
 313 */
 314int
 315xfs_qm_dqattach_locked(
 316	xfs_inode_t	*ip,
 317	bool		doalloc)
 318{
 319	xfs_mount_t	*mp = ip->i_mount;
 320	int		error = 0;
 321
 322	if (!xfs_qm_need_dqattach(ip))
 323		return 0;
 324
 325	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 326
 327	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 328		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 329				doalloc, &ip->i_udquot);
 330		if (error)
 331			goto done;
 332		ASSERT(ip->i_udquot);
 333	}
 334
 335	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 336		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 337				doalloc, &ip->i_gdquot);
 338		if (error)
 339			goto done;
 340		ASSERT(ip->i_gdquot);
 341	}
 342
 343	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 344		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 345				doalloc, &ip->i_pdquot);
 346		if (error)
 347			goto done;
 348		ASSERT(ip->i_pdquot);
 349	}
 350
 351done:
 352	/*
 353	 * Don't worry about the dquots that we may have attached before any
 354	 * error - they'll get detached later if it has not already been done.
 355	 */
 356	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 357	return error;
 358}
 359
 360int
 361xfs_qm_dqattach(
 362	struct xfs_inode	*ip)
 363{
 364	int			error;
 365
 366	if (!xfs_qm_need_dqattach(ip))
 367		return 0;
 368
 369	xfs_ilock(ip, XFS_ILOCK_EXCL);
 370	error = xfs_qm_dqattach_locked(ip, false);
 371	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 372
 373	return error;
 374}
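/*
 * Sketch of the usual calling pattern (hypothetical caller, not
 * compiled): attach the dquots before making quota reservations for a
 * transaction that will modify this inode's block or inode counts.
 */
#if 0
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;
	/* ... then xfs_trans_alloc() / xfs_trans_reserve_quota_nblks() ... */
#endif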
 375
 376/*
 377 * Release dquots (and their references) if any.
 378 * The inode should be locked EXCL except when this is called by
 379 * xfs_ireclaim.
 380 */
 381void
 382xfs_qm_dqdetach(
 383	xfs_inode_t	*ip)
 384{
 385	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 386		return;
 387
 388	trace_xfs_dquot_dqdetach(ip);
 389
 390	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 391	if (ip->i_udquot) {
 392		xfs_qm_dqrele(ip->i_udquot);
 393		ip->i_udquot = NULL;
 394	}
 395	if (ip->i_gdquot) {
 396		xfs_qm_dqrele(ip->i_gdquot);
 397		ip->i_gdquot = NULL;
 398	}
 399	if (ip->i_pdquot) {
 400		xfs_qm_dqrele(ip->i_pdquot);
 401		ip->i_pdquot = NULL;
 402	}
 403}
 404
 405struct xfs_qm_isolate {
 406	struct list_head	buffers;
 407	struct list_head	dispose;
 408};
 409
 410static enum lru_status
 411xfs_qm_dquot_isolate(
 412	struct list_head	*item,
 413	struct list_lru_one	*lru,
 414	spinlock_t		*lru_lock,
 415	void			*arg)
 416		__releases(lru_lock) __acquires(lru_lock)
 417{
 418	struct xfs_dquot	*dqp = container_of(item,
 419						struct xfs_dquot, q_lru);
 420	struct xfs_qm_isolate	*isol = arg;
 421
 422	if (!xfs_dqlock_nowait(dqp))
 423		goto out_miss_busy;
 424
 425	/*
 426	 * If something else is freeing this dquot and hasn't yet removed it
 427	 * from the LRU, leave it for the freeing task to complete the freeing
 428	 * process rather than risk it being freed from under us here.
 429	 */
 430	if (dqp->q_flags & XFS_DQFLAG_FREEING)
 431		goto out_miss_unlock;
 432
 433	/*
 434	 * This dquot has acquired a reference in the meantime; remove it
 435	 * from the freelist and try again.
 436	 */
 437	if (dqp->q_nrefs) {
 438		xfs_dqunlock(dqp);
 439		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 440
 441		trace_xfs_dqreclaim_want(dqp);
 442		list_lru_isolate(lru, &dqp->q_lru);
 443		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 444		return LRU_REMOVED;
 445	}
 446
 447	/*
 448	 * If the dquot is dirty, flush it. If it's already being flushed, just
 449	 * skip it so there is time for the IO to complete before we try to
 450	 * reclaim it again on the next LRU pass.
 451	 */
 452	if (!xfs_dqflock_nowait(dqp))
 453		goto out_miss_unlock;
 454
 455	if (XFS_DQ_IS_DIRTY(dqp)) {
 456		struct xfs_buf	*bp = NULL;
 457		int		error;
 458
 459		trace_xfs_dqreclaim_dirty(dqp);
 460
 461		/* we have to drop the LRU lock to flush the dquot */
 462		spin_unlock(lru_lock);
 463
 464		error = xfs_qm_dqflush(dqp, &bp);
 465		if (error)
 466			goto out_unlock_dirty;
 467
 468		xfs_buf_delwri_queue(bp, &isol->buffers);
 469		xfs_buf_relse(bp);
 470		goto out_unlock_dirty;
 471	}
 472	xfs_dqfunlock(dqp);
 473
 474	/*
 475	 * Prevent lookups now that we are past the point of no return.
 476	 */
 477	dqp->q_flags |= XFS_DQFLAG_FREEING;
 478	xfs_dqunlock(dqp);
 479
 480	ASSERT(dqp->q_nrefs == 0);
 481	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 482	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 483	trace_xfs_dqreclaim_done(dqp);
 484	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 485	return LRU_REMOVED;
 486
 487out_miss_unlock:
 488	xfs_dqunlock(dqp);
 489out_miss_busy:
 490	trace_xfs_dqreclaim_busy(dqp);
 491	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 492	return LRU_SKIP;
 493
 494out_unlock_dirty:
 495	trace_xfs_dqreclaim_busy(dqp);
 496	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 497	xfs_dqunlock(dqp);
 498	spin_lock(lru_lock);
 499	return LRU_RETRY;
 500}
 501
 502static unsigned long
 503xfs_qm_shrink_scan(
 504	struct shrinker		*shrink,
 505	struct shrink_control	*sc)
 506{
 507	struct xfs_quotainfo	*qi = container_of(shrink,
 508					struct xfs_quotainfo, qi_shrinker);
 509	struct xfs_qm_isolate	isol;
 510	unsigned long		freed;
 511	int			error;
 512
 513	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 514		return 0;
 515
 516	INIT_LIST_HEAD(&isol.buffers);
 517	INIT_LIST_HEAD(&isol.dispose);
 518
 519	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 520				     xfs_qm_dquot_isolate, &isol);
 521
 522	error = xfs_buf_delwri_submit(&isol.buffers);
 523	if (error)
 524		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 525
 526	while (!list_empty(&isol.dispose)) {
 527		struct xfs_dquot	*dqp;
 528
 529		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 530		list_del_init(&dqp->q_lru);
 531		xfs_qm_dqfree_one(dqp);
 532	}
 533
 534	return freed;
 535}
 536
 537static unsigned long
 538xfs_qm_shrink_count(
 539	struct shrinker		*shrink,
 540	struct shrink_control	*sc)
 541{
 542	struct xfs_quotainfo	*qi = container_of(shrink,
 543					struct xfs_quotainfo, qi_shrinker);
 544
 545	return list_lru_shrink_count(&qi->qi_lru, sc);
 546}
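/*
 * Rough sketch (hypothetical, not compiled) of how the VM drives the
 * two callbacks above: ->count_objects sizes the LRU, then
 * ->scan_objects walks it, letting xfs_qm_dquot_isolate() decide per
 * dquot whether to reclaim, skip, or retry.  GFP_KERNEL carries both
 * __GFP_FS and __GFP_DIRECT_RECLAIM, so the scan is not skipped.
 */
#if 0
	struct shrink_control sc = {
		.gfp_mask	= GFP_KERNEL,
		.nr_to_scan	= 128,
	};

	if (xfs_qm_shrink_count(&qi->qi_shrinker, &sc))
		xfs_qm_shrink_scan(&qi->qi_shrinker, &sc);
#endif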
 547
 548STATIC void
 549xfs_qm_set_defquota(
 550	struct xfs_mount	*mp,
 551	xfs_dqtype_t		type,
 552	struct xfs_quotainfo	*qinf)
 553{
 554	struct xfs_dquot	*dqp;
 555	struct xfs_def_quota	*defq;
 556	int			error;
 557
 558	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 559	if (error)
 560		return;
 561
 562	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 563
 564	/*
 565	 * Timers and warnings have already been set, so let's just set the
 566	 * default limits for this quota type
 567	 */
 568	defq->blk.hard = dqp->q_blk.hardlimit;
 569	defq->blk.soft = dqp->q_blk.softlimit;
 570	defq->ino.hard = dqp->q_ino.hardlimit;
 571	defq->ino.soft = dqp->q_ino.softlimit;
 572	defq->rtb.hard = dqp->q_rtb.hardlimit;
 573	defq->rtb.soft = dqp->q_rtb.softlimit;
 574	xfs_qm_dqdestroy(dqp);
 575}
 576
 577/* Initialize quota time limits from the root dquot. */
 578static void
 579xfs_qm_init_timelimits(
 580	struct xfs_mount	*mp,
 581	xfs_dqtype_t		type)
 582{
 583	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 584	struct xfs_def_quota	*defq;
 585	struct xfs_dquot	*dqp;
 586	int			error;
 587
 588	defq = xfs_get_defquota(qinf, type);
 589
 590	defq->blk.time = XFS_QM_BTIMELIMIT;
 591	defq->ino.time = XFS_QM_ITIMELIMIT;
 592	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 593
 594	/*
 595	 * We try to get the limits from the superuser's limits fields.
 596	 * This is quite hacky, but it is standard quota practice.
 597	 *
 598	 * Since we may not have done a quotacheck by this point, just read
 599	 * the dquot without attaching it to any hashtables or lists.
 600	 */
 601	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 602	if (error)
 603		return;
 604
 605	/*
 606	 * The warnings and timers set the grace period given to
 607	 * a user or group before he or she can no longer write.
 608	 * If it is zero, a default is used.
 609	 */
 610	if (dqp->q_blk.timer)
 611		defq->blk.time = dqp->q_blk.timer;
 612	if (dqp->q_ino.timer)
 613		defq->ino.time = dqp->q_ino.timer;
 614	if (dqp->q_rtb.timer)
 615		defq->rtb.time = dqp->q_rtb.timer;
 616
 617	xfs_qm_dqdestroy(dqp);
 618}
 619
 620/*
 621 * This initializes all the quota information that's kept in the
 622	 * mount structure.
 623 */
 624STATIC int
 625xfs_qm_init_quotainfo(
 626	struct xfs_mount	*mp)
 627{
 628	struct xfs_quotainfo	*qinf;
 629	int			error;
 630
 631	ASSERT(XFS_IS_QUOTA_ON(mp));
 632
 633	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
 634
 635	error = list_lru_init(&qinf->qi_lru);
 636	if (error)
 637		goto out_free_qinf;
 638
 639	/*
 640	 * See if quotainodes are setup, and if not, allocate them,
 641	 * and change the superblock accordingly.
 642	 */
 643	error = xfs_qm_init_quotainos(mp);
 644	if (error)
 645		goto out_free_lru;
 646
 647	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 648	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 649	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 650	mutex_init(&qinf->qi_tree_lock);
 651
 652	/* mutex used to serialize quotaoffs */
 653	mutex_init(&qinf->qi_quotaofflock);
 654
 655	/* Precalc some constants */
 656	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 657	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 658	if (xfs_has_bigtime(mp)) {
 659		qinf->qi_expiry_min =
 660			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 661		qinf->qi_expiry_max =
 662			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 663	} else {
 664		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 665		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 666	}
 667	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 668			qinf->qi_expiry_max);
 669
 670	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 671
 672	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 673	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 674	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 675
 676	if (XFS_IS_UQUOTA_ON(mp))
 677		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 678	if (XFS_IS_GQUOTA_ON(mp))
 679		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 680	if (XFS_IS_PQUOTA_ON(mp))
 681		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 682
 683	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 684	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 685	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 686	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 687
 688	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
 689				  mp->m_super->s_id);
 690	if (error)
 691		goto out_free_inos;
 692
 693	return 0;
 694
 695out_free_inos:
 696	mutex_destroy(&qinf->qi_quotaofflock);
 697	mutex_destroy(&qinf->qi_tree_lock);
 698	xfs_qm_destroy_quotainos(qinf);
 699out_free_lru:
 700	list_lru_destroy(&qinf->qi_lru);
 701out_free_qinf:
 702	kmem_free(qinf);
 703	mp->m_quotainfo = NULL;
 704	return error;
 705}
 706
 707/*
 708 * Gets called when unmounting a filesystem or when all quotas get
 709 * turned off.
 710 * This purges the quota inodes, destroys locks and frees itself.
 711 */
 712void
 713xfs_qm_destroy_quotainfo(
 714	struct xfs_mount	*mp)
 715{
 716	struct xfs_quotainfo	*qi;
 717
 718	qi = mp->m_quotainfo;
 719	ASSERT(qi != NULL);
 720
 721	unregister_shrinker(&qi->qi_shrinker);
 722	list_lru_destroy(&qi->qi_lru);
 723	xfs_qm_destroy_quotainos(qi);
 724	mutex_destroy(&qi->qi_tree_lock);
 725	mutex_destroy(&qi->qi_quotaofflock);
 726	kmem_free(qi);
 727	mp->m_quotainfo = NULL;
 728}
 729
 730/*
 731 * Create an inode and return with a reference already taken, but
 732 * unlocked. This is how we create quota inodes.
 733 */
 734STATIC int
 735xfs_qm_qino_alloc(
 736	struct xfs_mount	*mp,
 737	struct xfs_inode	**ipp,
 738	unsigned int		flags)
 739{
 740	struct xfs_trans	*tp;
 741	int			error;
 742	bool			need_alloc = true;
 743
 744	*ipp = NULL;
 745	/*
 746	 * With a superblock that doesn't have a separate pquotino, we
 747	 * share an inode between gquota and pquota. If the on-disk
 748	 * superblock has GQUOTA and the filesystem is now mounted
 749	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 750	 * vice-versa.
 751	 */
 752	if (!xfs_has_pquotino(mp) &&
 753			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 754		xfs_ino_t ino = NULLFSINO;
 755
 756		if ((flags & XFS_QMOPT_PQUOTA) &&
 757			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 758			ino = mp->m_sb.sb_gquotino;
 759			if (XFS_IS_CORRUPT(mp,
 760					   mp->m_sb.sb_pquotino != NULLFSINO))
 761				return -EFSCORRUPTED;
 762		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 763			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 764			ino = mp->m_sb.sb_pquotino;
 765			if (XFS_IS_CORRUPT(mp,
 766					   mp->m_sb.sb_gquotino != NULLFSINO))
 767				return -EFSCORRUPTED;
 768		}
 769		if (ino != NULLFSINO) {
 770			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
 771			if (error)
 772				return error;
 773			mp->m_sb.sb_gquotino = NULLFSINO;
 774			mp->m_sb.sb_pquotino = NULLFSINO;
 775			need_alloc = false;
 776		}
 777	}
 778
 779	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 780			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
 781			0, 0, &tp);
 782	if (error)
 783		return error;
 784
 785	if (need_alloc) {
 786		xfs_ino_t	ino;
 787
 788		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
 789		if (!error)
 790			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
 791					S_IFREG, 1, 0, 0, false, ipp);
 792		if (error) {
 793			xfs_trans_cancel(tp);
 794			return error;
 795		}
 796	}
 797
 798	/*
 799	 * Make the changes in the superblock, and log those too.
 800	 * sbfields arg may contain fields other than *QUOTINO;
 801	 * VERSIONNUM for example.
 802	 */
 803	spin_lock(&mp->m_sb_lock);
 804	if (flags & XFS_QMOPT_SBVERSION) {
 805		ASSERT(!xfs_has_quota(mp));
 806
 807		xfs_add_quota(mp);
 808		mp->m_sb.sb_uquotino = NULLFSINO;
 809		mp->m_sb.sb_gquotino = NULLFSINO;
 810		mp->m_sb.sb_pquotino = NULLFSINO;
 811
 812		/* qflags will get updated fully _after_ quotacheck */
 813		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 814	}
 815	if (flags & XFS_QMOPT_UQUOTA)
 816		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
 817	else if (flags & XFS_QMOPT_GQUOTA)
 818		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
 819	else
 820		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
 821	spin_unlock(&mp->m_sb_lock);
 822	xfs_log_sb(tp);
 823
 824	error = xfs_trans_commit(tp);
 825	if (error) {
 826		ASSERT(xfs_is_shutdown(mp));
 827		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 828	}
 829	if (need_alloc)
 830		xfs_finish_inode_setup(*ipp);
 831	return error;
 832}
 833
 834
 835STATIC void
 836xfs_qm_reset_dqcounts(
 837	struct xfs_mount	*mp,
 838	struct xfs_buf		*bp,
 839	xfs_dqid_t		id,
 840	xfs_dqtype_t		type)
 841{
 842	struct xfs_dqblk	*dqb;
 843	int			j;
 844
 845	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 846
 847	/*
 848	 * Reset all counters and timers. They'll be
 849	 * started afresh by xfs_qm_quotacheck.
 850	 */
 851#ifdef DEBUG
 852	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
 853		sizeof(struct xfs_dqblk);
 854	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 855#endif
 856	dqb = bp->b_addr;
 857	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 858		struct xfs_disk_dquot	*ddq;
 859
 860		ddq = (struct xfs_disk_dquot *)&dqb[j];
 861
 862		/*
 863		 * Do a sanity check, and if needed, repair the dqblk. Don't
 864		 * output any warnings because it's perfectly possible to
 865		 * find uninitialised dquot blks. See comment in
 866		 * xfs_dquot_verify.
 867		 */
 868		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
 869		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
 870			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 871
 872		/*
 873		 * Reset type in case we are reusing group quota file for
 874		 * project quotas or vice versa
 875		 */
 876		ddq->d_type = type;
 877		ddq->d_bcount = 0;
 878		ddq->d_icount = 0;
 879		ddq->d_rtbcount = 0;
 880
 881		/*
 882		 * dquot id 0 stores the default grace period and the maximum
 883		 * warning limit that were set by the administrator, so we
 884		 * should not reset them.
 885		 */
 886		if (ddq->d_id != 0) {
 887			ddq->d_btimer = 0;
 888			ddq->d_itimer = 0;
 889			ddq->d_rtbtimer = 0;
 890			ddq->d_bwarns = 0;
 891			ddq->d_iwarns = 0;
 892			ddq->d_rtbwarns = 0;
 893			if (xfs_has_bigtime(mp))
 894				ddq->d_type |= XFS_DQTYPE_BIGTIME;
 895		}
 896
 897		if (xfs_has_crc(mp)) {
 898			xfs_update_cksum((char *)&dqb[j],
 899					 sizeof(struct xfs_dqblk),
 900					 XFS_DQUOT_CRC_OFF);
 901		}
 902	}
 903}
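/*
 * For reference, a sketch (not compiled) of the matching read-side
 * check: the dquot buffer verifier validates the same region that the
 * xfs_update_cksum() call above stamps.  The surrounding context
 * (mp, dqb, j) is assumed from the function above.
 */
#if 0
	if (xfs_has_crc(mp) &&
	    !xfs_verify_cksum((char *)&dqb[j], sizeof(struct xfs_dqblk),
			      XFS_DQUOT_CRC_OFF))
		/* treat the dquot block as corrupt */;
#endif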
 904
 905STATIC int
 906xfs_qm_reset_dqcounts_all(
 907	struct xfs_mount	*mp,
 908	xfs_dqid_t		firstid,
 909	xfs_fsblock_t		bno,
 910	xfs_filblks_t		blkcnt,
 911	xfs_dqtype_t		type,
 912	struct list_head	*buffer_list)
 913{
 914	struct xfs_buf		*bp;
 915	int			error = 0;
 916
 917	ASSERT(blkcnt > 0);
 918
 919	/*
 920	 * Blkcnt arg can be a very big number, and might even be
 921	 * larger than the log itself. So, we have to break it up into
 922	 * manageable-sized transactions.
 923	 * Note that we don't start a permanent transaction here; we might
 924	 * not be able to get a log reservation for the whole thing up front,
 925	 * and we don't really care to either, because we just discard
 926	 * everything if we were to crash in the middle of this loop.
 927	 */
 928	while (blkcnt--) {
 929		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 930			      XFS_FSB_TO_DADDR(mp, bno),
 931			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 932			      &xfs_dquot_buf_ops);
 933
 934		/*
 935	 * CRC and validation errors will return -EFSCORRUPTED here. If
 936		 * this occurs, re-read without CRC validation so that we can
 937		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 938		 * will leave a trace in the log indicating corruption has
 939		 * been detected.
 940		 */
 941		if (error == -EFSCORRUPTED) {
 942			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 943				      XFS_FSB_TO_DADDR(mp, bno),
 944				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 945				      NULL);
 946		}
 947
 948		if (error)
 949			break;
 950
 951		/*
 952		 * A corrupt buffer might not have a verifier attached, so
 953		 * make sure we have the correct one attached before writeback
 954		 * occurs.
 955		 */
 956		bp->b_ops = &xfs_dquot_buf_ops;
 957		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 958		xfs_buf_delwri_queue(bp, buffer_list);
 959		xfs_buf_relse(bp);
 960
 961		/* go to the next block. */
 962		bno++;
 963		firstid += mp->m_quotainfo->qi_dqperchunk;
 964	}
 965
 966	return error;
 967}
 968
 969/*
 970 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 971 * counters for every chunk of dquots that we find.
 972 */
 973STATIC int
 974xfs_qm_reset_dqcounts_buf(
 975	struct xfs_mount	*mp,
 976	struct xfs_inode	*qip,
 977	xfs_dqtype_t		type,
 978	struct list_head	*buffer_list)
 979{
 980	struct xfs_bmbt_irec	*map;
 981	int			i, nmaps;	/* number of map entries */
 982	int			error;		/* return value */
 983	xfs_fileoff_t		lblkno;
 984	xfs_filblks_t		maxlblkcnt;
 985	xfs_dqid_t		firstid;
 986	xfs_fsblock_t		rablkno;
 987	xfs_filblks_t		rablkcnt;
 988
 989	error = 0;
 990	/*
 991	 * This looks racy, but we can't keep an inode lock across a
 992	 * trans_reserve. But this gets called during quotacheck, and that
 993	 * happens only at mount time, which is single-threaded.
 994	 */
 995	if (qip->i_nblocks == 0)
 996		return 0;
 997
 998	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
 999
1000	lblkno = 0;
1001	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1002	do {
1003		uint		lock_mode;
1004
1005		nmaps = XFS_DQITER_MAP_SIZE;
1006		/*
1007		 * We aren't changing the inode itself. Just changing
1008		 * some of its data. No new blocks are added here, and
1009		 * the inode is never added to the transaction.
1010		 */
1011		lock_mode = xfs_ilock_data_map_shared(qip);
1012		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1013				       map, &nmaps, 0);
1014		xfs_iunlock(qip, lock_mode);
1015		if (error)
1016			break;
1017
1018		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1019		for (i = 0; i < nmaps; i++) {
1020			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1021			ASSERT(map[i].br_blockcount);
1022
1023
1024			lblkno += map[i].br_blockcount;
1025
1026			if (map[i].br_startblock == HOLESTARTBLOCK)
1027				continue;
1028
1029			firstid = (xfs_dqid_t) map[i].br_startoff *
1030				mp->m_quotainfo->qi_dqperchunk;
1031			/*
1032			 * Do a read-ahead on the next extent.
1033			 */
1034			if ((i+1 < nmaps) &&
1035			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1036				rablkcnt =  map[i+1].br_blockcount;
1037				rablkno = map[i+1].br_startblock;
1038				while (rablkcnt--) {
1039					xfs_buf_readahead(mp->m_ddev_targp,
1040					       XFS_FSB_TO_DADDR(mp, rablkno),
1041					       mp->m_quotainfo->qi_dqchunklen,
1042					       &xfs_dquot_buf_ops);
1043					rablkno++;
1044				}
1045			}
1046			/*
1047			 * Iterate through all the blocks in the extent and
1048			 * reset the counters of all the dquots inside them.
1049			 */
1050			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1051						   map[i].br_startblock,
1052						   map[i].br_blockcount,
1053						   type, buffer_list);
1054			if (error)
1055				goto out;
1056		}
1057	} while (nmaps > 0);
1058
1059out:
1060	kmem_free(map);
1061	return error;
1062}
1063
1064/*
1065 * Called by dqusage_adjust in doing a quotacheck.
1066 *
1067 * Given the inode and a dquot id, this updates both the incore dquot
1068 * as well as the buffer copy. This is so that once the quotacheck is done, we can
1069 * just log all the buffers, as opposed to logging numerous updates to
1070 * individual dquots.
1071 */
1072STATIC int
1073xfs_qm_quotacheck_dqadjust(
1074	struct xfs_inode	*ip,
1075	xfs_dqtype_t		type,
1076	xfs_qcnt_t		nblks,
1077	xfs_qcnt_t		rtblks)
1078{
1079	struct xfs_mount	*mp = ip->i_mount;
1080	struct xfs_dquot	*dqp;
1081	xfs_dqid_t		id;
1082	int			error;
1083
1084	id = xfs_qm_id_for_quotatype(ip, type);
1085	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1086	if (error) {
1087		/*
1088		 * Shouldn't be able to turn off quotas here.
1089		 */
1090		ASSERT(error != -ESRCH);
1091		ASSERT(error != -ENOENT);
1092		return error;
1093	}
1094
1095	trace_xfs_dqadjust(dqp);
1096
1097	/*
1098	 * Adjust the inode count and the block count to reflect this inode's
1099	 * resource usage.
1100	 */
1101	dqp->q_ino.count++;
1102	dqp->q_ino.reserved++;
1103	if (nblks) {
1104		dqp->q_blk.count += nblks;
1105		dqp->q_blk.reserved += nblks;
1106	}
1107	if (rtblks) {
1108		dqp->q_rtb.count += rtblks;
1109		dqp->q_rtb.reserved += rtblks;
1110	}
1111
1112	/*
1113	 * Set default limits, adjust timers (since we changed usages)
1114	 *
1115	 * There are no timers for the default values set in the root dquot.
1116	 */
1117	if (dqp->q_id) {
1118		xfs_qm_adjust_dqlimits(dqp);
1119		xfs_qm_adjust_dqtimers(dqp);
1120	}
1121
1122	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1123	xfs_qm_dqput(dqp);
1124	return 0;
1125}
1126
1127/*
1128 * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1129 * dquots and update them to account for resources taken by that inode.
1130 */
1131/* ARGSUSED */
1132STATIC int
1133xfs_qm_dqusage_adjust(
1134	struct xfs_mount	*mp,
1135	struct xfs_trans	*tp,
1136	xfs_ino_t		ino,
1137	void			*data)
1138{
1139	struct xfs_inode	*ip;
1140	xfs_qcnt_t		nblks;
1141	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1142	int			error;
1143
1144	ASSERT(XFS_IS_QUOTA_ON(mp));
1145
1146	/*
1147	 * rootino must have its resources accounted for, not so with the quota
1148	 * inodes.
1149	 */
1150	if (xfs_is_quota_inode(&mp->m_sb, ino))
1151		return 0;
1152
1153	/*
1154	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1155	 * at mount time and therefore nobody will be racing chown/chproj.
1156	 */
1157	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1158	if (error == -EINVAL || error == -ENOENT)
1159		return 0;
1160	if (error)
1161		return error;
1162
1163	ASSERT(ip->i_delayed_blks == 0);
1164
1165	if (XFS_IS_REALTIME_INODE(ip)) {
1166		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1167
1168		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1169		if (error)
1170			goto error0;
1171
1172		xfs_bmap_count_leaves(ifp, &rtblks);
1173	}
1174
1175	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1176
1177	/*
1178	 * Add the (disk blocks and inode) resources occupied by this
1179	 * inode to its dquots. We do this adjustment in the incore dquot,
1180	 * and also copy the changes to its buffer.
1181	 * We don't care about putting these changes in a transaction
1182	 * envelope because if we crash in the middle of a 'quotacheck'
1183	 * we have to start from the beginning anyway.
1184	 * Once we're done, we'll log all the dquot bufs.
1185	 *
1186	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1187	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1188	 */
1189	if (XFS_IS_UQUOTA_ON(mp)) {
1190		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1191				rtblks);
1192		if (error)
1193			goto error0;
1194	}
1195
1196	if (XFS_IS_GQUOTA_ON(mp)) {
1197		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1198				rtblks);
1199		if (error)
1200			goto error0;
1201	}
1202
1203	if (XFS_IS_PQUOTA_ON(mp)) {
1204		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1205				rtblks);
1206		if (error)
1207			goto error0;
1208	}
1209
1210error0:
1211	xfs_irele(ip);
1212	return error;
1213}
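/*
 * Worked example (hypothetical numbers) for the block split above: a
 * realtime inode with i_nblocks == 120 whose data fork maps 20 blocks
 * on the realtime device is charged rtblks == 20 against q_rtb and
 * nblks == 100 (the remaining bmap btree and attr blocks) against
 * q_blk.
 */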
1214
1215STATIC int
1216xfs_qm_flush_one(
1217	struct xfs_dquot	*dqp,
1218	void			*data)
1219{
1220	struct xfs_mount	*mp = dqp->q_mount;
1221	struct list_head	*buffer_list = data;
1222	struct xfs_buf		*bp = NULL;
1223	int			error = 0;
1224
1225	xfs_dqlock(dqp);
1226	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1227		goto out_unlock;
1228	if (!XFS_DQ_IS_DIRTY(dqp))
1229		goto out_unlock;
1230
1231	/*
1232	 * The only way the dquot is already flush locked by the time quotacheck
1233	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1234	 * it for the final time. Quotacheck collects all dquot bufs in the
1235	 * local delwri queue before dquots are dirtied, so reclaim can't have
1236	 * possibly queued it for I/O. The only way out is to push the buffer to
1237	 * cycle the flush lock.
1238	 */
1239	if (!xfs_dqflock_nowait(dqp)) {
1240		/* buf is pinned in-core by delwri list */
1241		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1242				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1243		if (error)
1244			goto out_unlock;
1245
1246		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1247			error = -EAGAIN;
1248			xfs_buf_relse(bp);
1249			goto out_unlock;
1250		}
1251		xfs_buf_unlock(bp);
1252
1253		xfs_buf_delwri_pushbuf(bp, buffer_list);
1254		xfs_buf_rele(bp);
1255
1256		error = -EAGAIN;
1257		goto out_unlock;
1258	}
1259
1260	error = xfs_qm_dqflush(dqp, &bp);
1261	if (error)
1262		goto out_unlock;
1263
1264	xfs_buf_delwri_queue(bp, buffer_list);
1265	xfs_buf_relse(bp);
1266out_unlock:
1267	xfs_dqunlock(dqp);
1268	return error;
1269}
1270
1271/*
1272 * Walk through all the filesystem inodes and construct a consistent view
1273 * of the disk quota world. If the quotacheck fails, disable quotas.
1274 */
1275STATIC int
1276xfs_qm_quotacheck(
1277	xfs_mount_t	*mp)
1278{
1279	int			error, error2;
1280	uint			flags;
1281	LIST_HEAD		(buffer_list);
1282	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1283	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1284	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1285
1286	flags = 0;
1287
1288	ASSERT(uip || gip || pip);
1289	ASSERT(XFS_IS_QUOTA_ON(mp));
1290
1291	xfs_notice(mp, "Quotacheck needed: Please wait.");
1292
1293	/*
1294	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1295	 * their counters to zero. We need a clean slate.
1296	 * We don't log our changes till later.
1297	 */
1298	if (uip) {
1299		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1300					 &buffer_list);
1301		if (error)
1302			goto error_return;
1303		flags |= XFS_UQUOTA_CHKD;
1304	}
1305
1306	if (gip) {
1307		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1308					 &buffer_list);
1309		if (error)
1310			goto error_return;
1311		flags |= XFS_GQUOTA_CHKD;
1312	}
1313
1314	if (pip) {
1315		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1316					 &buffer_list);
1317		if (error)
1318			goto error_return;
1319		flags |= XFS_PQUOTA_CHKD;
1320	}
1321
1322	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1323			NULL);
1324	if (error) {
1325		/*
1326		 * The inode walk may have partially populated the dquot
1327		 * caches.  We must purge them before disabling quota and
1328		 * tearing down the quotainfo, or else the dquots will leak.
1329		 */
1330		xfs_qm_dqpurge_all(mp);
1331		goto error_return;
1332	}
1333
1334	/*
1335	 * We've made all the changes that we need to make incore.  Flush them
1336	 * down to disk buffers if everything was updated successfully.
1337	 */
1338	if (XFS_IS_UQUOTA_ON(mp)) {
1339		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1340					  &buffer_list);
1341	}
1342	if (XFS_IS_GQUOTA_ON(mp)) {
1343		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1344					   &buffer_list);
1345		if (!error)
1346			error = error2;
1347	}
1348	if (XFS_IS_PQUOTA_ON(mp)) {
1349		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1350					   &buffer_list);
1351		if (!error)
1352			error = error2;
1353	}
1354
1355	error2 = xfs_buf_delwri_submit(&buffer_list);
1356	if (!error)
1357		error = error2;
1358
1359	/*
1360	 * We can get this error if we couldn't do a dquot allocation inside
1361	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1362	 * dirty dquots that might be cached, we just want to get rid of them
1363	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1364	 * at this point (because we intentionally didn't in dqget_noattach).
1365	 */
1366	if (error) {
1367		xfs_qm_dqpurge_all(mp);
1368		goto error_return;
1369	}
1370
1371	/*
1372	 * If one type of quota is off, then it will lose its
1373	 * quotachecked status, since we won't be doing accounting for
1374	 * that type anymore.
1375	 */
1376	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1377	mp->m_qflags |= flags;
1378
1379 error_return:
1380	xfs_buf_delwri_cancel(&buffer_list);
1381
1382	if (error) {
1383		xfs_warn(mp,
1384	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1385			error);
1386		/*
1387		 * We must turn off quotas.
1388		 */
1389		ASSERT(mp->m_quotainfo != NULL);
1390		xfs_qm_destroy_quotainfo(mp);
1391		if (xfs_mount_reset_sbqflags(mp)) {
1392			xfs_warn(mp,
1393				"Quotacheck: Failed to reset quota flags.");
1394		}
1395	} else
1396		xfs_notice(mp, "Quotacheck: Done.");
1397	return error;
1398}
1399
1400/*
1401 * This is called from xfs_mountfs to start quotas and initialize all
1402 * necessary data structures like quotainfo.  This is also responsible for
1403 * running a quotacheck as necessary.  We are guaranteed that the superblock
1404 * is consistently read in at this point.
1405 *
1406 * If we fail here, the mount will continue with quota turned off. We don't
1407 * need to indicate success or failure at all.
1408 */
1409void
1410xfs_qm_mount_quotas(
1411	struct xfs_mount	*mp)
1412{
1413	int			error = 0;
1414	uint			sbf;
1415
1416	/*
1417	 * If quotas on realtime volumes are not supported, we disable
1418	 * quotas immediately.
1419	 */
1420	if (mp->m_sb.sb_rextents) {
1421		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1422		mp->m_qflags = 0;
1423		goto write_changes;
1424	}
1425
1426	ASSERT(XFS_IS_QUOTA_ON(mp));
1427
1428	/*
1429	 * Allocate the quotainfo structure inside the mount struct, and
1430	 * create quotainode(s), and change/rev superblock if necessary.
1431	 */
1432	error = xfs_qm_init_quotainfo(mp);
1433	if (error) {
1434		/*
1435		 * We must turn off quotas.
1436		 */
1437		ASSERT(mp->m_quotainfo == NULL);
1438		mp->m_qflags = 0;
1439		goto write_changes;
1440	}
1441	/*
1442	 * If any of the quotas are not consistent, do a quotacheck.
1443	 */
1444	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1445		error = xfs_qm_quotacheck(mp);
1446		if (error) {
1447			/* Quotacheck failed and disabled quotas. */
1448			return;
1449		}
1450	}
1451	/*
1452	 * If one type of quota is off, then it will lose its
1453	 * quotachecked status, since we won't be doing accounting for
1454	 * that type anymore.
1455	 */
1456	if (!XFS_IS_UQUOTA_ON(mp))
1457		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1458	if (!XFS_IS_GQUOTA_ON(mp))
1459		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1460	if (!XFS_IS_PQUOTA_ON(mp))
1461		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1462
1463 write_changes:
1464	/*
1465	 * We actually don't have to acquire the m_sb_lock at all.
1466	 * This can only be called from mount, and that's single threaded. XXX
1467	 */
1468	spin_lock(&mp->m_sb_lock);
1469	sbf = mp->m_sb.sb_qflags;
1470	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1471	spin_unlock(&mp->m_sb_lock);
1472
1473	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1474		if (xfs_sync_sb(mp, false)) {
1475			/*
1476			 * We could only have been turning quotas off.
1477			 * We aren't in very good shape actually because
1478			 * the incore structures are convinced that quotas are
1479			 * off, but the on-disk superblock doesn't know that!
1480			 */
1481			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1482			xfs_alert(mp, "%s: Superblock update failed!",
1483				__func__);
1484		}
1485	}
1486
1487	if (error) {
1488		xfs_warn(mp, "Failed to initialize disk quotas.");
1489		return;
1490	}
1491}
1492
1493/*
1494 * This is called after the superblock has been read in and we're ready to
1495 * iget the quota inodes.
1496 */
1497STATIC int
1498xfs_qm_init_quotainos(
1499	xfs_mount_t	*mp)
1500{
1501	struct xfs_inode	*uip = NULL;
1502	struct xfs_inode	*gip = NULL;
1503	struct xfs_inode	*pip = NULL;
1504	int			error;
1505	uint			flags = 0;
1506
1507	ASSERT(mp->m_quotainfo);
1508
1509	/*
1510	 * Get the uquota and gquota inodes
1511	 */
1512	if (xfs_has_quota(mp)) {
1513		if (XFS_IS_UQUOTA_ON(mp) &&
1514		    mp->m_sb.sb_uquotino != NULLFSINO) {
1515			ASSERT(mp->m_sb.sb_uquotino > 0);
1516			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1517					     0, 0, &uip);
1518			if (error)
1519				return error;
1520		}
1521		if (XFS_IS_GQUOTA_ON(mp) &&
1522		    mp->m_sb.sb_gquotino != NULLFSINO) {
1523			ASSERT(mp->m_sb.sb_gquotino > 0);
1524			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1525					     0, 0, &gip);
1526			if (error)
1527				goto error_rele;
1528		}
1529		if (XFS_IS_PQUOTA_ON(mp) &&
1530		    mp->m_sb.sb_pquotino != NULLFSINO) {
1531			ASSERT(mp->m_sb.sb_pquotino > 0);
1532			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1533					     0, 0, &pip);
1534			if (error)
1535				goto error_rele;
1536		}
1537	} else {
1538		flags |= XFS_QMOPT_SBVERSION;
1539	}
1540
1541	/*
1542	 * Create the three inodes, if they don't exist already. The changes
1543	 * made above will get added to a transaction and logged in one of
1544	 * the qino_alloc calls below.  If the device is readonly,
1545	 * temporarily switch to read-write to do this.
1546	 */
1547	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1548		error = xfs_qm_qino_alloc(mp, &uip,
1549					      flags | XFS_QMOPT_UQUOTA);
1550		if (error)
1551			goto error_rele;
1552
1553		flags &= ~XFS_QMOPT_SBVERSION;
1554	}
1555	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1556		error = xfs_qm_qino_alloc(mp, &gip,
1557					  flags | XFS_QMOPT_GQUOTA);
1558		if (error)
1559			goto error_rele;
1560
1561		flags &= ~XFS_QMOPT_SBVERSION;
1562	}
1563	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1564		error = xfs_qm_qino_alloc(mp, &pip,
1565					  flags | XFS_QMOPT_PQUOTA);
1566		if (error)
1567			goto error_rele;
1568	}
1569
1570	mp->m_quotainfo->qi_uquotaip = uip;
1571	mp->m_quotainfo->qi_gquotaip = gip;
1572	mp->m_quotainfo->qi_pquotaip = pip;
1573
1574	return 0;
1575
1576error_rele:
1577	if (uip)
1578		xfs_irele(uip);
1579	if (gip)
1580		xfs_irele(gip);
1581	if (pip)
1582		xfs_irele(pip);
1583	return error;
1584}
1585
1586STATIC void
1587xfs_qm_destroy_quotainos(
1588	struct xfs_quotainfo	*qi)
1589{
1590	if (qi->qi_uquotaip) {
1591		xfs_irele(qi->qi_uquotaip);
1592		qi->qi_uquotaip = NULL; /* paranoia */
1593	}
1594	if (qi->qi_gquotaip) {
1595		xfs_irele(qi->qi_gquotaip);
1596		qi->qi_gquotaip = NULL;
1597	}
1598	if (qi->qi_pquotaip) {
1599		xfs_irele(qi->qi_pquotaip);
1600		qi->qi_pquotaip = NULL;
1601	}
1602}
1603
1604STATIC void
1605xfs_qm_dqfree_one(
1606	struct xfs_dquot	*dqp)
1607{
1608	struct xfs_mount	*mp = dqp->q_mount;
1609	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1610
1611	mutex_lock(&qi->qi_tree_lock);
1612	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1613
1614	qi->qi_dquots--;
1615	mutex_unlock(&qi->qi_tree_lock);
1616
1617	xfs_qm_dqdestroy(dqp);
1618}
1619
1620/* --------------- utility functions for vnodeops ---------------- */
1621
1622
1623/*
1624 * Given an inode, a uid, gid and prid, make sure that we have
1625 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1626 * quotas by creating this file.
1627 * This also attaches dquot(s) to the given inode after locking it,
1628 * and returns the dquots corresponding to the uid and/or gid.
1629 *
1630 * in	: inode (unlocked)
1631 * out	: udquot, gdquot with references taken and unlocked
1632 */
1633int
1634xfs_qm_vop_dqalloc(
1635	struct xfs_inode	*ip,
1636	kuid_t			uid,
1637	kgid_t			gid,
1638	prid_t			prid,
1639	uint			flags,
1640	struct xfs_dquot	**O_udqpp,
1641	struct xfs_dquot	**O_gdqpp,
1642	struct xfs_dquot	**O_pdqpp)
1643{
1644	struct xfs_mount	*mp = ip->i_mount;
1645	struct inode		*inode = VFS_I(ip);
1646	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1647	struct xfs_dquot	*uq = NULL;
1648	struct xfs_dquot	*gq = NULL;
1649	struct xfs_dquot	*pq = NULL;
1650	int			error;
1651	uint			lockflags;
1652
1653	if (!XFS_IS_QUOTA_ON(mp))
1654		return 0;
1655
1656	lockflags = XFS_ILOCK_EXCL;
1657	xfs_ilock(ip, lockflags);
1658
1659	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1660		gid = inode->i_gid;
1661
1662	/*
1663	 * Attach the dquot(s) to this inode, doing a dquot allocation
1664	 * if necessary. The dquot(s) will not be locked.
1665	 */
1666	if (XFS_NOT_DQATTACHED(mp, ip)) {
1667		error = xfs_qm_dqattach_locked(ip, true);
1668		if (error) {
1669			xfs_iunlock(ip, lockflags);
1670			return error;
1671		}
1672	}
1673
1674	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1675		ASSERT(O_udqpp);
1676		if (!uid_eq(inode->i_uid, uid)) {
1677			/*
1678			 * What we need is the dquot that has this uid, and
1679			 * if we send the inode to dqget, the uid of the inode
1680			 * takes priority over what's sent in the uid argument.
1681			 * We must unlock the inode here before calling dqget if
1682			 * we're not sending the inode, because otherwise
1683			 * we'll deadlock by doing trans_reserve while
1684			 * holding ilock.
1685			 */
1686			xfs_iunlock(ip, lockflags);
1687			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1688					XFS_DQTYPE_USER, true, &uq);
1689			if (error) {
1690				ASSERT(error != -ENOENT);
1691				return error;
1692			}
1693			/*
1694			 * Get the ilock in the right order.
1695			 */
1696			xfs_dqunlock(uq);
1697			lockflags = XFS_ILOCK_SHARED;
1698			xfs_ilock(ip, lockflags);
1699		} else {
1700			/*
1701			 * Take an extra reference, because we'll return
1702			 * this to the caller.
1703			 */
1704			ASSERT(ip->i_udquot);
1705			uq = xfs_qm_dqhold(ip->i_udquot);
1706		}
1707	}
1708	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1709		ASSERT(O_gdqpp);
1710		if (!gid_eq(inode->i_gid, gid)) {
1711			xfs_iunlock(ip, lockflags);
1712			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1713					XFS_DQTYPE_GROUP, true, &gq);
1714			if (error) {
1715				ASSERT(error != -ENOENT);
1716				goto error_rele;
1717			}
1718			xfs_dqunlock(gq);
1719			lockflags = XFS_ILOCK_SHARED;
1720			xfs_ilock(ip, lockflags);
1721		} else {
1722			ASSERT(ip->i_gdquot);
1723			gq = xfs_qm_dqhold(ip->i_gdquot);
1724		}
1725	}
1726	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1727		ASSERT(O_pdqpp);
1728		if (ip->i_projid != prid) {
1729			xfs_iunlock(ip, lockflags);
1730			error = xfs_qm_dqget(mp, prid,
1731					XFS_DQTYPE_PROJ, true, &pq);
1732			if (error) {
1733				ASSERT(error != -ENOENT);
1734				goto error_rele;
1735			}
1736			xfs_dqunlock(pq);
1737			lockflags = XFS_ILOCK_SHARED;
1738			xfs_ilock(ip, lockflags);
1739		} else {
1740			ASSERT(ip->i_pdquot);
1741			pq = xfs_qm_dqhold(ip->i_pdquot);
1742		}
1743	}
1744	trace_xfs_dquot_dqalloc(ip);
1745
1746	xfs_iunlock(ip, lockflags);
1747	if (O_udqpp)
1748		*O_udqpp = uq;
1749	else
1750		xfs_qm_dqrele(uq);
1751	if (O_gdqpp)
1752		*O_gdqpp = gq;
1753	else
1754		xfs_qm_dqrele(gq);
1755	if (O_pdqpp)
1756		*O_pdqpp = pq;
1757	else
1758		xfs_qm_dqrele(pq);
1759	return 0;
1760
1761error_rele:
1762	xfs_qm_dqrele(gq);
1763	xfs_qm_dqrele(uq);
1764	return error;
1765}
1766
1767/*
1768 * Actually transfer ownership, and do dquot modifications.
1769 * These were already reserved.
1770 */
1771struct xfs_dquot *
1772xfs_qm_vop_chown(
1773	struct xfs_trans	*tp,
1774	struct xfs_inode	*ip,
1775	struct xfs_dquot	**IO_olddq,
1776	struct xfs_dquot	*newdq)
1777{
1778	struct xfs_dquot	*prevdq;
1779	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1780				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1781
1782
1783	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1784	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1785
1786	/* old dquot */
1787	prevdq = *IO_olddq;
1788	ASSERT(prevdq);
1789	ASSERT(prevdq != newdq);
1790
1791	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1792	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1793
1794	/* the sparkling new dquot */
1795	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1796	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1797
1798	/*
1799	 * Back when we made quota reservations for the chown, we reserved the
1800	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
1801	 * switched the dquots, decrease the new dquot's block reservation
1802	 * (having already bumped up the real counter) so that we don't have
1803	 * any reservation to give back when we commit.
1804	 */
1805	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1806			-ip->i_delayed_blks);
1807
1808	/*
1809	 * Give the incore reservation for delalloc blocks back to the old
1810	 * dquot.  We don't normally handle delalloc quota reservations
1811	 * transactionally, so just lock the dquot and subtract from the
1812	 * reservation.  Dirty the transaction because it's too late to turn
1813	 * back now.
1814	 */
1815	tp->t_flags |= XFS_TRANS_DIRTY;
1816	xfs_dqlock(prevdq);
1817	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1818	prevdq->q_blk.reserved -= ip->i_delayed_blks;
1819	xfs_dqunlock(prevdq);
1820
1821	/*
1822	 * Take an extra reference, because the inode is going to keep
1823	 * this dquot pointer even after the trans_commit.
1824	 */
1825	*IO_olddq = xfs_qm_dqhold(newdq);
1826
1827	return prevdq;
1828}
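/*
 * Worked example (hypothetical numbers) for the reservation dance
 * above: with i_nblocks == 100 and i_delayed_blks == 8, the old dquot
 * is debited 100 blocks and one inode, the new dquot is credited the
 * same, the new dquot's block reservation drops by 8 (those blocks
 * were reserved at chown time and will be counted for real later),
 * and the old dquot's incore q_blk.reserved drops by 8 to give back
 * the delalloc reservation it still held.
 */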
1829
1830int
1831xfs_qm_vop_rename_dqattach(
1832	struct xfs_inode	**i_tab)
1833{
1834	struct xfs_mount	*mp = i_tab[0]->i_mount;
1835	int			i;
1836
1837	if (!XFS_IS_QUOTA_ON(mp))
1838		return 0;
1839
1840	for (i = 0; (i < 4 && i_tab[i]); i++) {
1841		struct xfs_inode	*ip = i_tab[i];
1842		int			error;
1843
1844		/*
1845		 * Watch out for duplicate entries in the table.
1846		 */
1847		if (i == 0 || ip != i_tab[i-1]) {
1848			if (XFS_NOT_DQATTACHED(mp, ip)) {
1849				error = xfs_qm_dqattach(ip);
1850				if (error)
1851					return error;
1852			}
1853		}
1854	}
1855	return 0;
1856}
1857
1858void
1859xfs_qm_vop_create_dqattach(
1860	struct xfs_trans	*tp,
1861	struct xfs_inode	*ip,
1862	struct xfs_dquot	*udqp,
1863	struct xfs_dquot	*gdqp,
1864	struct xfs_dquot	*pdqp)
1865{
1866	struct xfs_mount	*mp = tp->t_mountp;
1867
1868	if (!XFS_IS_QUOTA_ON(mp))
1869		return;
1870
1871	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1872
1873	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1874		ASSERT(ip->i_udquot == NULL);
1875		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1876
1877		ip->i_udquot = xfs_qm_dqhold(udqp);
1878		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1879	}
1880	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1881		ASSERT(ip->i_gdquot == NULL);
1882		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1883
1884		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1885		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1886	}
1887	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1888		ASSERT(ip->i_pdquot == NULL);
1889		ASSERT(ip->i_projid == pdqp->q_id);
1890
1891		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1892		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1893	}
1894}
1895
1896/* Decide if this inode's dquot is near an enforcement boundary. */
1897bool
1898xfs_inode_near_dquot_enforcement(
1899	struct xfs_inode	*ip,
1900	xfs_dqtype_t		type)
1901{
1902	struct xfs_dquot	*dqp;
1903	int64_t			freesp;
1904
1905	/* We only care for quotas that are enabled and enforced. */
1906	dqp = xfs_inode_dquot(ip, type);
1907	if (!dqp || !xfs_dquot_is_enforced(dqp))
1908		return false;
1909
1910	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1911	    xfs_dquot_res_over_limits(&dqp->q_rtb))
1912		return true;
1913
1914	/* For space on the data device, check the various thresholds. */
1915	if (!dqp->q_prealloc_hi_wmark)
1916		return false;
1917
1918	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1919		return false;
1920
1921	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1922		return true;
1923
1924	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1925	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1926		return true;
1927
1928	return false;
1929}
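/*
 * Worked example (hypothetical numbers) for the thresholds above: with
 * q_prealloc_lo_wmark == 900, q_prealloc_hi_wmark == 1000 and
 * q_low_space[XFS_QLOWSP_5_PCNT] == 50, a q_blk.reserved of 800 is not
 * near enforcement (below the low watermark), 1000 is (at the high
 * watermark), and 960 is too, because freesp == 40 < 50.
 */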