v4.6
 
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_sb.h"
  26#include "xfs_mount.h"
  27#include "xfs_inode.h"
  28#include "xfs_ialloc.h"
  29#include "xfs_itable.h"
  30#include "xfs_quota.h"
  31#include "xfs_error.h"
  32#include "xfs_bmap.h"
  33#include "xfs_bmap_btree.h"
  34#include "xfs_trans.h"
  35#include "xfs_trans_space.h"
  36#include "xfs_qm.h"
  37#include "xfs_trace.h"
  38#include "xfs_icache.h"
  39#include "xfs_cksum.h"
  40
  41/*
  42 * The global quota manager. There is only one of these for the entire
  43 * system, _not_ one per file system. XQM keeps track of the overall
  44 * quota functionality, including maintaining the freelist and hash
  45 * tables of dquots.
  46 */
  47STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
  48STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
  49
  50
  51STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  52/*
  53 * We use the batch lookup interface to iterate over the dquots as it
  54 * currently is the only interface into the radix tree code that allows
  55 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
   56 * operations is fine as all callers run only during mount/umount
  57 * or quotaoff.
  58 */
  59#define XFS_DQ_LOOKUP_BATCH	32
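/*
 * A note on how xfs_qm_dquot_walk() below uses this: each pass takes
 * qi_tree_lock, gang-looks-up at most XFS_DQ_LOOKUP_BATCH dquots, and
 * advances next_index past the id of every dquot it visited so the next
 * pass resumes where this one stopped.  A callback returning -EAGAIN
 * marks the dquot as skipped and causes the whole walk to be retried
 * after a short delay, while -EFSCORRUPTED aborts the walk immediately.
 */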
  60
  61STATIC int
  62xfs_qm_dquot_walk(
  63	struct xfs_mount	*mp,
  64	int			type,
  65	int			(*execute)(struct xfs_dquot *dqp, void *data),
  66	void			*data)
  67{
  68	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  69	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  70	uint32_t		next_index;
  71	int			last_error = 0;
  72	int			skipped;
  73	int			nr_found;
  74
  75restart:
  76	skipped = 0;
  77	next_index = 0;
  78	nr_found = 0;
  79
  80	while (1) {
  81		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  82		int		error = 0;
  83		int		i;
  84
  85		mutex_lock(&qi->qi_tree_lock);
  86		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  87					next_index, XFS_DQ_LOOKUP_BATCH);
  88		if (!nr_found) {
  89			mutex_unlock(&qi->qi_tree_lock);
  90			break;
  91		}
  92
  93		for (i = 0; i < nr_found; i++) {
  94			struct xfs_dquot *dqp = batch[i];
  95
  96			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
  97
  98			error = execute(batch[i], data);
  99			if (error == -EAGAIN) {
 100				skipped++;
 101				continue;
 102			}
 103			if (error && last_error != -EFSCORRUPTED)
 104				last_error = error;
 105		}
 106
 107		mutex_unlock(&qi->qi_tree_lock);
 108
 109		/* bail out if the filesystem is corrupted.  */
 110		if (last_error == -EFSCORRUPTED) {
 111			skipped = 0;
 112			break;
 113		}
 114	}
 115
 116	if (skipped) {
 117		delay(1);
 118		goto restart;
 119	}
 120
 121	return last_error;
 122}
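/*
 * Two users of the walk above live in this file: xfs_qm_dqpurge_all()
 * runs it with xfs_qm_dqpurge() when quotas are turned off or the
 * filesystem is unmounted, and xfs_qm_quotacheck() runs it with
 * xfs_qm_flush_one() to write the rebuilt dquots back to their buffers.
 */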
 123
 124
 125/*
 126 * Purge a dquot from all tracking data structures and free it.
 127 */
 128STATIC int
 129xfs_qm_dqpurge(
 130	struct xfs_dquot	*dqp,
 131	void			*data)
 132{
 133	struct xfs_mount	*mp = dqp->q_mount;
 134	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 135
 136	xfs_dqlock(dqp);
 137	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
 138		xfs_dqunlock(dqp);
 139		return -EAGAIN;
 140	}
 141
 142	dqp->dq_flags |= XFS_DQ_FREEING;
 143
 144	xfs_dqflock(dqp);
 145
 146	/*
 147	 * If we are turning this type of quotas off, we don't care
 148	 * about the dirty metadata sitting in this dquot. OTOH, if
 149	 * we're unmounting, we do care, so we flush it and wait.
 150	 */
 151	if (XFS_DQ_IS_DIRTY(dqp)) {
 152		struct xfs_buf	*bp = NULL;
 153		int		error;
 154
 155		/*
 156		 * We don't care about getting disk errors here. We need
 157		 * to purge this dquot anyway, so we go ahead regardless.
 158		 */
 159		error = xfs_qm_dqflush(dqp, &bp);
 160		if (error) {
 161			xfs_warn(mp, "%s: dquot %p flush failed",
 162				__func__, dqp);
 163		} else {
 164			error = xfs_bwrite(bp);
 165			xfs_buf_relse(bp);
 166		}
 167		xfs_dqflock(dqp);
 168	}
 169
 170	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 171	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 172	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
 173
 174	xfs_dqfunlock(dqp);
 175	xfs_dqunlock(dqp);
 176
 177	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
 178			  be32_to_cpu(dqp->q_core.d_id));
 179	qi->qi_dquots--;
 180
 181	/*
 182	 * We move dquots to the freelist as soon as their reference count
 183	 * hits zero, so it really should be on the freelist here.
 184	 */
 185	ASSERT(!list_empty(&dqp->q_lru));
 186	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 187	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 188
 189	xfs_qm_dqdestroy(dqp);
 190	return 0;
 191}
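/*
 * XFS_DQ_FREEING is the point of no return for a dquot: the purge path
 * above and the shrinker's isolate callback below both set it under the
 * dquot lock once the reference count is zero, and other paths in this
 * file (a concurrent purge, xfs_qm_flush_one()) back off when they see
 * the flag.
 */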
 192
 193/*
 194 * Purge the dquot cache.
 195 */
 196void
 197xfs_qm_dqpurge_all(
 198	struct xfs_mount	*mp,
 199	uint			flags)
 200{
 201	if (flags & XFS_QMOPT_UQUOTA)
 202		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
 203	if (flags & XFS_QMOPT_GQUOTA)
 204		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 205	if (flags & XFS_QMOPT_PQUOTA)
 206		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
 207}
 208
 209/*
 210 * Just destroy the quotainfo structure.
 211 */
 212void
 213xfs_qm_unmount(
 214	struct xfs_mount	*mp)
 215{
 216	if (mp->m_quotainfo) {
 217		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 218		xfs_qm_destroy_quotainfo(mp);
 219	}
 220}
 221
 222/*
 223 * Called from the vfsops layer.
 224 */
 225void
 226xfs_qm_unmount_quotas(
 227	xfs_mount_t	*mp)
 228{
 229	/*
 230	 * Release the dquots that root inode, et al might be holding,
 231	 * before we flush quotas and blow away the quotainfo structure.
 232	 */
 233	ASSERT(mp->m_rootip);
 234	xfs_qm_dqdetach(mp->m_rootip);
 235	if (mp->m_rbmip)
 236		xfs_qm_dqdetach(mp->m_rbmip);
 237	if (mp->m_rsumip)
 238		xfs_qm_dqdetach(mp->m_rsumip);
 239
 240	/*
 241	 * Release the quota inodes.
 242	 */
 243	if (mp->m_quotainfo) {
 244		if (mp->m_quotainfo->qi_uquotaip) {
 245			IRELE(mp->m_quotainfo->qi_uquotaip);
 246			mp->m_quotainfo->qi_uquotaip = NULL;
 247		}
 248		if (mp->m_quotainfo->qi_gquotaip) {
 249			IRELE(mp->m_quotainfo->qi_gquotaip);
 250			mp->m_quotainfo->qi_gquotaip = NULL;
 251		}
 252		if (mp->m_quotainfo->qi_pquotaip) {
 253			IRELE(mp->m_quotainfo->qi_pquotaip);
 254			mp->m_quotainfo->qi_pquotaip = NULL;
 255		}
 256	}
 257}
 258
 259STATIC int
 260xfs_qm_dqattach_one(
 261	xfs_inode_t	*ip,
 262	xfs_dqid_t	id,
 263	uint		type,
 264	uint		doalloc,
 265	xfs_dquot_t	**IO_idqpp)
 266{
 267	xfs_dquot_t	*dqp;
 268	int		error;
 269
 270	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 271	error = 0;
 272
 273	/*
 274	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 275	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 276	 * simpler.
 277	 */
 278	dqp = *IO_idqpp;
 279	if (dqp) {
 280		trace_xfs_dqattach_found(dqp);
 281		return 0;
 282	}
 283
 284	/*
 285	 * Find the dquot from somewhere. This bumps the reference count of
 286	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 287	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 288	 * turned off suddenly.
 289	 */
 290	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
 291			     doalloc | XFS_QMOPT_DOWARN, &dqp);
 292	if (error)
 293		return error;
 294
 295	trace_xfs_dqattach_get(dqp);
 296
 297	/*
 298	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 299	 * that the dquot returned is the one that should go in the inode.
 300	 */
 301	*IO_idqpp = dqp;
 302	xfs_dqunlock(dqp);
 303	return 0;
 304}
 305
 306static bool
 307xfs_qm_need_dqattach(
 308	struct xfs_inode	*ip)
 309{
 310	struct xfs_mount	*mp = ip->i_mount;
 311
 312	if (!XFS_IS_QUOTA_RUNNING(mp))
 313		return false;
 314	if (!XFS_IS_QUOTA_ON(mp))
 315		return false;
 316	if (!XFS_NOT_DQATTACHED(mp, ip))
 317		return false;
 318	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 319		return false;
 320	return true;
 321}
 322
 323/*
 324 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 325 * into account.
 326 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 327 * Inode may get unlocked and relocked in here, and the caller must deal with
 328 * the consequences.
 329 */
 330int
 331xfs_qm_dqattach_locked(
 332	xfs_inode_t	*ip,
 333	uint		flags)
 334{
 335	xfs_mount_t	*mp = ip->i_mount;
 336	int		error = 0;
 337
 338	if (!xfs_qm_need_dqattach(ip))
 339		return 0;
 340
 341	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 342
 343	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 344		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
 345						flags & XFS_QMOPT_DQALLOC,
 346						&ip->i_udquot);
 347		if (error)
 348			goto done;
 349		ASSERT(ip->i_udquot);
 350	}
 351
 352	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 353		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
 354						flags & XFS_QMOPT_DQALLOC,
 355						&ip->i_gdquot);
 356		if (error)
 357			goto done;
 358		ASSERT(ip->i_gdquot);
 359	}
 360
 361	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 362		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
 363						flags & XFS_QMOPT_DQALLOC,
 364						&ip->i_pdquot);
 365		if (error)
 366			goto done;
 367		ASSERT(ip->i_pdquot);
 368	}
 369
 370done:
 371	/*
 372	 * Don't worry about the dquots that we may have attached before any
 373	 * error - they'll get detached later if it has not already been done.
 374	 */
 375	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 376	return error;
 377}
 378
 379int
 380xfs_qm_dqattach(
 381	struct xfs_inode	*ip,
 382	uint			flags)
 383{
 384	int			error;
 385
 386	if (!xfs_qm_need_dqattach(ip))
 387		return 0;
 388
 389	xfs_ilock(ip, XFS_ILOCK_EXCL);
 390	error = xfs_qm_dqattach_locked(ip, flags);
 391	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 392
 393	return error;
 394}
 395
 396/*
 397 * Release dquots (and their references) if any.
  398 * The inode should be locked EXCL except when this is called by
 399 * xfs_ireclaim.
 400 */
 401void
 402xfs_qm_dqdetach(
 403	xfs_inode_t	*ip)
 404{
 405	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 406		return;
 407
 408	trace_xfs_dquot_dqdetach(ip);
 409
 410	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 411	if (ip->i_udquot) {
 412		xfs_qm_dqrele(ip->i_udquot);
 413		ip->i_udquot = NULL;
 414	}
 415	if (ip->i_gdquot) {
 416		xfs_qm_dqrele(ip->i_gdquot);
 417		ip->i_gdquot = NULL;
 418	}
 419	if (ip->i_pdquot) {
 420		xfs_qm_dqrele(ip->i_pdquot);
 421		ip->i_pdquot = NULL;
 422	}
 423}
 424
 425struct xfs_qm_isolate {
 426	struct list_head	buffers;
 427	struct list_head	dispose;
 428};
 429
 430static enum lru_status
 431xfs_qm_dquot_isolate(
 432	struct list_head	*item,
 433	struct list_lru_one	*lru,
 434	spinlock_t		*lru_lock,
 435	void			*arg)
 436		__releases(lru_lock) __acquires(lru_lock)
 437{
 438	struct xfs_dquot	*dqp = container_of(item,
 439						struct xfs_dquot, q_lru);
 440	struct xfs_qm_isolate	*isol = arg;
 441
 442	if (!xfs_dqlock_nowait(dqp))
 443		goto out_miss_busy;
 444
 445	/*
  446	 * This dquot has acquired a reference in the meantime; remove it from
 447	 * the freelist and try again.
 448	 */
 449	if (dqp->q_nrefs) {
 450		xfs_dqunlock(dqp);
 451		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 452
 453		trace_xfs_dqreclaim_want(dqp);
 454		list_lru_isolate(lru, &dqp->q_lru);
 455		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 456		return LRU_REMOVED;
 457	}
 458
 459	/*
 460	 * If the dquot is dirty, flush it. If it's already being flushed, just
 461	 * skip it so there is time for the IO to complete before we try to
 462	 * reclaim it again on the next LRU pass.
 463	 */
 464	if (!xfs_dqflock_nowait(dqp)) {
 465		xfs_dqunlock(dqp);
 466		goto out_miss_busy;
 467	}
 468
 469	if (XFS_DQ_IS_DIRTY(dqp)) {
 470		struct xfs_buf	*bp = NULL;
 471		int		error;
 472
 473		trace_xfs_dqreclaim_dirty(dqp);
 474
 475		/* we have to drop the LRU lock to flush the dquot */
 476		spin_unlock(lru_lock);
 477
 478		error = xfs_qm_dqflush(dqp, &bp);
 479		if (error) {
 480			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
 481				 __func__, dqp);
 482			goto out_unlock_dirty;
 483		}
 484
 485		xfs_buf_delwri_queue(bp, &isol->buffers);
 486		xfs_buf_relse(bp);
 487		goto out_unlock_dirty;
 488	}
 489	xfs_dqfunlock(dqp);
 490
 491	/*
 492	 * Prevent lookups now that we are past the point of no return.
 493	 */
 494	dqp->dq_flags |= XFS_DQ_FREEING;
 495	xfs_dqunlock(dqp);
 496
 497	ASSERT(dqp->q_nrefs == 0);
 498	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 499	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 500	trace_xfs_dqreclaim_done(dqp);
 501	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 502	return LRU_REMOVED;
 503
 504out_miss_busy:
 505	trace_xfs_dqreclaim_busy(dqp);
 506	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 507	return LRU_SKIP;
 508
 509out_unlock_dirty:
 510	trace_xfs_dqreclaim_busy(dqp);
 511	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 512	xfs_dqunlock(dqp);
 513	spin_lock(lru_lock);
 514	return LRU_RETRY;
 515}
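/*
 * Return values of the isolate callback above: LRU_REMOVED when the
 * dquot leaves the LRU, either because it regained a reference or
 * because it was moved to the dispose list for freeing; LRU_SKIP when
 * the dquot lock or flush lock could not be taken without blocking; and
 * LRU_RETRY for dirty dquots, where the LRU lock had to be dropped for
 * the flush and the entry should be revisited on a later pass.
 */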
 516
 517static unsigned long
 518xfs_qm_shrink_scan(
 519	struct shrinker		*shrink,
 520	struct shrink_control	*sc)
 521{
 522	struct xfs_quotainfo	*qi = container_of(shrink,
 523					struct xfs_quotainfo, qi_shrinker);
 524	struct xfs_qm_isolate	isol;
 525	unsigned long		freed;
 526	int			error;
 527
 528	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 529		return 0;
 530
 531	INIT_LIST_HEAD(&isol.buffers);
 532	INIT_LIST_HEAD(&isol.dispose);
 533
 534	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 535				     xfs_qm_dquot_isolate, &isol);
 536
 537	error = xfs_buf_delwri_submit(&isol.buffers);
 538	if (error)
 539		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 540
 541	while (!list_empty(&isol.dispose)) {
 542		struct xfs_dquot	*dqp;
 543
 544		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 545		list_del_init(&dqp->q_lru);
 546		xfs_qm_dqfree_one(dqp);
 547	}
 548
 549	return freed;
 550}
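/*
 * Reclaim is done in two phases: dquots are isolated onto the local
 * lists while list_lru_shrink_walk() holds the LRU lock, and only after
 * that are the queued buffers submitted and the dispose list freed.
 * The gfp_mask check above means the shrinker only does work for direct
 * reclaim contexts that are allowed to re-enter the filesystem.
 */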
 551
 552static unsigned long
 553xfs_qm_shrink_count(
 554	struct shrinker		*shrink,
 555	struct shrink_control	*sc)
 556{
 557	struct xfs_quotainfo	*qi = container_of(shrink,
 558					struct xfs_quotainfo, qi_shrinker);
 559
 560	return list_lru_shrink_count(&qi->qi_lru, sc);
 561}
 562
 563STATIC void
 564xfs_qm_set_defquota(
 565	xfs_mount_t	*mp,
 566	uint		type,
 567	xfs_quotainfo_t	*qinf)
 568{
 569	xfs_dquot_t		*dqp;
 570	struct xfs_def_quota    *defq;
 571	int			error;
 572
 573	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
 574
 575	if (!error) {
 576		xfs_disk_dquot_t        *ddqp = &dqp->q_core;
 577
 578		defq = xfs_get_defquota(dqp, qinf);
 579
 580		/*
  581		 * Timers and warnings have already been set, let's just set the
 582		 * default limits for this quota type
 583		 */
 584		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
 585		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
 586		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
 587		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
 588		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
 589		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
 590		xfs_qm_dqdestroy(dqp);
 591	}
 592}
 593
 594/*
 595 * This initializes all the quota information that's kept in the
 596 * mount structure
 597 */
 598STATIC int
 599xfs_qm_init_quotainfo(
 600	xfs_mount_t	*mp)
 601{
 602	xfs_quotainfo_t *qinf;
 603	int		error;
 604	xfs_dquot_t	*dqp;
 605
 606	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 607
 608	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 609
 610	error = list_lru_init(&qinf->qi_lru);
 611	if (error)
 612		goto out_free_qinf;
 613
 614	/*
 615	 * See if quotainodes are setup, and if not, allocate them,
 616	 * and change the superblock accordingly.
 617	 */
 618	error = xfs_qm_init_quotainos(mp);
 619	if (error)
 620		goto out_free_lru;
 621
 622	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 623	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 624	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 625	mutex_init(&qinf->qi_tree_lock);
 626
 627	/* mutex used to serialize quotaoffs */
 628	mutex_init(&qinf->qi_quotaofflock);
 629
 630	/* Precalc some constants */
 631	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 632	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 633
 634	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 635
 636	/*
 637	 * We try to get the limits from the superuser's limits fields.
 638	 * This is quite hacky, but it is standard quota practice.
 639	 *
 640	 * Since we may not have done a quotacheck by this point, just read
 641	 * the dquot without attaching it to any hashtables or lists.
 642	 *
 643	 * Timers and warnings are globally set by the first timer found in
 644	 * user/group/proj quota types, otherwise a default value is used.
 645	 * This should be split into different fields per quota type.
 646	 */
 647	error = xfs_qm_dqread(mp, 0,
 648			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
 649			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
 650			  XFS_DQ_PROJ),
 651			XFS_QMOPT_DOWARN, &dqp);
 652
 653	if (!error) {
 654		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
 655
 656		/*
 657		 * The warnings and timers set the grace period given to
  658		 * a user or group before he or she can no longer perform
  659		 * any more writing. If it is zero, a default is used.
 660		 */
 661		qinf->qi_btimelimit = ddqp->d_btimer ?
 662			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
 663		qinf->qi_itimelimit = ddqp->d_itimer ?
 664			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
 665		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
 666			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
 667		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
 668			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
 669		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
 670			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
 671		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
 672			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
 673		xfs_qm_dqdestroy(dqp);
 674	} else {
 675		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
 676		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
 677		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
 678		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
 679		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
 680		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
 681	}
 682
 683	if (XFS_IS_UQUOTA_RUNNING(mp))
 684		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
 685	if (XFS_IS_GQUOTA_RUNNING(mp))
 686		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
 687	if (XFS_IS_PQUOTA_RUNNING(mp))
 688		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
 689
 690	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 691	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 692	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 693	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 694	register_shrinker(&qinf->qi_shrinker);
 695	return 0;
 696
 697out_free_lru:
 698	list_lru_destroy(&qinf->qi_lru);
 699out_free_qinf:
 700	kmem_free(qinf);
 701	mp->m_quotainfo = NULL;
 702	return error;
 703}
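/*
 * Setup order in xfs_qm_init_quotainfo() above: the LRU list and the
 * quota inodes come first, then the per-type radix trees and locks,
 * then the timer/warning limits and per-type default limits are read
 * from the id-0 dquots, and the shrinker is registered last.  The error
 * paths unwind in reverse, destroying the LRU, freeing the quotainfo
 * and clearing mp->m_quotainfo.
 */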
 704
 705
 706/*
 707 * Gets called when unmounting a filesystem or when all quotas get
 708 * turned off.
 709 * This purges the quota inodes, destroys locks and frees itself.
 710 */
 711void
 712xfs_qm_destroy_quotainfo(
 713	xfs_mount_t	*mp)
 714{
 715	xfs_quotainfo_t *qi;
 716
 717	qi = mp->m_quotainfo;
 718	ASSERT(qi != NULL);
 719
 720	unregister_shrinker(&qi->qi_shrinker);
 721	list_lru_destroy(&qi->qi_lru);
 722
 723	if (qi->qi_uquotaip) {
 724		IRELE(qi->qi_uquotaip);
 725		qi->qi_uquotaip = NULL; /* paranoia */
 726	}
 727	if (qi->qi_gquotaip) {
 728		IRELE(qi->qi_gquotaip);
 729		qi->qi_gquotaip = NULL;
 730	}
 731	if (qi->qi_pquotaip) {
 732		IRELE(qi->qi_pquotaip);
 733		qi->qi_pquotaip = NULL;
 734	}
 735	mutex_destroy(&qi->qi_quotaofflock);
 736	kmem_free(qi);
 737	mp->m_quotainfo = NULL;
 738}
 739
 740/*
  741 * Create an inode and return with a reference already taken, but unlocked.
  742 * This is how we create quota inodes.
 743 */
 744STATIC int
 745xfs_qm_qino_alloc(
 746	xfs_mount_t	*mp,
 747	xfs_inode_t	**ip,
 748	uint		flags)
 749{
 750	xfs_trans_t	*tp;
 751	int		error;
 752	int		committed;
 753	bool		need_alloc = true;
 754
 755	*ip = NULL;
 756	/*
  757	 * With a superblock that doesn't have a separate pquotino, we
 758	 * share an inode between gquota and pquota. If the on-disk
 759	 * superblock has GQUOTA and the filesystem is now mounted
 760	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 761	 * vice-versa.
 762	 */
 763	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 764			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 765		xfs_ino_t ino = NULLFSINO;
 766
 767		if ((flags & XFS_QMOPT_PQUOTA) &&
 768			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 769			ino = mp->m_sb.sb_gquotino;
 770			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
 771		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 772			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 773			ino = mp->m_sb.sb_pquotino;
 774			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
 775		}
 776		if (ino != NULLFSINO) {
 777			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
 778			if (error)
 779				return error;
 780			mp->m_sb.sb_gquotino = NULLFSINO;
 781			mp->m_sb.sb_pquotino = NULLFSINO;
 782			need_alloc = false;
 783		}
 784	}
 785
 786	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
 787	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
 788				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
 789	if (error) {
 790		xfs_trans_cancel(tp);
 791		return error;
 792	}
 793
 794	if (need_alloc) {
 795		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
 796								&committed);
 797		if (error) {
 798			xfs_trans_cancel(tp);
 799			return error;
 800		}
 801	}
 802
 803	/*
 804	 * Make the changes in the superblock, and log those too.
 805	 * sbfields arg may contain fields other than *QUOTINO;
 806	 * VERSIONNUM for example.
 807	 */
 808	spin_lock(&mp->m_sb_lock);
 809	if (flags & XFS_QMOPT_SBVERSION) {
 810		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 811
 812		xfs_sb_version_addquota(&mp->m_sb);
 813		mp->m_sb.sb_uquotino = NULLFSINO;
 814		mp->m_sb.sb_gquotino = NULLFSINO;
 815		mp->m_sb.sb_pquotino = NULLFSINO;
 816
 817		/* qflags will get updated fully _after_ quotacheck */
 818		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 819	}
 820	if (flags & XFS_QMOPT_UQUOTA)
 821		mp->m_sb.sb_uquotino = (*ip)->i_ino;
 822	else if (flags & XFS_QMOPT_GQUOTA)
 823		mp->m_sb.sb_gquotino = (*ip)->i_ino;
 824	else
 825		mp->m_sb.sb_pquotino = (*ip)->i_ino;
 826	spin_unlock(&mp->m_sb_lock);
 827	xfs_log_sb(tp);
 828
 829	error = xfs_trans_commit(tp);
 830	if (error) {
 831		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 832		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 833	}
 834	if (need_alloc)
 835		xfs_finish_inode_setup(*ip);
 836	return error;
 837}
 838
 839
 840STATIC void
 841xfs_qm_reset_dqcounts(
 842	xfs_mount_t	*mp,
 843	xfs_buf_t	*bp,
 844	xfs_dqid_t	id,
 845	uint		type)
 846{
 847	struct xfs_dqblk	*dqb;
 848	int			j;
 849
 850	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 851
 852	/*
 853	 * Reset all counters and timers. They'll be
 854	 * started afresh by xfs_qm_quotacheck.
 855	 */
 856#ifdef DEBUG
 857	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 858	do_div(j, sizeof(xfs_dqblk_t));
 859	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 860#endif
 861	dqb = bp->b_addr;
 862	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 863		struct xfs_disk_dquot	*ddq;
 864
 865		ddq = (struct xfs_disk_dquot *)&dqb[j];
 866
 867		/*
 868		 * Do a sanity check, and if needed, repair the dqblk. Don't
 869		 * output any warnings because it's perfectly possible to
 870		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
 871		 */
 872		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
 873			    "xfs_quotacheck");
 874		/*
 875		 * Reset type in case we are reusing group quota file for
 876		 * project quotas or vice versa
 877		 */
 878		ddq->d_flags = type;
 879		ddq->d_bcount = 0;
 880		ddq->d_icount = 0;
 881		ddq->d_rtbcount = 0;
 882		ddq->d_btimer = 0;
 883		ddq->d_itimer = 0;
 884		ddq->d_rtbtimer = 0;
 885		ddq->d_bwarns = 0;
 886		ddq->d_iwarns = 0;
 887		ddq->d_rtbwarns = 0;
 888
 889		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 890			xfs_update_cksum((char *)&dqb[j],
 891					 sizeof(struct xfs_dqblk),
 892					 XFS_DQUOT_CRC_OFF);
 893		}
 894	}
 895}
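/*
 * xfs_qm_reset_dqcounts() only zeroes the usage counters, timers and
 * warning counts; the hard and soft limits stored in each on-disk dquot
 * are left untouched, so a quotacheck rebuilds usage without disturbing
 * the configured limits.  On CRC-enabled filesystems the dquot block
 * checksum is recomputed after the in-place update.
 */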
 896
 897STATIC int
 898xfs_qm_dqiter_bufs(
 899	struct xfs_mount	*mp,
 900	xfs_dqid_t		firstid,
 901	xfs_fsblock_t		bno,
 902	xfs_filblks_t		blkcnt,
 903	uint			flags,
 904	struct list_head	*buffer_list)
 905{
 906	struct xfs_buf		*bp;
 907	int			error;
 908	int			type;
 909
 910	ASSERT(blkcnt > 0);
 911	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
 912		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
 913	error = 0;
 914
 915	/*
 916	 * Blkcnt arg can be a very big number, and might even be
 917	 * larger than the log itself. So, we have to break it up into
 918	 * manageable-sized transactions.
 919	 * Note that we don't start a permanent transaction here; we might
 920	 * not be able to get a log reservation for the whole thing up front,
 921	 * and we don't really care to either, because we just discard
 922	 * everything if we were to crash in the middle of this loop.
 923	 */
 924	while (blkcnt--) {
 925		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 926			      XFS_FSB_TO_DADDR(mp, bno),
 927			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 928			      &xfs_dquot_buf_ops);
 929
 930		/*
  931		 * CRC and validation errors will return an EFSCORRUPTED here. If
 932		 * this occurs, re-read without CRC validation so that we can
 933		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 934		 * will leave a trace in the log indicating corruption has
 935		 * been detected.
 936		 */
 937		if (error == -EFSCORRUPTED) {
 938			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 939				      XFS_FSB_TO_DADDR(mp, bno),
 940				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 941				      NULL);
 942		}
 943
 944		if (error)
 945			break;
 946
 947		/*
 948		 * A corrupt buffer might not have a verifier attached, so
 949		 * make sure we have the correct one attached before writeback
 950		 * occurs.
 951		 */
 952		bp->b_ops = &xfs_dquot_buf_ops;
 953		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 954		xfs_buf_delwri_queue(bp, buffer_list);
 955		xfs_buf_relse(bp);
 956
 957		/* goto the next block. */
 958		bno++;
 959		firstid += mp->m_quotainfo->qi_dqperchunk;
 960	}
 961
 962	return error;
 963}
 964
 965/*
 966 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 967 * caller supplied function for every chunk of dquots that we find.
 968 */
 969STATIC int
 970xfs_qm_dqiterate(
 971	struct xfs_mount	*mp,
 972	struct xfs_inode	*qip,
 973	uint			flags,
 974	struct list_head	*buffer_list)
 975{
 976	struct xfs_bmbt_irec	*map;
 977	int			i, nmaps;	/* number of map entries */
 978	int			error;		/* return value */
 979	xfs_fileoff_t		lblkno;
 980	xfs_filblks_t		maxlblkcnt;
 981	xfs_dqid_t		firstid;
 982	xfs_fsblock_t		rablkno;
 983	xfs_filblks_t		rablkcnt;
 984
 985	error = 0;
 986	/*
 987	 * This looks racy, but we can't keep an inode lock across a
 988	 * trans_reserve. But, this gets called during quotacheck, and that
 989	 * happens only at mount time which is single threaded.
 990	 */
 991	if (qip->i_d.di_nblocks == 0)
 992		return 0;
 993
 994	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
 995
 996	lblkno = 0;
 997	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 998	do {
 999		uint		lock_mode;
1000
1001		nmaps = XFS_DQITER_MAP_SIZE;
1002		/*
1003		 * We aren't changing the inode itself. Just changing
1004		 * some of its data. No new blocks are added here, and
1005		 * the inode is never added to the transaction.
1006		 */
1007		lock_mode = xfs_ilock_data_map_shared(qip);
1008		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1009				       map, &nmaps, 0);
1010		xfs_iunlock(qip, lock_mode);
1011		if (error)
1012			break;
1013
1014		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1015		for (i = 0; i < nmaps; i++) {
1016			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1017			ASSERT(map[i].br_blockcount);
1018
1019
1020			lblkno += map[i].br_blockcount;
1021
1022			if (map[i].br_startblock == HOLESTARTBLOCK)
1023				continue;
1024
1025			firstid = (xfs_dqid_t) map[i].br_startoff *
1026				mp->m_quotainfo->qi_dqperchunk;
1027			/*
1028			 * Do a read-ahead on the next extent.
1029			 */
1030			if ((i+1 < nmaps) &&
1031			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1032				rablkcnt =  map[i+1].br_blockcount;
1033				rablkno = map[i+1].br_startblock;
1034				while (rablkcnt--) {
1035					xfs_buf_readahead(mp->m_ddev_targp,
1036					       XFS_FSB_TO_DADDR(mp, rablkno),
1037					       mp->m_quotainfo->qi_dqchunklen,
1038					       &xfs_dquot_buf_ops);
1039					rablkno++;
1040				}
1041			}
1042			/*
1043			 * Iterate thru all the blks in the extent and
1044			 * reset the counters of all the dquots inside them.
1045			 */
1046			error = xfs_qm_dqiter_bufs(mp, firstid,
1047						   map[i].br_startblock,
1048						   map[i].br_blockcount,
1049						   flags, buffer_list);
1050			if (error)
1051				goto out;
1052		}
1053	} while (nmaps > 0);
1054
1055out:
1056	kmem_free(map);
1057	return error;
1058}
1059
1060/*
1061 * Called by dqusage_adjust in doing a quotacheck.
1062 *
 1063 * Given the inode and a dquot id, this updates both the incore dquot as well
1064 * as the buffer copy. This is so that once the quotacheck is done, we can
1065 * just log all the buffers, as opposed to logging numerous updates to
1066 * individual dquots.
1067 */
1068STATIC int
1069xfs_qm_quotacheck_dqadjust(
1070	struct xfs_inode	*ip,
1071	xfs_dqid_t		id,
1072	uint			type,
1073	xfs_qcnt_t		nblks,
1074	xfs_qcnt_t		rtblks)
1075{
1076	struct xfs_mount	*mp = ip->i_mount;
1077	struct xfs_dquot	*dqp;
1078	int			error;
1079
1080	error = xfs_qm_dqget(mp, ip, id, type,
1081			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1082	if (error) {
1083		/*
1084		 * Shouldn't be able to turn off quotas here.
1085		 */
1086		ASSERT(error != -ESRCH);
1087		ASSERT(error != -ENOENT);
1088		return error;
1089	}
1090
1091	trace_xfs_dqadjust(dqp);
1092
1093	/*
1094	 * Adjust the inode count and the block count to reflect this inode's
1095	 * resource usage.
1096	 */
1097	be64_add_cpu(&dqp->q_core.d_icount, 1);
1098	dqp->q_res_icount++;
1099	if (nblks) {
1100		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1101		dqp->q_res_bcount += nblks;
1102	}
1103	if (rtblks) {
1104		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1105		dqp->q_res_rtbcount += rtblks;
1106	}
1107
1108	/*
1109	 * Set default limits, adjust timers (since we changed usages)
1110	 *
1111	 * There are no timers for the default values set in the root dquot.
1112	 */
1113	if (dqp->q_core.d_id) {
1114		xfs_qm_adjust_dqlimits(mp, dqp);
1115		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1116	}
1117
1118	dqp->dq_flags |= XFS_DQ_DIRTY;
1119	xfs_qm_dqput(dqp);
1120	return 0;
1121}
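/*
 * Note that xfs_qm_quotacheck_dqadjust() bumps both the on-disk image
 * in q_core (big-endian counters) and the matching incore reservation
 * counters (q_res_icount, q_res_bcount, q_res_rtbcount), so the dquot
 * stays self-consistent when it is flushed later.  The id-0 dquot only
 * carries default limits, hence no timer adjustment is made for it.
 */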
1122
1123STATIC int
1124xfs_qm_get_rtblks(
1125	xfs_inode_t	*ip,
1126	xfs_qcnt_t	*O_rtblks)
1127{
1128	xfs_filblks_t	rtblks;			/* total rt blks */
1129	xfs_extnum_t	idx;			/* extent record index */
1130	xfs_ifork_t	*ifp;			/* inode fork pointer */
1131	xfs_extnum_t	nextents;		/* number of extent entries */
1132	int		error;
1133
1134	ASSERT(XFS_IS_REALTIME_INODE(ip));
1135	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1136	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1137		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1138			return error;
1139	}
1140	rtblks = 0;
1141	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1142	for (idx = 0; idx < nextents; idx++)
1143		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1144	*O_rtblks = (xfs_qcnt_t)rtblks;
1145	return 0;
1146}
1147
1148/*
1149 * callback routine supplied to bulkstat(). Given an inumber, find its
1150 * dquots and update them to account for resources taken by that inode.
1151 */
1152/* ARGSUSED */
1153STATIC int
1154xfs_qm_dqusage_adjust(
1155	xfs_mount_t	*mp,		/* mount point for filesystem */
1156	xfs_ino_t	ino,		/* inode number to get data for */
1157	void		__user *buffer,	/* not used */
1158	int		ubsize,		/* not used */
1159	int		*ubused,	/* not used */
1160	int		*res)		/* result code value */
1161{
1162	xfs_inode_t	*ip;
1163	xfs_qcnt_t	nblks, rtblks = 0;
1164	int		error;
1165
1166	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1167
1168	/*
1169	 * rootino must have its resources accounted for, not so with the quota
1170	 * inodes.
1171	 */
1172	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1173		*res = BULKSTAT_RV_NOTHING;
1174		return -EINVAL;
1175	}
1176
1177	/*
1178	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1179	 * interface expects the inode to be exclusively locked because that's
1180	 * the case in all other instances. It's OK that we do this because
1181	 * quotacheck is done only at mount time.
1182	 */
1183	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1184	if (error) {
1185		*res = BULKSTAT_RV_NOTHING;
1186		return error;
1187	}
1188
1189	ASSERT(ip->i_delayed_blks == 0);
1190
1191	if (XFS_IS_REALTIME_INODE(ip)) {
1192		/*
1193		 * Walk thru the extent list and count the realtime blocks.
1194		 */
1195		error = xfs_qm_get_rtblks(ip, &rtblks);
1196		if (error)
1197			goto error0;
1198	}
1199
1200	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1201
1202	/*
1203	 * Add the (disk blocks and inode) resources occupied by this
1204	 * inode to its dquots. We do this adjustment in the incore dquot,
1205	 * and also copy the changes to its buffer.
1206	 * We don't care about putting these changes in a transaction
1207	 * envelope because if we crash in the middle of a 'quotacheck'
1208	 * we have to start from the beginning anyway.
1209	 * Once we're done, we'll log all the dquot bufs.
1210	 *
1211	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1212	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1213	 */
1214	if (XFS_IS_UQUOTA_ON(mp)) {
1215		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1216						   XFS_DQ_USER, nblks, rtblks);
1217		if (error)
1218			goto error0;
1219	}
1220
1221	if (XFS_IS_GQUOTA_ON(mp)) {
1222		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1223						   XFS_DQ_GROUP, nblks, rtblks);
1224		if (error)
1225			goto error0;
1226	}
1227
1228	if (XFS_IS_PQUOTA_ON(mp)) {
1229		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1230						   XFS_DQ_PROJ, nblks, rtblks);
1231		if (error)
1232			goto error0;
1233	}
1234
1235	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1236	IRELE(ip);
1237	*res = BULKSTAT_RV_DIDONE;
1238	return 0;
1239
1240error0:
1241	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1242	IRELE(ip);
1243	*res = BULKSTAT_RV_GIVEUP;
1244	return error;
1245}
1246
1247STATIC int
1248xfs_qm_flush_one(
1249	struct xfs_dquot	*dqp,
1250	void			*data)
1251{
1252	struct list_head	*buffer_list = data;
1253	struct xfs_buf		*bp = NULL;
1254	int			error = 0;
1255
1256	xfs_dqlock(dqp);
1257	if (dqp->dq_flags & XFS_DQ_FREEING)
1258		goto out_unlock;
1259	if (!XFS_DQ_IS_DIRTY(dqp))
1260		goto out_unlock;
1261
1262	xfs_dqflock(dqp);
1263	error = xfs_qm_dqflush(dqp, &bp);
1264	if (error)
1265		goto out_unlock;
1266
1267	xfs_buf_delwri_queue(bp, buffer_list);
1268	xfs_buf_relse(bp);
1269out_unlock:
1270	xfs_dqunlock(dqp);
1271	return error;
1272}
1273
1274/*
1275 * Walk thru all the filesystem inodes and construct a consistent view
1276 * of the disk quota world. If the quotacheck fails, disable quotas.
1277 */
1278STATIC int
1279xfs_qm_quotacheck(
1280	xfs_mount_t	*mp)
1281{
1282	int			done, count, error, error2;
1283	xfs_ino_t		lastino;
1284	size_t			structsz;
1285	uint			flags;
1286	LIST_HEAD		(buffer_list);
1287	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1288	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1289	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1290
1291	count = INT_MAX;
1292	structsz = 1;
1293	lastino = 0;
1294	flags = 0;
1295
1296	ASSERT(uip || gip || pip);
1297	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1298
1299	xfs_notice(mp, "Quotacheck needed: Please wait.");
1300
1301	/*
1302	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1303	 * their counters to zero. We need a clean slate.
1304	 * We don't log our changes till later.
1305	 */
1306	if (uip) {
1307		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1308					 &buffer_list);
1309		if (error)
1310			goto error_return;
1311		flags |= XFS_UQUOTA_CHKD;
1312	}
1313
1314	if (gip) {
1315		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1316					 &buffer_list);
1317		if (error)
1318			goto error_return;
1319		flags |= XFS_GQUOTA_CHKD;
1320	}
1321
1322	if (pip) {
1323		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1324					 &buffer_list);
1325		if (error)
1326			goto error_return;
1327		flags |= XFS_PQUOTA_CHKD;
1328	}
1329
1330	do {
1331		/*
1332		 * Iterate thru all the inodes in the file system,
1333		 * adjusting the corresponding dquot counters in core.
1334		 */
1335		error = xfs_bulkstat(mp, &lastino, &count,
1336				     xfs_qm_dqusage_adjust,
1337				     structsz, NULL, &done);
1338		if (error)
1339			break;
1340
1341	} while (!done);
1342
1343	/*
1344	 * We've made all the changes that we need to make incore.  Flush them
1345	 * down to disk buffers if everything was updated successfully.
1346	 */
1347	if (XFS_IS_UQUOTA_ON(mp)) {
1348		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1349					  &buffer_list);
1350	}
1351	if (XFS_IS_GQUOTA_ON(mp)) {
1352		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1353					   &buffer_list);
1354		if (!error)
1355			error = error2;
1356	}
1357	if (XFS_IS_PQUOTA_ON(mp)) {
1358		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1359					   &buffer_list);
1360		if (!error)
1361			error = error2;
1362	}
1363
1364	error2 = xfs_buf_delwri_submit(&buffer_list);
1365	if (!error)
1366		error = error2;
1367
1368	/*
1369	 * We can get this error if we couldn't do a dquot allocation inside
1370	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1371	 * dirty dquots that might be cached, we just want to get rid of them
1372	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1373	 * at this point (because we intentionally didn't in dqget_noattach).
1374	 */
1375	if (error) {
1376		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1377		goto error_return;
1378	}
1379
1380	/*
1381	 * If one type of quotas is off, then it will lose its
1382	 * quotachecked status, since we won't be doing accounting for
1383	 * that type anymore.
1384	 */
1385	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1386	mp->m_qflags |= flags;
1387
1388 error_return:
1389	while (!list_empty(&buffer_list)) {
1390		struct xfs_buf *bp =
1391			list_first_entry(&buffer_list, struct xfs_buf, b_list);
1392		list_del_init(&bp->b_list);
1393		xfs_buf_relse(bp);
1394	}
1395
1396	if (error) {
1397		xfs_warn(mp,
1398	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1399			error);
1400		/*
1401		 * We must turn off quotas.
1402		 */
1403		ASSERT(mp->m_quotainfo != NULL);
1404		xfs_qm_destroy_quotainfo(mp);
1405		if (xfs_mount_reset_sbqflags(mp)) {
1406			xfs_warn(mp,
1407				"Quotacheck: Failed to reset quota flags.");
1408		}
1409	} else
1410		xfs_notice(mp, "Quotacheck: Done.");
1411	return error;
1412}
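/*
 * The quotacheck above works in four steps: reset every allocated dquot
 * buffer to zero usage, walk all inodes via xfs_bulkstat() and re-add
 * their block and inode usage to the relevant dquots, flush the dirty
 * dquots to their buffers with xfs_qm_flush_one(), and submit the whole
 * delwri list in one go.  Any failure purges the dquot cache and leaves
 * quotas turned off.
 */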
1413
1414/*
1415 * This is called from xfs_mountfs to start quotas and initialize all
1416 * necessary data structures like quotainfo.  This is also responsible for
1417 * running a quotacheck as necessary.  We are guaranteed that the superblock
1418 * is consistently read in at this point.
1419 *
1420 * If we fail here, the mount will continue with quota turned off. We don't
 1421 * need to indicate success or failure at all.
1422 */
1423void
1424xfs_qm_mount_quotas(
1425	struct xfs_mount	*mp)
1426{
1427	int			error = 0;
1428	uint			sbf;
1429
1430	/*
 1431	 * If quotas on realtime volumes are not supported, we disable
1432	 * quotas immediately.
1433	 */
1434	if (mp->m_sb.sb_rextents) {
1435		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1436		mp->m_qflags = 0;
1437		goto write_changes;
1438	}
1439
1440	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1441
1442	/*
1443	 * Allocate the quotainfo structure inside the mount struct, and
1444	 * create quotainode(s), and change/rev superblock if necessary.
1445	 */
1446	error = xfs_qm_init_quotainfo(mp);
1447	if (error) {
1448		/*
1449		 * We must turn off quotas.
1450		 */
1451		ASSERT(mp->m_quotainfo == NULL);
1452		mp->m_qflags = 0;
1453		goto write_changes;
1454	}
1455	/*
1456	 * If any of the quotas are not consistent, do a quotacheck.
1457	 */
1458	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1459		error = xfs_qm_quotacheck(mp);
1460		if (error) {
1461			/* Quotacheck failed and disabled quotas. */
1462			return;
1463		}
1464	}
1465	/*
1466	 * If one type of quotas is off, then it will lose its
1467	 * quotachecked status, since we won't be doing accounting for
1468	 * that type anymore.
1469	 */
1470	if (!XFS_IS_UQUOTA_ON(mp))
1471		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1472	if (!XFS_IS_GQUOTA_ON(mp))
1473		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1474	if (!XFS_IS_PQUOTA_ON(mp))
1475		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1476
1477 write_changes:
1478	/*
1479	 * We actually don't have to acquire the m_sb_lock at all.
1480	 * This can only be called from mount, and that's single threaded. XXX
1481	 */
1482	spin_lock(&mp->m_sb_lock);
1483	sbf = mp->m_sb.sb_qflags;
1484	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1485	spin_unlock(&mp->m_sb_lock);
1486
1487	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1488		if (xfs_sync_sb(mp, false)) {
1489			/*
1490			 * We could only have been turning quotas off.
1491			 * We aren't in very good shape actually because
1492			 * the incore structures are convinced that quotas are
1493			 * off, but the on disk superblock doesn't know that !
1494			 */
1495			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1496			xfs_alert(mp, "%s: Superblock update failed!",
1497				__func__);
1498		}
1499	}
1500
1501	if (error) {
1502		xfs_warn(mp, "Failed to initialize disk quotas.");
1503		return;
1504	}
1505}
1506
1507/*
1508 * This is called after the superblock has been read in and we're ready to
1509 * iget the quota inodes.
1510 */
1511STATIC int
1512xfs_qm_init_quotainos(
1513	xfs_mount_t	*mp)
1514{
1515	struct xfs_inode	*uip = NULL;
1516	struct xfs_inode	*gip = NULL;
1517	struct xfs_inode	*pip = NULL;
1518	int			error;
1519	uint			flags = 0;
1520
1521	ASSERT(mp->m_quotainfo);
1522
1523	/*
1524	 * Get the uquota and gquota inodes
1525	 */
1526	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1527		if (XFS_IS_UQUOTA_ON(mp) &&
1528		    mp->m_sb.sb_uquotino != NULLFSINO) {
1529			ASSERT(mp->m_sb.sb_uquotino > 0);
1530			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1531					     0, 0, &uip);
1532			if (error)
1533				return error;
1534		}
1535		if (XFS_IS_GQUOTA_ON(mp) &&
1536		    mp->m_sb.sb_gquotino != NULLFSINO) {
1537			ASSERT(mp->m_sb.sb_gquotino > 0);
1538			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1539					     0, 0, &gip);
1540			if (error)
1541				goto error_rele;
1542		}
1543		if (XFS_IS_PQUOTA_ON(mp) &&
1544		    mp->m_sb.sb_pquotino != NULLFSINO) {
1545			ASSERT(mp->m_sb.sb_pquotino > 0);
1546			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1547					     0, 0, &pip);
1548			if (error)
1549				goto error_rele;
1550		}
1551	} else {
1552		flags |= XFS_QMOPT_SBVERSION;
1553	}
1554
1555	/*
1556	 * Create the three inodes, if they don't exist already. The changes
1557	 * made above will get added to a transaction and logged in one of
1558	 * the qino_alloc calls below.  If the device is readonly,
1559	 * temporarily switch to read-write to do this.
1560	 */
1561	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1562		error = xfs_qm_qino_alloc(mp, &uip,
1563					      flags | XFS_QMOPT_UQUOTA);
1564		if (error)
1565			goto error_rele;
1566
1567		flags &= ~XFS_QMOPT_SBVERSION;
1568	}
1569	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1570		error = xfs_qm_qino_alloc(mp, &gip,
1571					  flags | XFS_QMOPT_GQUOTA);
1572		if (error)
1573			goto error_rele;
1574
1575		flags &= ~XFS_QMOPT_SBVERSION;
1576	}
1577	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1578		error = xfs_qm_qino_alloc(mp, &pip,
1579					  flags | XFS_QMOPT_PQUOTA);
1580		if (error)
1581			goto error_rele;
1582	}
1583
1584	mp->m_quotainfo->qi_uquotaip = uip;
1585	mp->m_quotainfo->qi_gquotaip = gip;
1586	mp->m_quotainfo->qi_pquotaip = pip;
1587
1588	return 0;
1589
1590error_rele:
1591	if (uip)
1592		IRELE(uip);
1593	if (gip)
1594		IRELE(gip);
1595	if (pip)
1596		IRELE(pip);
1597	return error;
1598}
1599
1600STATIC void
1601xfs_qm_dqfree_one(
1602	struct xfs_dquot	*dqp)
1603{
1604	struct xfs_mount	*mp = dqp->q_mount;
1605	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1606
1607	mutex_lock(&qi->qi_tree_lock);
1608	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1609			  be32_to_cpu(dqp->q_core.d_id));
1610
1611	qi->qi_dquots--;
1612	mutex_unlock(&qi->qi_tree_lock);
1613
1614	xfs_qm_dqdestroy(dqp);
1615}
1616
1617/* --------------- utility functions for vnodeops ---------------- */
1618
1619
1620/*
 1621 * Given an inode, a uid, gid and prid, make sure that we have
1622 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1623 * quotas by creating this file.
1624 * This also attaches dquot(s) to the given inode after locking it,
1625 * and returns the dquots corresponding to the uid and/or gid.
1626 *
1627 * in	: inode (unlocked)
1628 * out	: udquot, gdquot with references taken and unlocked
1629 */
1630int
1631xfs_qm_vop_dqalloc(
1632	struct xfs_inode	*ip,
1633	xfs_dqid_t		uid,
1634	xfs_dqid_t		gid,
1635	prid_t			prid,
1636	uint			flags,
1637	struct xfs_dquot	**O_udqpp,
1638	struct xfs_dquot	**O_gdqpp,
1639	struct xfs_dquot	**O_pdqpp)
1640{
1641	struct xfs_mount	*mp = ip->i_mount;
1642	struct xfs_dquot	*uq = NULL;
1643	struct xfs_dquot	*gq = NULL;
1644	struct xfs_dquot	*pq = NULL;
1645	int			error;
1646	uint			lockflags;
1647
1648	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1649		return 0;
1650
1651	lockflags = XFS_ILOCK_EXCL;
1652	xfs_ilock(ip, lockflags);
1653
1654	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1655		gid = ip->i_d.di_gid;
1656
1657	/*
1658	 * Attach the dquot(s) to this inode, doing a dquot allocation
1659	 * if necessary. The dquot(s) will not be locked.
1660	 */
1661	if (XFS_NOT_DQATTACHED(mp, ip)) {
1662		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1663		if (error) {
1664			xfs_iunlock(ip, lockflags);
1665			return error;
1666		}
1667	}
1668
1669	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1670		if (ip->i_d.di_uid != uid) {
1671			/*
1672			 * What we need is the dquot that has this uid, and
1673			 * if we send the inode to dqget, the uid of the inode
1674			 * takes priority over what's sent in the uid argument.
1675			 * We must unlock inode here before calling dqget if
1676			 * we're not sending the inode, because otherwise
1677			 * we'll deadlock by doing trans_reserve while
1678			 * holding ilock.
1679			 */
1680			xfs_iunlock(ip, lockflags);
1681			error = xfs_qm_dqget(mp, NULL, uid,
1682						 XFS_DQ_USER,
1683						 XFS_QMOPT_DQALLOC |
1684						 XFS_QMOPT_DOWARN,
1685						 &uq);
1686			if (error) {
1687				ASSERT(error != -ENOENT);
1688				return error;
1689			}
1690			/*
1691			 * Get the ilock in the right order.
1692			 */
1693			xfs_dqunlock(uq);
1694			lockflags = XFS_ILOCK_SHARED;
1695			xfs_ilock(ip, lockflags);
1696		} else {
1697			/*
1698			 * Take an extra reference, because we'll return
1699			 * this to caller
1700			 */
1701			ASSERT(ip->i_udquot);
1702			uq = xfs_qm_dqhold(ip->i_udquot);
1703		}
1704	}
1705	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1706		if (ip->i_d.di_gid != gid) {
1707			xfs_iunlock(ip, lockflags);
1708			error = xfs_qm_dqget(mp, NULL, gid,
1709						 XFS_DQ_GROUP,
1710						 XFS_QMOPT_DQALLOC |
1711						 XFS_QMOPT_DOWARN,
1712						 &gq);
1713			if (error) {
1714				ASSERT(error != -ENOENT);
1715				goto error_rele;
1716			}
1717			xfs_dqunlock(gq);
1718			lockflags = XFS_ILOCK_SHARED;
1719			xfs_ilock(ip, lockflags);
1720		} else {
1721			ASSERT(ip->i_gdquot);
1722			gq = xfs_qm_dqhold(ip->i_gdquot);
1723		}
1724	}
1725	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1726		if (xfs_get_projid(ip) != prid) {
1727			xfs_iunlock(ip, lockflags);
1728			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1729						 XFS_DQ_PROJ,
1730						 XFS_QMOPT_DQALLOC |
1731						 XFS_QMOPT_DOWARN,
1732						 &pq);
1733			if (error) {
1734				ASSERT(error != -ENOENT);
1735				goto error_rele;
1736			}
1737			xfs_dqunlock(pq);
1738			lockflags = XFS_ILOCK_SHARED;
1739			xfs_ilock(ip, lockflags);
1740		} else {
1741			ASSERT(ip->i_pdquot);
1742			pq = xfs_qm_dqhold(ip->i_pdquot);
1743		}
1744	}
1745	if (uq)
1746		trace_xfs_dquot_dqalloc(ip);
1747
1748	xfs_iunlock(ip, lockflags);
1749	if (O_udqpp)
1750		*O_udqpp = uq;
1751	else
1752		xfs_qm_dqrele(uq);
1753	if (O_gdqpp)
1754		*O_gdqpp = gq;
1755	else
1756		xfs_qm_dqrele(gq);
1757	if (O_pdqpp)
1758		*O_pdqpp = pq;
1759	else
1760		xfs_qm_dqrele(pq);
1761	return 0;
1762
1763error_rele:
1764	xfs_qm_dqrele(gq);
1765	xfs_qm_dqrele(uq);
1766	return error;
1767}
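/*
 * Lock ordering note for xfs_qm_vop_dqalloc() above: whenever the
 * requested id differs from the one currently on the inode, the ILOCK
 * is dropped before calling xfs_qm_dqget() (which may allocate and thus
 * reserve a transaction) and re-taken in shared mode afterwards, so the
 * caller cannot assume the inode stayed exclusively locked across this
 * call.
 */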
1768
1769/*
1770 * Actually transfer ownership, and do dquot modifications.
1771 * These were already reserved.
1772 */
1773xfs_dquot_t *
1774xfs_qm_vop_chown(
1775	xfs_trans_t	*tp,
1776	xfs_inode_t	*ip,
1777	xfs_dquot_t	**IO_olddq,
1778	xfs_dquot_t	*newdq)
1779{
1780	xfs_dquot_t	*prevdq;
1781	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1782				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1783
1784
1785	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1786	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1787
1788	/* old dquot */
1789	prevdq = *IO_olddq;
1790	ASSERT(prevdq);
1791	ASSERT(prevdq != newdq);
1792
1793	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1794	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1795
1796	/* the sparkling new dquot */
1797	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1798	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1799
1800	/*
1801	 * Take an extra reference, because the inode is going to keep
1802	 * this dquot pointer even after the trans_commit.
1803	 */
1804	*IO_olddq = xfs_qm_dqhold(newdq);
1805
1806	return prevdq;
1807}
1808
1809/*
1810 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1811 */
1812int
1813xfs_qm_vop_chown_reserve(
1814	struct xfs_trans	*tp,
1815	struct xfs_inode	*ip,
1816	struct xfs_dquot	*udqp,
1817	struct xfs_dquot	*gdqp,
1818	struct xfs_dquot	*pdqp,
1819	uint			flags)
1820{
1821	struct xfs_mount	*mp = ip->i_mount;
1822	uint			delblks, blkflags, prjflags = 0;
1823	struct xfs_dquot	*udq_unres = NULL;
1824	struct xfs_dquot	*gdq_unres = NULL;
1825	struct xfs_dquot	*pdq_unres = NULL;
1826	struct xfs_dquot	*udq_delblks = NULL;
1827	struct xfs_dquot	*gdq_delblks = NULL;
1828	struct xfs_dquot	*pdq_delblks = NULL;
1829	int			error;
1830
1831
1832	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1833	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1834
1835	delblks = ip->i_delayed_blks;
1836	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1837			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1838
1839	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1840	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
1841		udq_delblks = udqp;
1842		/*
1843		 * If there are delayed allocation blocks, then we have to
1844		 * unreserve those from the old dquot, and add them to the
1845		 * new dquot.
1846		 */
1847		if (delblks) {
1848			ASSERT(ip->i_udquot);
1849			udq_unres = ip->i_udquot;
1850		}
1851	}
1852	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1853	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
1854		gdq_delblks = gdqp;
1855		if (delblks) {
1856			ASSERT(ip->i_gdquot);
1857			gdq_unres = ip->i_gdquot;
1858		}
1859	}
1860
1861	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1862	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
1863		prjflags = XFS_QMOPT_ENOSPC;
1864		pdq_delblks = pdqp;
1865		if (delblks) {
1866			ASSERT(ip->i_pdquot);
1867			pdq_unres = ip->i_pdquot;
1868		}
1869	}
1870
1871	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1872				udq_delblks, gdq_delblks, pdq_delblks,
1873				ip->i_d.di_nblocks, 1,
1874				flags | blkflags | prjflags);
1875	if (error)
1876		return error;
1877
1878	/*
 1879	 * Do the delayed blks reservations/unreservations now. Since these
1880	 * are done without the help of a transaction, if a reservation fails
1881	 * its previous reservations won't be automatically undone by trans
1882	 * code. So, we have to do it manually here.
1883	 */
1884	if (delblks) {
1885		/*
1886		 * Do the reservations first. Unreservation can't fail.
1887		 */
1888		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1889		ASSERT(udq_unres || gdq_unres || pdq_unres);
1890		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1891			    udq_delblks, gdq_delblks, pdq_delblks,
1892			    (xfs_qcnt_t)delblks, 0,
1893			    flags | blkflags | prjflags);
1894		if (error)
1895			return error;
1896		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1897				udq_unres, gdq_unres, pdq_unres,
1898				-((xfs_qcnt_t)delblks), 0, blkflags);
1899	}
1900
1901	return 0;
1902}
1903
1904int
1905xfs_qm_vop_rename_dqattach(
1906	struct xfs_inode	**i_tab)
1907{
1908	struct xfs_mount	*mp = i_tab[0]->i_mount;
1909	int			i;
1910
1911	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1912		return 0;
1913
1914	for (i = 0; (i < 4 && i_tab[i]); i++) {
1915		struct xfs_inode	*ip = i_tab[i];
1916		int			error;
1917
1918		/*
1919		 * Watch out for duplicate entries in the table.
1920		 */
1921		if (i == 0 || ip != i_tab[i-1]) {
1922			if (XFS_NOT_DQATTACHED(mp, ip)) {
1923				error = xfs_qm_dqattach(ip, 0);
1924				if (error)
1925					return error;
1926			}
1927		}
1928	}
1929	return 0;
1930}
1931
1932void
1933xfs_qm_vop_create_dqattach(
1934	struct xfs_trans	*tp,
1935	struct xfs_inode	*ip,
1936	struct xfs_dquot	*udqp,
1937	struct xfs_dquot	*gdqp,
1938	struct xfs_dquot	*pdqp)
1939{
1940	struct xfs_mount	*mp = tp->t_mountp;
1941
1942	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1943		return;
1944
1945	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1946	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1947
1948	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1949		ASSERT(ip->i_udquot == NULL);
1950		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1951
1952		ip->i_udquot = xfs_qm_dqhold(udqp);
1953		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1954	}
1955	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1956		ASSERT(ip->i_gdquot == NULL);
1957		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
1958		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1959		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1960	}
1961	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1962		ASSERT(ip->i_pdquot == NULL);
1963		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
1964
1965		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1966		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1967	}
1968}
1969
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
 
  17#include "xfs_quota.h"
 
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28
  29/*
  30 * The global quota manager. There is only one of these for the entire
  31 * system, _not_ one per file system. XQM keeps track of the overall
  32 * quota functionality, including maintaining the freelist and hash
  33 * tables of dquots.
  34 */
  35STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  36STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
 
  37
  38STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
  39STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  40/*
  41 * We use the batch lookup interface to iterate over the dquots as it
  42 * currently is the only interface into the radix tree code that allows
  43 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  44 * operations is fine as all callers are used either during mount/umount
  45 * or quotaoff.
  46 */
  47#define XFS_DQ_LOOKUP_BATCH	32
  48
  49STATIC int
  50xfs_qm_dquot_walk(
  51	struct xfs_mount	*mp,
  52	xfs_dqtype_t		type,
  53	int			(*execute)(struct xfs_dquot *dqp, void *data),
  54	void			*data)
  55{
  56	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  57	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  58	uint32_t		next_index;
  59	int			last_error = 0;
  60	int			skipped;
  61	int			nr_found;
  62
  63restart:
  64	skipped = 0;
  65	next_index = 0;
  66	nr_found = 0;
  67
  68	while (1) {
  69		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  70		int		error = 0;
  71		int		i;
  72
  73		mutex_lock(&qi->qi_tree_lock);
  74		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  75					next_index, XFS_DQ_LOOKUP_BATCH);
  76		if (!nr_found) {
  77			mutex_unlock(&qi->qi_tree_lock);
  78			break;
  79		}
  80
  81		for (i = 0; i < nr_found; i++) {
  82			struct xfs_dquot *dqp = batch[i];
  83
  84			next_index = dqp->q_id + 1;
  85
  86			error = execute(batch[i], data);
  87			if (error == -EAGAIN) {
  88				skipped++;
  89				continue;
  90			}
  91			if (error && last_error != -EFSCORRUPTED)
  92				last_error = error;
  93		}
  94
  95		mutex_unlock(&qi->qi_tree_lock);
  96
  97		/* bail out if the filesystem is corrupted.  */
  98		if (last_error == -EFSCORRUPTED) {
  99			skipped = 0;
 100			break;
 101		}
 102		/* we're done if id overflows back to zero */
 103		if (!next_index)
 104			break;
 105	}
 106
 107	if (skipped) {
 108		delay(1);
 109		goto restart;
 110	}
 111
 112	return last_error;
 113}
 114
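    /*
     * For reference, a typical invocation of the walker above, mirroring the
     * quotacheck flush caller later in this file:
     *
     *	error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
     *				  &buffer_list);
     *
     * A callback returning -EAGAIN marks the pass as skipped, and once the
     * pass completes the whole walk restarts from index zero, so callbacks
     * must tolerate being re-run on dquots they have already visited.
     */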
 115
 116/*
 117 * Purge a dquot from all tracking data structures and free it.
 118 */
 119STATIC int
 120xfs_qm_dqpurge(
 121	struct xfs_dquot	*dqp,
 122	void			*data)
 123{
 124	struct xfs_mount	*mp = dqp->q_mount;
 125	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 126	int			error = -EAGAIN;
 127
 128	xfs_dqlock(dqp);
 129	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 130		goto out_unlock;
 
 
 131
 132	dqp->q_flags |= XFS_DQFLAG_FREEING;
 133
 134	xfs_dqflock(dqp);
 135
 136	/*
 137	 * If we are turning this type of quotas off, we don't care
 138	 * about the dirty metadata sitting in this dquot. OTOH, if
 139	 * we're unmounting, we do care, so we flush it and wait.
 140	 */
 141	if (XFS_DQ_IS_DIRTY(dqp)) {
 142		struct xfs_buf	*bp = NULL;
 
 143
 144		/*
 145		 * We don't care about getting disk errors here. We need
 146		 * to purge this dquot anyway, so we go ahead regardless.
 147		 */
 148		error = xfs_qm_dqflush(dqp, &bp);
 149		if (!error) {
 
 
 
 150			error = xfs_bwrite(bp);
 151			xfs_buf_relse(bp);
 152		} else if (error == -EAGAIN) {
 153			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 154			goto out_unlock;
 155		}
 156		xfs_dqflock(dqp);
 157	}
 158
 159	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 160	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 161		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 162
 163	xfs_dqfunlock(dqp);
 164	xfs_dqunlock(dqp);
 165
 166	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 
 167	qi->qi_dquots--;
 168
 169	/*
 170	 * We move dquots to the freelist as soon as their reference count
 171	 * hits zero, so it really should be on the freelist here.
 172	 */
 173	ASSERT(!list_empty(&dqp->q_lru));
 174	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 175	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 176
 177	xfs_qm_dqdestroy(dqp);
 178	return 0;
 179
 180out_unlock:
 181	xfs_dqunlock(dqp);
 182	return error;
 183}
 184
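    /*
     * Note how this cooperates with the walker above: a dquot that is still
     * referenced, already marked XFS_DQFLAG_FREEING, or whose flush comes
     * back with -EAGAIN is skipped by returning -EAGAIN, so the purge pass
     * simply retries until the remaining dquots can be torn down.
     */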
 185/*
 186 * Purge the dquot cache.
 187 */
 188void
 189xfs_qm_dqpurge_all(
 190	struct xfs_mount	*mp,
 191	uint			flags)
 192{
 193	if (flags & XFS_QMOPT_UQUOTA)
 194		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 195	if (flags & XFS_QMOPT_GQUOTA)
 196		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 197	if (flags & XFS_QMOPT_PQUOTA)
 198		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 199}
 200
 201/*
 202 * Just destroy the quotainfo structure.
 203 */
 204void
 205xfs_qm_unmount(
 206	struct xfs_mount	*mp)
 207{
 208	if (mp->m_quotainfo) {
 209		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 210		xfs_qm_destroy_quotainfo(mp);
 211	}
 212}
 213
 214/*
 215 * Called from the vfsops layer.
 216 */
 217void
 218xfs_qm_unmount_quotas(
 219	xfs_mount_t	*mp)
 220{
 221	/*
 222	 * Release the dquots that root inode, et al might be holding,
 223	 * before we flush quotas and blow away the quotainfo structure.
 224	 */
 225	ASSERT(mp->m_rootip);
 226	xfs_qm_dqdetach(mp->m_rootip);
 227	if (mp->m_rbmip)
 228		xfs_qm_dqdetach(mp->m_rbmip);
 229	if (mp->m_rsumip)
 230		xfs_qm_dqdetach(mp->m_rsumip);
 231
 232	/*
 233	 * Release the quota inodes.
 234	 */
 235	if (mp->m_quotainfo) {
 236		if (mp->m_quotainfo->qi_uquotaip) {
 237			xfs_irele(mp->m_quotainfo->qi_uquotaip);
 238			mp->m_quotainfo->qi_uquotaip = NULL;
 239		}
 240		if (mp->m_quotainfo->qi_gquotaip) {
 241			xfs_irele(mp->m_quotainfo->qi_gquotaip);
 242			mp->m_quotainfo->qi_gquotaip = NULL;
 243		}
 244		if (mp->m_quotainfo->qi_pquotaip) {
 245			xfs_irele(mp->m_quotainfo->qi_pquotaip);
 246			mp->m_quotainfo->qi_pquotaip = NULL;
 247		}
 248	}
 249}
 250
 251STATIC int
 252xfs_qm_dqattach_one(
 253	struct xfs_inode	*ip,
 254	xfs_dqtype_t		type,
 255	bool			doalloc,
 256	struct xfs_dquot	**IO_idqpp)
 
 257{
 258	struct xfs_dquot	*dqp;
 259	int			error;
 260
 261	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 262	error = 0;
 263
 264	/*
 265	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 266	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 267	 * simpler.
 268	 */
 269	dqp = *IO_idqpp;
 270	if (dqp) {
 271		trace_xfs_dqattach_found(dqp);
 272		return 0;
 273	}
 274
 275	/*
 276	 * Find the dquot from somewhere. This bumps the reference count of
 277	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 278	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 279	 * turned off suddenly.
 280	 */
 281	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 
 282	if (error)
 283		return error;
 284
 285	trace_xfs_dqattach_get(dqp);
 286
 287	/*
 288	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 289	 * that the dquot returned is the one that should go in the inode.
 290	 */
 291	*IO_idqpp = dqp;
 292	xfs_dqunlock(dqp);
 293	return 0;
 294}
 295
 296static bool
 297xfs_qm_need_dqattach(
 298	struct xfs_inode	*ip)
 299{
 300	struct xfs_mount	*mp = ip->i_mount;
 301
 302	if (!XFS_IS_QUOTA_RUNNING(mp))
 303		return false;
 304	if (!XFS_IS_QUOTA_ON(mp))
 305		return false;
 306	if (!XFS_NOT_DQATTACHED(mp, ip))
 307		return false;
 308	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 309		return false;
 310	return true;
 311}
 312
 313/*
 314 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 315 * into account.
 316 * If @doalloc is true, the dquot(s) will be allocated if needed.
 317 * Inode may get unlocked and relocked in here, and the caller must deal with
 318 * the consequences.
 319 */
 320int
 321xfs_qm_dqattach_locked(
 322	xfs_inode_t	*ip,
 323	bool		doalloc)
 324{
 325	xfs_mount_t	*mp = ip->i_mount;
 326	int		error = 0;
 327
 328	if (!xfs_qm_need_dqattach(ip))
 329		return 0;
 330
 331	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 332
 333	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 334		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 335				doalloc, &ip->i_udquot);
 
 336		if (error)
 337			goto done;
 338		ASSERT(ip->i_udquot);
 339	}
 340
 341	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 342		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 343				doalloc, &ip->i_gdquot);
 
 344		if (error)
 345			goto done;
 346		ASSERT(ip->i_gdquot);
 347	}
 348
 349	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 350		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 351				doalloc, &ip->i_pdquot);
 
 352		if (error)
 353			goto done;
 354		ASSERT(ip->i_pdquot);
 355	}
 356
 357done:
 358	/*
 359	 * Don't worry about the dquots that we may have attached before any
 360	 * error - they'll get detached later if it has not already been done.
 361	 */
 362	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 363	return error;
 364}
 365
 366int
 367xfs_qm_dqattach(
 368	struct xfs_inode	*ip)
 
 369{
 370	int			error;
 371
 372	if (!xfs_qm_need_dqattach(ip))
 373		return 0;
 374
 375	xfs_ilock(ip, XFS_ILOCK_EXCL);
 376	error = xfs_qm_dqattach_locked(ip, false);
 377	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 378
 379	return error;
 380}
 381
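    /*
     * Two entry points, as a quick orientation: callers that already hold
     * the inode ILOCK (for example xfs_qm_vop_dqalloc() below) use
     * xfs_qm_dqattach_locked() directly, while everyone else goes through
     * xfs_qm_dqattach(), which just wraps the locked variant in
     * xfs_ilock()/xfs_iunlock(XFS_ILOCK_EXCL).
     */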
 382/*
 383 * Release dquots (and their references) if any.
  384 * The inode should be locked EXCL except when this is called by
 385 * xfs_ireclaim.
 386 */
 387void
 388xfs_qm_dqdetach(
 389	xfs_inode_t	*ip)
 390{
 391	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 392		return;
 393
 394	trace_xfs_dquot_dqdetach(ip);
 395
 396	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 397	if (ip->i_udquot) {
 398		xfs_qm_dqrele(ip->i_udquot);
 399		ip->i_udquot = NULL;
 400	}
 401	if (ip->i_gdquot) {
 402		xfs_qm_dqrele(ip->i_gdquot);
 403		ip->i_gdquot = NULL;
 404	}
 405	if (ip->i_pdquot) {
 406		xfs_qm_dqrele(ip->i_pdquot);
 407		ip->i_pdquot = NULL;
 408	}
 409}
 410
 411struct xfs_qm_isolate {
 412	struct list_head	buffers;
 413	struct list_head	dispose;
 414};
 415
 416static enum lru_status
 417xfs_qm_dquot_isolate(
 418	struct list_head	*item,
 419	struct list_lru_one	*lru,
 420	spinlock_t		*lru_lock,
 421	void			*arg)
 422		__releases(lru_lock) __acquires(lru_lock)
 423{
 424	struct xfs_dquot	*dqp = container_of(item,
 425						struct xfs_dquot, q_lru);
 426	struct xfs_qm_isolate	*isol = arg;
 427
 428	if (!xfs_dqlock_nowait(dqp))
 429		goto out_miss_busy;
 430
 431	/*
  432	 * This dquot has acquired a reference in the meantime; remove it from
 433	 * the freelist and try again.
 434	 */
 435	if (dqp->q_nrefs) {
 436		xfs_dqunlock(dqp);
 437		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 438
 439		trace_xfs_dqreclaim_want(dqp);
 440		list_lru_isolate(lru, &dqp->q_lru);
 441		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 442		return LRU_REMOVED;
 443	}
 444
 445	/*
 446	 * If the dquot is dirty, flush it. If it's already being flushed, just
 447	 * skip it so there is time for the IO to complete before we try to
 448	 * reclaim it again on the next LRU pass.
 449	 */
 450	if (!xfs_dqflock_nowait(dqp)) {
 451		xfs_dqunlock(dqp);
 452		goto out_miss_busy;
 453	}
 454
 455	if (XFS_DQ_IS_DIRTY(dqp)) {
 456		struct xfs_buf	*bp = NULL;
 457		int		error;
 458
 459		trace_xfs_dqreclaim_dirty(dqp);
 460
 461		/* we have to drop the LRU lock to flush the dquot */
 462		spin_unlock(lru_lock);
 463
 464		error = xfs_qm_dqflush(dqp, &bp);
 465		if (error)
 
 
 466			goto out_unlock_dirty;
 
 467
 468		xfs_buf_delwri_queue(bp, &isol->buffers);
 469		xfs_buf_relse(bp);
 470		goto out_unlock_dirty;
 471	}
 472	xfs_dqfunlock(dqp);
 473
 474	/*
 475	 * Prevent lookups now that we are past the point of no return.
 476	 */
 477	dqp->q_flags |= XFS_DQFLAG_FREEING;
 478	xfs_dqunlock(dqp);
 479
 480	ASSERT(dqp->q_nrefs == 0);
 481	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 482	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 483	trace_xfs_dqreclaim_done(dqp);
 484	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 485	return LRU_REMOVED;
 486
 487out_miss_busy:
 488	trace_xfs_dqreclaim_busy(dqp);
 489	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 490	return LRU_SKIP;
 491
 492out_unlock_dirty:
 493	trace_xfs_dqreclaim_busy(dqp);
 494	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 495	xfs_dqunlock(dqp);
 496	spin_lock(lru_lock);
 497	return LRU_RETRY;
 498}
 499
 500static unsigned long
 501xfs_qm_shrink_scan(
 502	struct shrinker		*shrink,
 503	struct shrink_control	*sc)
 504{
 505	struct xfs_quotainfo	*qi = container_of(shrink,
 506					struct xfs_quotainfo, qi_shrinker);
 507	struct xfs_qm_isolate	isol;
 508	unsigned long		freed;
 509	int			error;
 510
 511	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 512		return 0;
 513
 514	INIT_LIST_HEAD(&isol.buffers);
 515	INIT_LIST_HEAD(&isol.dispose);
 516
 517	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 518				     xfs_qm_dquot_isolate, &isol);
 519
 520	error = xfs_buf_delwri_submit(&isol.buffers);
 521	if (error)
 522		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 523
 524	while (!list_empty(&isol.dispose)) {
 525		struct xfs_dquot	*dqp;
 526
 527		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 528		list_del_init(&dqp->q_lru);
 529		xfs_qm_dqfree_one(dqp);
 530	}
 531
 532	return freed;
 533}
 534
 535static unsigned long
 536xfs_qm_shrink_count(
 537	struct shrinker		*shrink,
 538	struct shrink_control	*sc)
 539{
 540	struct xfs_quotainfo	*qi = container_of(shrink,
 541					struct xfs_quotainfo, qi_shrinker);
 542
 543	return list_lru_shrink_count(&qi->qi_lru, sc);
 544}
 545
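    /*
     * These two callbacks are the memory-reclaim side of the quota manager:
     * xfs_qm_init_quotainfo() below wires them into qi_shrinker as
     * .count_objects and .scan_objects, so memory pressure can walk the
     * per-mount dquot LRU, queue dirty dquots for writeback via the local
     * delwri list, and free the clean, unreferenced ones.
     */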
 546STATIC void
 547xfs_qm_set_defquota(
 548	struct xfs_mount	*mp,
 549	xfs_dqtype_t		type,
 550	struct xfs_quotainfo	*qinf)
 551{
 552	struct xfs_dquot	*dqp;
 553	struct xfs_def_quota	*defq;
 554	int			error;
 555
 556	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 557	if (error)
 558		return;
 559
 560	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 
 561
 562	/*
  563	 * Timers and warnings have already been set, so just set the
  564	 * default limits for this quota type.
 565	 */
 566	defq->blk.hard = dqp->q_blk.hardlimit;
 567	defq->blk.soft = dqp->q_blk.softlimit;
 568	defq->ino.hard = dqp->q_ino.hardlimit;
 569	defq->ino.soft = dqp->q_ino.softlimit;
 570	defq->rtb.hard = dqp->q_rtb.hardlimit;
 571	defq->rtb.soft = dqp->q_rtb.softlimit;
 572	xfs_qm_dqdestroy(dqp);
 573}
 574
 575/* Initialize quota time limits from the root dquot. */
 576static void
 577xfs_qm_init_timelimits(
 578	struct xfs_mount	*mp,
 579	xfs_dqtype_t		type)
 580{
 581	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 582	struct xfs_def_quota	*defq;
 583	struct xfs_dquot	*dqp;
 584	int			error;
 585
 586	defq = xfs_get_defquota(qinf, type);
 587
 588	defq->blk.time = XFS_QM_BTIMELIMIT;
 589	defq->ino.time = XFS_QM_ITIMELIMIT;
 590	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 591	defq->blk.warn = XFS_QM_BWARNLIMIT;
 592	defq->ino.warn = XFS_QM_IWARNLIMIT;
 593	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
 594
 595	/*
 596	 * We try to get the limits from the superuser's limits fields.
 597	 * This is quite hacky, but it is standard quota practice.
 598	 *
 599	 * Since we may not have done a quotacheck by this point, just read
 600	 * the dquot without attaching it to any hashtables or lists.
 601	 */
 602	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 603	if (error)
 604		return;
 605
 606	/*
  607	 * The warnings and timers set the grace period given to
  608	 * a user or group before they can no longer write. If a
  609	 * value is zero, a default is used.
 610	 */
 611	if (dqp->q_blk.timer)
 612		defq->blk.time = dqp->q_blk.timer;
 613	if (dqp->q_ino.timer)
 614		defq->ino.time = dqp->q_ino.timer;
 615	if (dqp->q_rtb.timer)
 616		defq->rtb.time = dqp->q_rtb.timer;
 617	if (dqp->q_blk.warnings)
 618		defq->blk.warn = dqp->q_blk.warnings;
 619	if (dqp->q_ino.warnings)
 620		defq->ino.warn = dqp->q_ino.warnings;
 621	if (dqp->q_rtb.warnings)
 622		defq->rtb.warn = dqp->q_rtb.warnings;
 623
 624	xfs_qm_dqdestroy(dqp);
 625}
 626
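    /*
     * Both xfs_qm_set_defquota() and xfs_qm_init_timelimits() read the id 0
     * ("root") dquot uncached; that dquot is where the administrator's
     * default limits, grace periods and warning limits live, which is also
     * why xfs_qm_reset_dqcounts() below avoids zeroing the timers and
     * warning counts of dquot id 0 during quotacheck.
     */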
 627/*
 628 * This initializes all the quota information that's kept in the
 629 * mount structure
 630 */
 631STATIC int
 632xfs_qm_init_quotainfo(
 633	struct xfs_mount	*mp)
 634{
 635	struct xfs_quotainfo	*qinf;
 636	int			error;
 
 637
 638	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 639
 640	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
 641
 642	error = list_lru_init(&qinf->qi_lru);
 643	if (error)
 644		goto out_free_qinf;
 645
 646	/*
  647	 * See if quotainodes are set up, and if not, allocate them,
 648	 * and change the superblock accordingly.
 649	 */
 650	error = xfs_qm_init_quotainos(mp);
 651	if (error)
 652		goto out_free_lru;
 653
 654	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 655	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 656	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 657	mutex_init(&qinf->qi_tree_lock);
 658
 659	/* mutex used to serialize quotaoffs */
 660	mutex_init(&qinf->qi_quotaofflock);
 661
 662	/* Precalc some constants */
 663	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 664	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 665	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
 666		qinf->qi_expiry_min =
 667			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 668		qinf->qi_expiry_max =
 669			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 670	} else {
 671		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 672		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 673	}
 674	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 675			qinf->qi_expiry_max);
 676
 677	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 678
 679	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 680	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 681	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 682
 683	if (XFS_IS_UQUOTA_RUNNING(mp))
 684		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 685	if (XFS_IS_GQUOTA_RUNNING(mp))
 686		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 687	if (XFS_IS_PQUOTA_RUNNING(mp))
 688		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 689
 690	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 691	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 692	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 693	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 694
 695	error = register_shrinker(&qinf->qi_shrinker);
 696	if (error)
 697		goto out_free_inos;
 698
 699	return 0;
 700
 701out_free_inos:
 702	mutex_destroy(&qinf->qi_quotaofflock);
 703	mutex_destroy(&qinf->qi_tree_lock);
 704	xfs_qm_destroy_quotainos(qinf);
 705out_free_lru:
 706	list_lru_destroy(&qinf->qi_lru);
 707out_free_qinf:
 708	kmem_free(qinf);
 709	mp->m_quotainfo = NULL;
 710	return error;
 711}
 712
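    /*
     * The error labels above unwind in reverse order of setup: the mutexes
     * and quota inodes, then the LRU, then the quotainfo allocation itself.
     * xfs_qm_destroy_quotainfo() below performs the equivalent teardown,
     * plus unregister_shrinker(), once everything has been constructed.
     */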
 
 713/*
 714 * Gets called when unmounting a filesystem or when all quotas get
 715 * turned off.
 716 * This purges the quota inodes, destroys locks and frees itself.
 717 */
 718void
 719xfs_qm_destroy_quotainfo(
 720	struct xfs_mount	*mp)
 721{
 722	struct xfs_quotainfo	*qi;
 723
 724	qi = mp->m_quotainfo;
 725	ASSERT(qi != NULL);
 726
 727	unregister_shrinker(&qi->qi_shrinker);
 728	list_lru_destroy(&qi->qi_lru);
 729	xfs_qm_destroy_quotainos(qi);
 730	mutex_destroy(&qi->qi_tree_lock);
 731	mutex_destroy(&qi->qi_quotaofflock);
 732	kmem_free(qi);
 733	mp->m_quotainfo = NULL;
 734}
 735
 736/*
  737 * Create an inode and return with a reference already taken, but unlocked.
  738 * This is how we create quota inodes.
 739 */
 740STATIC int
 741xfs_qm_qino_alloc(
 742	struct xfs_mount	*mp,
 743	struct xfs_inode	**ipp,
 744	unsigned int		flags)
 745{
 746	struct xfs_trans	*tp;
 747	int			error;
 748	bool			need_alloc = true;
 
 749
 750	*ipp = NULL;
 751	/*
  752	 * With a superblock that doesn't have a separate pquotino, we
 753	 * share an inode between gquota and pquota. If the on-disk
 754	 * superblock has GQUOTA and the filesystem is now mounted
 755	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 756	 * vice-versa.
 757	 */
 758	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 759			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 760		xfs_ino_t ino = NULLFSINO;
 761
 762		if ((flags & XFS_QMOPT_PQUOTA) &&
 763			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 764			ino = mp->m_sb.sb_gquotino;
 765			if (XFS_IS_CORRUPT(mp,
 766					   mp->m_sb.sb_pquotino != NULLFSINO))
 767				return -EFSCORRUPTED;
 768		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 769			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 770			ino = mp->m_sb.sb_pquotino;
 771			if (XFS_IS_CORRUPT(mp,
 772					   mp->m_sb.sb_gquotino != NULLFSINO))
 773				return -EFSCORRUPTED;
 774		}
 775		if (ino != NULLFSINO) {
 776			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
 777			if (error)
 778				return error;
 779			mp->m_sb.sb_gquotino = NULLFSINO;
 780			mp->m_sb.sb_pquotino = NULLFSINO;
 781			need_alloc = false;
 782		}
 783	}
 784
 785	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 786			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
 787			0, 0, &tp);
 788	if (error)
 
 789		return error;
 
 790
 791	if (need_alloc) {
 792		xfs_ino_t	ino;
 793
 794		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
 795		if (!error)
 796			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
 797					S_IFREG, 1, 0, 0, false, ipp);
 798		if (error) {
 799			xfs_trans_cancel(tp);
 800			return error;
 801		}
 802	}
 803
 804	/*
 805	 * Make the changes in the superblock, and log those too.
 806	 * sbfields arg may contain fields other than *QUOTINO;
 807	 * VERSIONNUM for example.
 808	 */
 809	spin_lock(&mp->m_sb_lock);
 810	if (flags & XFS_QMOPT_SBVERSION) {
 811		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 812
 813		xfs_sb_version_addquota(&mp->m_sb);
 814		mp->m_sb.sb_uquotino = NULLFSINO;
 815		mp->m_sb.sb_gquotino = NULLFSINO;
 816		mp->m_sb.sb_pquotino = NULLFSINO;
 817
 818		/* qflags will get updated fully _after_ quotacheck */
 819		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 820	}
 821	if (flags & XFS_QMOPT_UQUOTA)
 822		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
 823	else if (flags & XFS_QMOPT_GQUOTA)
 824		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
 825	else
 826		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
 827	spin_unlock(&mp->m_sb_lock);
 828	xfs_log_sb(tp);
 829
 830	error = xfs_trans_commit(tp);
 831	if (error) {
 832		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 833		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 834	}
 835	if (need_alloc)
 836		xfs_finish_inode_setup(*ipp);
 837	return error;
 838}
 839
 840
 841STATIC void
 842xfs_qm_reset_dqcounts(
 843	struct xfs_mount	*mp,
 844	struct xfs_buf		*bp,
 845	xfs_dqid_t		id,
 846	xfs_dqtype_t		type)
 847{
 848	struct xfs_dqblk	*dqb;
 849	int			j;
 850
 851	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 852
 853	/*
 854	 * Reset all counters and timers. They'll be
 855	 * started afresh by xfs_qm_quotacheck.
 856	 */
 857#ifdef DEBUG
 858	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
 859		sizeof(xfs_dqblk_t);
 860	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 861#endif
 862	dqb = bp->b_addr;
 863	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 864		struct xfs_disk_dquot	*ddq;
 865
 866		ddq = (struct xfs_disk_dquot *)&dqb[j];
 867
 868		/*
 869		 * Do a sanity check, and if needed, repair the dqblk. Don't
 870		 * output any warnings because it's perfectly possible to
 871		 * find uninitialised dquot blks. See comment in
 872		 * xfs_dquot_verify.
 873		 */
 874		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
 875		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
 876			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 877
 878		/*
 879		 * Reset type in case we are reusing group quota file for
 880		 * project quotas or vice versa
 881		 */
 882		ddq->d_type = type;
 883		ddq->d_bcount = 0;
 884		ddq->d_icount = 0;
 885		ddq->d_rtbcount = 0;
 886
 887		/*
 888		 * dquot id 0 stores the default grace period and the maximum
 889		 * warning limit that were set by the administrator, so we
 890		 * should not reset them.
 891		 */
 892		if (ddq->d_id != 0) {
 893			ddq->d_btimer = 0;
 894			ddq->d_itimer = 0;
 895			ddq->d_rtbtimer = 0;
 896			ddq->d_bwarns = 0;
 897			ddq->d_iwarns = 0;
 898			ddq->d_rtbwarns = 0;
 899			if (xfs_sb_version_hasbigtime(&mp->m_sb))
 900				ddq->d_type |= XFS_DQTYPE_BIGTIME;
 901		}
 902
 903		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 904			xfs_update_cksum((char *)&dqb[j],
 905					 sizeof(struct xfs_dqblk),
 906					 XFS_DQUOT_CRC_OFF);
 907		}
 908	}
 909}
 910
 911STATIC int
 912xfs_qm_reset_dqcounts_all(
 913	struct xfs_mount	*mp,
 914	xfs_dqid_t		firstid,
 915	xfs_fsblock_t		bno,
 916	xfs_filblks_t		blkcnt,
 917	xfs_dqtype_t		type,
 918	struct list_head	*buffer_list)
 919{
 920	struct xfs_buf		*bp;
 921	int			error = 0;
 
 922
 923	ASSERT(blkcnt > 0);
 
 
 
 924
 925	/*
 926	 * Blkcnt arg can be a very big number, and might even be
 927	 * larger than the log itself. So, we have to break it up into
 928	 * manageable-sized transactions.
 929	 * Note that we don't start a permanent transaction here; we might
 930	 * not be able to get a log reservation for the whole thing up front,
 931	 * and we don't really care to either, because we just discard
 932	 * everything if we were to crash in the middle of this loop.
 933	 */
 934	while (blkcnt--) {
 935		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 936			      XFS_FSB_TO_DADDR(mp, bno),
 937			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 938			      &xfs_dquot_buf_ops);
 939
 940		/*
  941		 * CRC and validation errors will return an EFSCORRUPTED here. If
 942		 * this occurs, re-read without CRC validation so that we can
 943		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 944		 * will leave a trace in the log indicating corruption has
 945		 * been detected.
 946		 */
 947		if (error == -EFSCORRUPTED) {
 948			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 949				      XFS_FSB_TO_DADDR(mp, bno),
 950				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 951				      NULL);
 952		}
 953
 954		if (error)
 955			break;
 956
 957		/*
 958		 * A corrupt buffer might not have a verifier attached, so
 959		 * make sure we have the correct one attached before writeback
 960		 * occurs.
 961		 */
 962		bp->b_ops = &xfs_dquot_buf_ops;
 963		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 964		xfs_buf_delwri_queue(bp, buffer_list);
 965		xfs_buf_relse(bp);
 966
  967		/* go to the next block. */
 968		bno++;
 969		firstid += mp->m_quotainfo->qi_dqperchunk;
 970	}
 971
 972	return error;
 973}
 974
 975/*
 976 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 977 * counters for every chunk of dquots that we find.
 978 */
 979STATIC int
 980xfs_qm_reset_dqcounts_buf(
 981	struct xfs_mount	*mp,
 982	struct xfs_inode	*qip,
 983	xfs_dqtype_t		type,
 984	struct list_head	*buffer_list)
 985{
 986	struct xfs_bmbt_irec	*map;
 987	int			i, nmaps;	/* number of map entries */
 988	int			error;		/* return value */
 989	xfs_fileoff_t		lblkno;
 990	xfs_filblks_t		maxlblkcnt;
 991	xfs_dqid_t		firstid;
 992	xfs_fsblock_t		rablkno;
 993	xfs_filblks_t		rablkcnt;
 994
 995	error = 0;
 996	/*
 997	 * This looks racy, but we can't keep an inode lock across a
  998	 * trans_reserve. But this gets called during quotacheck, and that
  999	 * happens only at mount time, which is single threaded.
1000	 */
1001	if (qip->i_nblocks == 0)
1002		return 0;
1003
1004	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1005
1006	lblkno = 0;
1007	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1008	do {
1009		uint		lock_mode;
1010
1011		nmaps = XFS_DQITER_MAP_SIZE;
1012		/*
1013		 * We aren't changing the inode itself. Just changing
1014		 * some of its data. No new blocks are added here, and
1015		 * the inode is never added to the transaction.
1016		 */
1017		lock_mode = xfs_ilock_data_map_shared(qip);
1018		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1019				       map, &nmaps, 0);
1020		xfs_iunlock(qip, lock_mode);
1021		if (error)
1022			break;
1023
1024		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1025		for (i = 0; i < nmaps; i++) {
1026			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1027			ASSERT(map[i].br_blockcount);
1028
1029
1030			lblkno += map[i].br_blockcount;
1031
1032			if (map[i].br_startblock == HOLESTARTBLOCK)
1033				continue;
1034
1035			firstid = (xfs_dqid_t) map[i].br_startoff *
1036				mp->m_quotainfo->qi_dqperchunk;
1037			/*
1038			 * Do a read-ahead on the next extent.
1039			 */
1040			if ((i+1 < nmaps) &&
1041			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1042				rablkcnt =  map[i+1].br_blockcount;
1043				rablkno = map[i+1].br_startblock;
1044				while (rablkcnt--) {
1045					xfs_buf_readahead(mp->m_ddev_targp,
1046					       XFS_FSB_TO_DADDR(mp, rablkno),
1047					       mp->m_quotainfo->qi_dqchunklen,
1048					       &xfs_dquot_buf_ops);
1049					rablkno++;
1050				}
1051			}
1052			/*
1053			 * Iterate thru all the blks in the extent and
1054			 * reset the counters of all the dquots inside them.
1055			 */
1056			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1057						   map[i].br_startblock,
1058						   map[i].br_blockcount,
1059						   type, buffer_list);
1060			if (error)
1061				goto out;
1062		}
1063	} while (nmaps > 0);
1064
1065out:
1066	kmem_free(map);
1067	return error;
1068}
1069
1070/*
1071 * Called by dqusage_adjust in doing a quotacheck.
1072 *
 1073 * Given the inode and a dquot id, this updates both the incore dquot as well
1074 * as the buffer copy. This is so that once the quotacheck is done, we can
1075 * just log all the buffers, as opposed to logging numerous updates to
1076 * individual dquots.
1077 */
1078STATIC int
1079xfs_qm_quotacheck_dqadjust(
1080	struct xfs_inode	*ip,
1081	xfs_dqtype_t		type,
 
1082	xfs_qcnt_t		nblks,
1083	xfs_qcnt_t		rtblks)
1084{
1085	struct xfs_mount	*mp = ip->i_mount;
1086	struct xfs_dquot	*dqp;
1087	xfs_dqid_t		id;
1088	int			error;
1089
1090	id = xfs_qm_id_for_quotatype(ip, type);
1091	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1092	if (error) {
1093		/*
1094		 * Shouldn't be able to turn off quotas here.
1095		 */
1096		ASSERT(error != -ESRCH);
1097		ASSERT(error != -ENOENT);
1098		return error;
1099	}
1100
1101	trace_xfs_dqadjust(dqp);
1102
1103	/*
1104	 * Adjust the inode count and the block count to reflect this inode's
1105	 * resource usage.
1106	 */
1107	dqp->q_ino.count++;
1108	dqp->q_ino.reserved++;
1109	if (nblks) {
1110		dqp->q_blk.count += nblks;
1111		dqp->q_blk.reserved += nblks;
1112	}
1113	if (rtblks) {
1114		dqp->q_rtb.count += rtblks;
1115		dqp->q_rtb.reserved += rtblks;
1116	}
1117
1118	/*
1119	 * Set default limits, adjust timers (since we changed usages)
1120	 *
1121	 * There are no timers for the default values set in the root dquot.
1122	 */
1123	if (dqp->q_id) {
1124		xfs_qm_adjust_dqlimits(dqp);
1125		xfs_qm_adjust_dqtimers(dqp);
1126	}
1127
1128	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1129	xfs_qm_dqput(dqp);
1130	return 0;
1131}
1132
1133/*
1134 * callback routine supplied to bulkstat(). Given an inumber, find its
1135 * dquots and update them to account for resources taken by that inode.
1136 */
1137/* ARGSUSED */
1138STATIC int
1139xfs_qm_dqusage_adjust(
1140	struct xfs_mount	*mp,
1141	struct xfs_trans	*tp,
1142	xfs_ino_t		ino,
1143	void			*data)
1144{
1145	struct xfs_inode	*ip;
1146	xfs_qcnt_t		nblks;
1147	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1148	int			error;
 
1149
1150	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1151
1152	/*
1153	 * rootino must have its resources accounted for, not so with the quota
1154	 * inodes.
1155	 */
1156	if (xfs_is_quota_inode(&mp->m_sb, ino))
1157		return 0;
 
 
1158
1159	/*
1160	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1161	 * at mount time and therefore nobody will be racing chown/chproj.
 
 
1162	 */
1163	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1164	if (error == -EINVAL || error == -ENOENT)
1165		return 0;
1166	if (error)
1167		return error;
 
1168
1169	ASSERT(ip->i_delayed_blks == 0);
1170
1171	if (XFS_IS_REALTIME_INODE(ip)) {
1172		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1173
1174		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
 
1175		if (error)
1176			goto error0;
1177
1178		xfs_bmap_count_leaves(ifp, &rtblks);
1179	}
1180
1181	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1182
1183	/*
1184	 * Add the (disk blocks and inode) resources occupied by this
1185	 * inode to its dquots. We do this adjustment in the incore dquot,
1186	 * and also copy the changes to its buffer.
1187	 * We don't care about putting these changes in a transaction
1188	 * envelope because if we crash in the middle of a 'quotacheck'
1189	 * we have to start from the beginning anyway.
1190	 * Once we're done, we'll log all the dquot bufs.
1191	 *
1192	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1193	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1194	 */
1195	if (XFS_IS_UQUOTA_ON(mp)) {
1196		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1197				rtblks);
1198		if (error)
1199			goto error0;
1200	}
1201
1202	if (XFS_IS_GQUOTA_ON(mp)) {
1203		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1204				rtblks);
1205		if (error)
1206			goto error0;
1207	}
1208
1209	if (XFS_IS_PQUOTA_ON(mp)) {
1210		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1211				rtblks);
1212		if (error)
1213			goto error0;
1214	}
1215
1216error0:
1217	xfs_irele(ip);
 
 
1218	return error;
1219}
1220
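    /*
     * In short: for each inode, realtime blocks (counted from the data fork
     * extents) are charged to q_rtb and the remaining i_nblocks to q_blk,
     * while the inode itself bumps q_ino, once for every quota type that is
     * currently enabled.
     */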
1221STATIC int
1222xfs_qm_flush_one(
1223	struct xfs_dquot	*dqp,
1224	void			*data)
1225{
1226	struct xfs_mount	*mp = dqp->q_mount;
1227	struct list_head	*buffer_list = data;
1228	struct xfs_buf		*bp = NULL;
1229	int			error = 0;
1230
1231	xfs_dqlock(dqp);
1232	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1233		goto out_unlock;
1234	if (!XFS_DQ_IS_DIRTY(dqp))
1235		goto out_unlock;
1236
1237	/*
1238	 * The only way the dquot is already flush locked by the time quotacheck
1239	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1240	 * it for the final time. Quotacheck collects all dquot bufs in the
1241	 * local delwri queue before dquots are dirtied, so reclaim can't have
1242	 * possibly queued it for I/O. The only way out is to push the buffer to
1243	 * cycle the flush lock.
1244	 */
1245	if (!xfs_dqflock_nowait(dqp)) {
1246		/* buf is pinned in-core by delwri list */
1247		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1248				mp->m_quotainfo->qi_dqchunklen, 0);
1249		if (!bp) {
1250			error = -EINVAL;
1251			goto out_unlock;
1252		}
1253		xfs_buf_unlock(bp);
1254
1255		xfs_buf_delwri_pushbuf(bp, buffer_list);
1256		xfs_buf_rele(bp);
1257
1258		error = -EAGAIN;
1259		goto out_unlock;
1260	}
1261
1262	error = xfs_qm_dqflush(dqp, &bp);
1263	if (error)
1264		goto out_unlock;
1265
1266	xfs_buf_delwri_queue(bp, buffer_list);
1267	xfs_buf_relse(bp);
1268out_unlock:
1269	xfs_dqunlock(dqp);
1270	return error;
1271}
1272
1273/*
1274 * Walk thru all the filesystem inodes and construct a consistent view
1275 * of the disk quota world. If the quotacheck fails, disable quotas.
1276 */
1277STATIC int
1278xfs_qm_quotacheck(
1279	xfs_mount_t	*mp)
1280{
1281	int			error, error2;
 
 
1282	uint			flags;
1283	LIST_HEAD		(buffer_list);
1284	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1285	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1286	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1287
 
 
 
1288	flags = 0;
1289
1290	ASSERT(uip || gip || pip);
1291	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1292
1293	xfs_notice(mp, "Quotacheck needed: Please wait.");
1294
1295	/*
1296	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1297	 * their counters to zero. We need a clean slate.
1298	 * We don't log our changes till later.
1299	 */
1300	if (uip) {
1301		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1302					 &buffer_list);
1303		if (error)
1304			goto error_return;
1305		flags |= XFS_UQUOTA_CHKD;
1306	}
1307
1308	if (gip) {
1309		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1310					 &buffer_list);
1311		if (error)
1312			goto error_return;
1313		flags |= XFS_GQUOTA_CHKD;
1314	}
1315
1316	if (pip) {
1317		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1318					 &buffer_list);
1319		if (error)
1320			goto error_return;
1321		flags |= XFS_PQUOTA_CHKD;
1322	}
1323
1324	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1325			NULL);
1326	if (error)
1327		goto error_return;
1328
1329	/*
1330	 * We've made all the changes that we need to make incore.  Flush them
1331	 * down to disk buffers if everything was updated successfully.
1332	 */
1333	if (XFS_IS_UQUOTA_ON(mp)) {
1334		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1335					  &buffer_list);
1336	}
1337	if (XFS_IS_GQUOTA_ON(mp)) {
1338		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1339					   &buffer_list);
1340		if (!error)
1341			error = error2;
1342	}
1343	if (XFS_IS_PQUOTA_ON(mp)) {
1344		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1345					   &buffer_list);
1346		if (!error)
1347			error = error2;
1348	}
1349
1350	error2 = xfs_buf_delwri_submit(&buffer_list);
1351	if (!error)
1352		error = error2;
1353
1354	/*
1355	 * We can get this error if we couldn't do a dquot allocation inside
1356	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1357	 * dirty dquots that might be cached, we just want to get rid of them
1358	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1359	 * at this point (because we intentionally didn't in dqget_noattach).
1360	 */
1361	if (error) {
1362		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1363		goto error_return;
1364	}
1365
1366	/*
1367	 * If one type of quotas is off, then it will lose its
1368	 * quotachecked status, since we won't be doing accounting for
1369	 * that type anymore.
1370	 */
1371	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1372	mp->m_qflags |= flags;
1373
1374 error_return:
1375	xfs_buf_delwri_cancel(&buffer_list);
1376
1377	if (error) {
1378		xfs_warn(mp,
1379	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1380			error);
1381		/*
1382		 * We must turn off quotas.
1383		 */
1384		ASSERT(mp->m_quotainfo != NULL);
1385		xfs_qm_destroy_quotainfo(mp);
1386		if (xfs_mount_reset_sbqflags(mp)) {
1387			xfs_warn(mp,
1388				"Quotacheck: Failed to reset quota flags.");
1389		}
1390	} else
1391		xfs_notice(mp, "Quotacheck: Done.");
1392	return error;
1393}
1394
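    /*
     * Quotacheck is thus a three step affair: zero every on-disk dquot
     * counter via xfs_qm_reset_dqcounts_buf(), rebuild usage by walking all
     * inodes through xfs_qm_dqusage_adjust(), and finally flush the dirtied
     * dquots to their buffers and submit the delwri list in one go.
     */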
1395/*
1396 * This is called from xfs_mountfs to start quotas and initialize all
1397 * necessary data structures like quotainfo.  This is also responsible for
1398 * running a quotacheck as necessary.  We are guaranteed that the superblock
1399 * is consistently read in at this point.
1400 *
1401 * If we fail here, the mount will continue with quota turned off. We don't
 1402 * need to indicate success or failure at all.
1403 */
1404void
1405xfs_qm_mount_quotas(
1406	struct xfs_mount	*mp)
1407{
1408	int			error = 0;
1409	uint			sbf;
1410
1411	/*
1412	 * If quotas on realtime volumes is not supported, we disable
1413	 * quotas immediately.
1414	 */
1415	if (mp->m_sb.sb_rextents) {
1416		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1417		mp->m_qflags = 0;
1418		goto write_changes;
1419	}
1420
1421	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1422
1423	/*
1424	 * Allocate the quotainfo structure inside the mount struct, and
1425	 * create quotainode(s), and change/rev superblock if necessary.
1426	 */
1427	error = xfs_qm_init_quotainfo(mp);
1428	if (error) {
1429		/*
1430		 * We must turn off quotas.
1431		 */
1432		ASSERT(mp->m_quotainfo == NULL);
1433		mp->m_qflags = 0;
1434		goto write_changes;
1435	}
1436	/*
1437	 * If any of the quotas are not consistent, do a quotacheck.
1438	 */
1439	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1440		error = xfs_qm_quotacheck(mp);
1441		if (error) {
1442			/* Quotacheck failed and disabled quotas. */
1443			return;
1444		}
1445	}
1446	/*
1447	 * If one type of quotas is off, then it will lose its
1448	 * quotachecked status, since we won't be doing accounting for
1449	 * that type anymore.
1450	 */
1451	if (!XFS_IS_UQUOTA_ON(mp))
1452		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1453	if (!XFS_IS_GQUOTA_ON(mp))
1454		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1455	if (!XFS_IS_PQUOTA_ON(mp))
1456		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1457
1458 write_changes:
1459	/*
1460	 * We actually don't have to acquire the m_sb_lock at all.
1461	 * This can only be called from mount, and that's single threaded. XXX
1462	 */
1463	spin_lock(&mp->m_sb_lock);
1464	sbf = mp->m_sb.sb_qflags;
1465	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1466	spin_unlock(&mp->m_sb_lock);
1467
1468	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1469		if (xfs_sync_sb(mp, false)) {
1470			/*
1471			 * We could only have been turning quotas off.
1472			 * We aren't in very good shape actually because
1473			 * the incore structures are convinced that quotas are
 1474			 * off, but the on-disk superblock doesn't know that!
1475			 */
1476			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1477			xfs_alert(mp, "%s: Superblock update failed!",
1478				__func__);
1479		}
1480	}
1481
1482	if (error) {
1483		xfs_warn(mp, "Failed to initialize disk quotas.");
1484		return;
1485	}
1486}
1487
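    /*
     * The mount-time flow in one place: refuse quotas on realtime
     * filesystems, set up quotainfo (allocating quota inodes if needed),
     * run quotacheck only when the CHKD flags say the counters cannot be
     * trusted, and then write the resulting qflags back to the superblock.
     */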
1488/*
1489 * This is called after the superblock has been read in and we're ready to
1490 * iget the quota inodes.
1491 */
1492STATIC int
1493xfs_qm_init_quotainos(
1494	xfs_mount_t	*mp)
1495{
1496	struct xfs_inode	*uip = NULL;
1497	struct xfs_inode	*gip = NULL;
1498	struct xfs_inode	*pip = NULL;
1499	int			error;
1500	uint			flags = 0;
1501
1502	ASSERT(mp->m_quotainfo);
1503
1504	/*
1505	 * Get the uquota and gquota inodes
1506	 */
1507	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1508		if (XFS_IS_UQUOTA_ON(mp) &&
1509		    mp->m_sb.sb_uquotino != NULLFSINO) {
1510			ASSERT(mp->m_sb.sb_uquotino > 0);
1511			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1512					     0, 0, &uip);
1513			if (error)
1514				return error;
1515		}
1516		if (XFS_IS_GQUOTA_ON(mp) &&
1517		    mp->m_sb.sb_gquotino != NULLFSINO) {
1518			ASSERT(mp->m_sb.sb_gquotino > 0);
1519			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1520					     0, 0, &gip);
1521			if (error)
1522				goto error_rele;
1523		}
1524		if (XFS_IS_PQUOTA_ON(mp) &&
1525		    mp->m_sb.sb_pquotino != NULLFSINO) {
1526			ASSERT(mp->m_sb.sb_pquotino > 0);
1527			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1528					     0, 0, &pip);
1529			if (error)
1530				goto error_rele;
1531		}
1532	} else {
1533		flags |= XFS_QMOPT_SBVERSION;
1534	}
1535
1536	/*
1537	 * Create the three inodes, if they don't exist already. The changes
1538	 * made above will get added to a transaction and logged in one of
1539	 * the qino_alloc calls below.  If the device is readonly,
1540	 * temporarily switch to read-write to do this.
1541	 */
1542	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1543		error = xfs_qm_qino_alloc(mp, &uip,
1544					      flags | XFS_QMOPT_UQUOTA);
1545		if (error)
1546			goto error_rele;
1547
1548		flags &= ~XFS_QMOPT_SBVERSION;
1549	}
1550	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1551		error = xfs_qm_qino_alloc(mp, &gip,
1552					  flags | XFS_QMOPT_GQUOTA);
1553		if (error)
1554			goto error_rele;
1555
1556		flags &= ~XFS_QMOPT_SBVERSION;
1557	}
1558	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1559		error = xfs_qm_qino_alloc(mp, &pip,
1560					  flags | XFS_QMOPT_PQUOTA);
1561		if (error)
1562			goto error_rele;
1563	}
1564
1565	mp->m_quotainfo->qi_uquotaip = uip;
1566	mp->m_quotainfo->qi_gquotaip = gip;
1567	mp->m_quotainfo->qi_pquotaip = pip;
1568
1569	return 0;
1570
1571error_rele:
1572	if (uip)
1573		xfs_irele(uip);
1574	if (gip)
1575		xfs_irele(gip);
1576	if (pip)
1577		xfs_irele(pip);
1578	return error;
1579}
1580
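    /*
     * XFS_QMOPT_SBVERSION is only passed to the first xfs_qm_qino_alloc()
     * call above: it indicates that the superblock has no quota feature bit
     * yet, so the first allocation also revs the superblock version, and
     * later allocations in the same pass drop the flag and just create
     * their inode.
     */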
1581STATIC void
1582xfs_qm_destroy_quotainos(
1583	struct xfs_quotainfo	*qi)
1584{
1585	if (qi->qi_uquotaip) {
1586		xfs_irele(qi->qi_uquotaip);
1587		qi->qi_uquotaip = NULL; /* paranoia */
1588	}
1589	if (qi->qi_gquotaip) {
1590		xfs_irele(qi->qi_gquotaip);
1591		qi->qi_gquotaip = NULL;
1592	}
1593	if (qi->qi_pquotaip) {
1594		xfs_irele(qi->qi_pquotaip);
1595		qi->qi_pquotaip = NULL;
1596	}
1597}
1598
1599STATIC void
1600xfs_qm_dqfree_one(
1601	struct xfs_dquot	*dqp)
1602{
1603	struct xfs_mount	*mp = dqp->q_mount;
1604	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1605
1606	mutex_lock(&qi->qi_tree_lock);
1607	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 
1608
1609	qi->qi_dquots--;
1610	mutex_unlock(&qi->qi_tree_lock);
1611
1612	xfs_qm_dqdestroy(dqp);
1613}
1614
1615/* --------------- utility functions for vnodeops ---------------- */
1616
1617
1618/*
 1619 * Given an inode, a uid, gid and prid, make sure that we have
1620 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1621 * quotas by creating this file.
1622 * This also attaches dquot(s) to the given inode after locking it,
1623 * and returns the dquots corresponding to the uid and/or gid.
1624 *
1625 * in	: inode (unlocked)
1626 * out	: udquot, gdquot with references taken and unlocked
1627 */
1628int
1629xfs_qm_vop_dqalloc(
1630	struct xfs_inode	*ip,
1631	kuid_t			uid,
1632	kgid_t			gid,
1633	prid_t			prid,
1634	uint			flags,
1635	struct xfs_dquot	**O_udqpp,
1636	struct xfs_dquot	**O_gdqpp,
1637	struct xfs_dquot	**O_pdqpp)
1638{
1639	struct xfs_mount	*mp = ip->i_mount;
1640	struct inode		*inode = VFS_I(ip);
1641	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1642	struct xfs_dquot	*uq = NULL;
1643	struct xfs_dquot	*gq = NULL;
1644	struct xfs_dquot	*pq = NULL;
1645	int			error;
1646	uint			lockflags;
1647
1648	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1649		return 0;
1650
1651	lockflags = XFS_ILOCK_EXCL;
1652	xfs_ilock(ip, lockflags);
1653
1654	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1655		gid = inode->i_gid;
1656
1657	/*
1658	 * Attach the dquot(s) to this inode, doing a dquot allocation
1659	 * if necessary. The dquot(s) will not be locked.
1660	 */
1661	if (XFS_NOT_DQATTACHED(mp, ip)) {
1662		error = xfs_qm_dqattach_locked(ip, true);
1663		if (error) {
1664			xfs_iunlock(ip, lockflags);
1665			return error;
1666		}
1667	}
1668
1669	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1670		ASSERT(O_udqpp);
1671		if (!uid_eq(inode->i_uid, uid)) {
1672			/*
1673			 * What we need is the dquot that has this uid, and
1674			 * if we send the inode to dqget, the uid of the inode
1675			 * takes priority over what's sent in the uid argument.
1676			 * We must unlock inode here before calling dqget if
1677			 * we're not sending the inode, because otherwise
1678			 * we'll deadlock by doing trans_reserve while
1679			 * holding ilock.
1680			 */
1681			xfs_iunlock(ip, lockflags);
1682			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1683					XFS_DQTYPE_USER, true, &uq);
 
 
 
1684			if (error) {
1685				ASSERT(error != -ENOENT);
1686				return error;
1687			}
1688			/*
1689			 * Get the ilock in the right order.
1690			 */
1691			xfs_dqunlock(uq);
1692			lockflags = XFS_ILOCK_SHARED;
1693			xfs_ilock(ip, lockflags);
1694		} else {
1695			/*
1696			 * Take an extra reference, because we'll return
 1697			 * this to the caller.
1698			 */
1699			ASSERT(ip->i_udquot);
1700			uq = xfs_qm_dqhold(ip->i_udquot);
1701		}
1702	}
1703	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1704		ASSERT(O_gdqpp);
1705		if (!gid_eq(inode->i_gid, gid)) {
1706			xfs_iunlock(ip, lockflags);
1707			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1708					XFS_DQTYPE_GROUP, true, &gq);
 
 
 
1709			if (error) {
1710				ASSERT(error != -ENOENT);
1711				goto error_rele;
1712			}
1713			xfs_dqunlock(gq);
1714			lockflags = XFS_ILOCK_SHARED;
1715			xfs_ilock(ip, lockflags);
1716		} else {
1717			ASSERT(ip->i_gdquot);
1718			gq = xfs_qm_dqhold(ip->i_gdquot);
1719		}
1720	}
1721	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722		ASSERT(O_pdqpp);
1723		if (ip->i_projid != prid) {
1724			xfs_iunlock(ip, lockflags);
1725			error = xfs_qm_dqget(mp, prid,
1726					XFS_DQTYPE_PROJ, true, &pq);
 
 
 
1727			if (error) {
1728				ASSERT(error != -ENOENT);
1729				goto error_rele;
1730			}
1731			xfs_dqunlock(pq);
1732			lockflags = XFS_ILOCK_SHARED;
1733			xfs_ilock(ip, lockflags);
1734		} else {
1735			ASSERT(ip->i_pdquot);
1736			pq = xfs_qm_dqhold(ip->i_pdquot);
1737		}
1738	}
1739	trace_xfs_dquot_dqalloc(ip);
 
1740
1741	xfs_iunlock(ip, lockflags);
1742	if (O_udqpp)
1743		*O_udqpp = uq;
1744	else
1745		xfs_qm_dqrele(uq);
1746	if (O_gdqpp)
1747		*O_gdqpp = gq;
1748	else
1749		xfs_qm_dqrele(gq);
1750	if (O_pdqpp)
1751		*O_pdqpp = pq;
1752	else
1753		xfs_qm_dqrele(pq);
1754	return 0;
1755
1756error_rele:
1757	xfs_qm_dqrele(gq);
1758	xfs_qm_dqrele(uq);
1759	return error;
1760}
1761
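    /*
     * Each dquot handed back through O_udqpp/O_gdqpp/O_pdqpp carries its own
     * reference, either freshly obtained from xfs_qm_dqget() or an extra
     * hold on the inode's attached dquot, so the caller is expected to drop
     * it with xfs_qm_dqrele() when finished; xfs_qm_vop_create_dqattach()
     * and xfs_qm_vop_chown() below take their own holds rather than
     * consuming the caller's.
     */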
1762/*
1763 * Actually transfer ownership, and do dquot modifications.
1764 * These were already reserved.
1765 */
1766struct xfs_dquot *
1767xfs_qm_vop_chown(
1768	struct xfs_trans	*tp,
1769	struct xfs_inode	*ip,
1770	struct xfs_dquot	**IO_olddq,
1771	struct xfs_dquot	*newdq)
1772{
1773	struct xfs_dquot	*prevdq;
1774	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1775				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776
1777
1778	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780
1781	/* old dquot */
1782	prevdq = *IO_olddq;
1783	ASSERT(prevdq);
1784	ASSERT(prevdq != newdq);
1785
1786	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1787	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789	/* the sparkling new dquot */
1790	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1791	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792
1793	/*
1794	 * Back when we made quota reservations for the chown, we reserved the
1795	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
1796	 * switched the dquots, decrease the new dquot's block reservation
1797	 * (having already bumped up the real counter) so that we don't have
1798	 * any reservation to give back when we commit.
1799	 */
1800	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1801			-ip->i_delayed_blks);
1802
1803	/*
1804	 * Give the incore reservation for delalloc blocks back to the old
1805	 * dquot.  We don't normally handle delalloc quota reservations
1806	 * transactionally, so just lock the dquot and subtract from the
1807	 * reservation.  Dirty the transaction because it's too late to turn
1808	 * back now.
1809	 */
1810	tp->t_flags |= XFS_TRANS_DIRTY;
1811	xfs_dqlock(prevdq);
1812	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1813	prevdq->q_blk.reserved -= ip->i_delayed_blks;
1814	xfs_dqunlock(prevdq);
1815
1816	/*
1817	 * Take an extra reference, because the inode is going to keep
1818	 * this dquot pointer even after the trans_commit.
1819	 */
1820	*IO_olddq = xfs_qm_dqhold(newdq);
1821
1822	return prevdq;
1823}
1824
1825int
1826xfs_qm_vop_rename_dqattach(
1827	struct xfs_inode	**i_tab)
1828{
1829	struct xfs_mount	*mp = i_tab[0]->i_mount;
1830	int			i;
1831
1832	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1833		return 0;
1834
1835	for (i = 0; (i < 4 && i_tab[i]); i++) {
1836		struct xfs_inode	*ip = i_tab[i];
1837		int			error;
1838
1839		/*
1840		 * Watch out for duplicate entries in the table.
1841		 */
1842		if (i == 0 || ip != i_tab[i-1]) {
1843			if (XFS_NOT_DQATTACHED(mp, ip)) {
1844				error = xfs_qm_dqattach(ip);
1845				if (error)
1846					return error;
1847			}
1848		}
1849	}
1850	return 0;
1851}
1852
1853void
1854xfs_qm_vop_create_dqattach(
1855	struct xfs_trans	*tp,
1856	struct xfs_inode	*ip,
1857	struct xfs_dquot	*udqp,
1858	struct xfs_dquot	*gdqp,
1859	struct xfs_dquot	*pdqp)
1860{
1861	struct xfs_mount	*mp = tp->t_mountp;
1862
1863	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1864		return;
1865
1866	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
1867
1868	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1869		ASSERT(ip->i_udquot == NULL);
1870		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1871
1872		ip->i_udquot = xfs_qm_dqhold(udqp);
1873		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1874	}
1875	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1876		ASSERT(ip->i_gdquot == NULL);
1877		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1878
1879		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1880		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1881	}
1882	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1883		ASSERT(ip->i_pdquot == NULL);
1884		ASSERT(ip->i_projid == pdqp->q_id);
1885
1886		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1887		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1888	}
1889}
1890