v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
  17#include "xfs_quota.h"
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28#include "xfs_log_priv.h"
  29#include "xfs_health.h"
  30#include "xfs_da_format.h"
  31#include "xfs_metafile.h"
  32#include "xfs_rtgroup.h"
  33
  34/*
  35 * The global quota manager. There is only one of these for the entire
  36 * system, _not_ one per file system. XQM keeps track of the overall
  37 * quota functionality, including maintaining the freelist and hash
  38 * tables of dquots.
  39 */
  40STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  41STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
  42
  43STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  44/*
  45 * We use the batch lookup interface to iterate over the dquots as it
  46 * currently is the only interface into the radix tree code that allows
  47 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
   48 * operations is fine as all callers run only during mount/umount
  49 * or quotaoff.
  50 */
  51#define XFS_DQ_LOOKUP_BATCH	32
  52
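     /*
      * Walk all dquots of the given type, calling @execute on each.  The
      * contract implemented below: a callback returning -EAGAIN is retried
      * after a short delay, -EFSCORRUPTED aborts the walk immediately, and
      * any other error is recorded while the walk continues.
      */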
  53STATIC int
  54xfs_qm_dquot_walk(
  55	struct xfs_mount	*mp,
  56	xfs_dqtype_t		type,
  57	int			(*execute)(struct xfs_dquot *dqp, void *data),
  58	void			*data)
  59{
  60	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  61	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  62	uint32_t		next_index;
  63	int			last_error = 0;
  64	int			skipped;
  65	int			nr_found;
  66
  67restart:
  68	skipped = 0;
  69	next_index = 0;
  70	nr_found = 0;
  71
  72	while (1) {
  73		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  74		int		error;
  75		int		i;
  76
  77		mutex_lock(&qi->qi_tree_lock);
  78		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  79					next_index, XFS_DQ_LOOKUP_BATCH);
  80		if (!nr_found) {
  81			mutex_unlock(&qi->qi_tree_lock);
  82			break;
  83		}
  84
  85		for (i = 0; i < nr_found; i++) {
  86			struct xfs_dquot *dqp = batch[i];
  87
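     			/*
     			 * Record the resume point before calling @execute,
     			 * which may delete this dquot from the tree (as the
     			 * purge callback below does).
     			 */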
  88			next_index = dqp->q_id + 1;
  89
  90			error = execute(batch[i], data);
  91			if (error == -EAGAIN) {
  92				skipped++;
  93				continue;
  94			}
  95			if (error && last_error != -EFSCORRUPTED)
  96				last_error = error;
  97		}
  98
  99		mutex_unlock(&qi->qi_tree_lock);
 100
  101		/* Bail out if the filesystem is corrupted. */
 102		if (last_error == -EFSCORRUPTED) {
 103			skipped = 0;
 104			break;
 105		}
 106		/* we're done if id overflows back to zero */
 107		if (!next_index)
 108			break;
 109	}
 110
 111	if (skipped) {
 112		delay(1);
 113		goto restart;
 114	}
 115
 116	return last_error;
 117}
 118
 119
 120/*
 121 * Purge a dquot from all tracking data structures and free it.
 122 */
 123STATIC int
 124xfs_qm_dqpurge(
 125	struct xfs_dquot	*dqp,
 126	void			*data)
 127{
 128	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
 129	int			error = -EAGAIN;
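     	/* Preset to -EAGAIN so that a busy dquot is retried by the walker. */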
 130
 131	xfs_dqlock(dqp);
 132	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 133		goto out_unlock;
 134
 135	dqp->q_flags |= XFS_DQFLAG_FREEING;
 136
 137	xfs_dqflock(dqp);
 138
 139	/*
  140	 * If we are turning this type of quota off, we don't care
 141	 * about the dirty metadata sitting in this dquot. OTOH, if
 142	 * we're unmounting, we do care, so we flush it and wait.
 143	 */
 144	if (XFS_DQ_IS_DIRTY(dqp)) {
 145		struct xfs_buf	*bp = NULL;
 146
 147		/*
 148		 * We don't care about getting disk errors here. We need
 149		 * to purge this dquot anyway, so we go ahead regardless.
 150		 */
 151		error = xfs_dquot_use_attached_buf(dqp, &bp);
 152		if (error == -EAGAIN) {
 153			xfs_dqfunlock(dqp);
 154			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 155			goto out_unlock;
 156		}
 157		if (!bp)
 158			goto out_funlock;
 159
 160		/*
 161		 * dqflush completes dqflock on error, and the bwrite ioend
 162		 * does it on success.
 163		 */
 164		error = xfs_qm_dqflush(dqp, bp);
 165		if (!error) {
 166			error = xfs_bwrite(bp);
 167			xfs_buf_relse(bp);
 168		}
 169		xfs_dqflock(dqp);
 170	}
 171	xfs_dquot_detach_buf(dqp);
 172
 173out_funlock:
 174	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 175	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
 176		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 177
 178	xfs_dqfunlock(dqp);
 179	xfs_dqunlock(dqp);
 180
 181	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 182	qi->qi_dquots--;
 183
 184	/*
 185	 * We move dquots to the freelist as soon as their reference count
 186	 * hits zero, so it really should be on the freelist here.
 187	 */
 188	ASSERT(!list_empty(&dqp->q_lru));
 189	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
 190	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 191
 192	xfs_qm_dqdestroy(dqp);
 193	return 0;
 194
 195out_unlock:
 196	xfs_dqunlock(dqp);
 197	return error;
 198}
 199
 200/*
 201 * Purge the dquot cache.
 202 */
 203static void
 204xfs_qm_dqpurge_all(
 205	struct xfs_mount	*mp)
 206{
 207	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 208	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 209	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 210}
 211
 212/*
 213 * Just destroy the quotainfo structure.
 214 */
 215void
 216xfs_qm_unmount(
 217	struct xfs_mount	*mp)
 218{
 219	if (mp->m_quotainfo) {
 220		xfs_qm_dqpurge_all(mp);
 221		xfs_qm_destroy_quotainfo(mp);
 222	}
 223}
 224
 225static void
 226xfs_qm_unmount_rt(
 227	struct xfs_mount	*mp)
 228{
 229	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);
 230
 231	if (!rtg)
 232		return;
 233	if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
 234		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
 235	if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
 236		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
 237	xfs_rtgroup_rele(rtg);
 238}
 239
 240STATIC void
 241xfs_qm_destroy_quotainos(
 242	struct xfs_quotainfo	*qi)
 243{
 244	if (qi->qi_uquotaip) {
 245		xfs_irele(qi->qi_uquotaip);
 246		qi->qi_uquotaip = NULL; /* paranoia */
 247	}
 248	if (qi->qi_gquotaip) {
 249		xfs_irele(qi->qi_gquotaip);
 250		qi->qi_gquotaip = NULL;
 251	}
 252	if (qi->qi_pquotaip) {
 253		xfs_irele(qi->qi_pquotaip);
 254		qi->qi_pquotaip = NULL;
 255	}
 256	if (qi->qi_dirip) {
 257		xfs_irele(qi->qi_dirip);
 258		qi->qi_dirip = NULL;
 259	}
 260}
 261
 262/*
 263 * Called from the vfsops layer.
 264 */
 265void
 266xfs_qm_unmount_quotas(
 267	xfs_mount_t	*mp)
 268{
 269	/*
 270	 * Release the dquots that root inode, et al might be holding,
 271	 * before we flush quotas and blow away the quotainfo structure.
 272	 */
 273	ASSERT(mp->m_rootip);
 274	xfs_qm_dqdetach(mp->m_rootip);
 275
 276	/*
 277	 * For pre-RTG file systems, the RT inodes have quotas attached,
 278	 * detach them now.
 279	 */
 280	if (!xfs_has_rtgroups(mp))
 281		xfs_qm_unmount_rt(mp);
 282
 283	/*
 284	 * Release the quota inodes.
 285	 */
 286	if (mp->m_quotainfo)
 287		xfs_qm_destroy_quotainos(mp->m_quotainfo);
 288}
 289
 290STATIC int
 291xfs_qm_dqattach_one(
 292	struct xfs_inode	*ip,
 293	xfs_dqtype_t		type,
 294	bool			doalloc,
 295	struct xfs_dquot	**IO_idqpp)
 296{
 297	struct xfs_dquot	*dqp;
 298	int			error;
 299
 300	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 301	error = 0;
 302
 303	/*
 304	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 305	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 306	 * simpler.
 307	 */
 308	dqp = *IO_idqpp;
 309	if (dqp) {
 310		trace_xfs_dqattach_found(dqp);
 311		return 0;
 312	}
 313
 314	/*
 315	 * Find the dquot from somewhere. This bumps the reference count of
 316	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 317	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 318	 * turned off suddenly.
 319	 */
 320	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 321	if (error)
 322		return error;
 323
 324	trace_xfs_dqattach_get(dqp);
 325
 326	/*
 327	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 328	 * that the dquot returned is the one that should go in the inode.
 329	 */
 330	*IO_idqpp = dqp;
 331	xfs_dqunlock(dqp);
 332	return 0;
 333}
 334
 335static bool
 336xfs_qm_need_dqattach(
 337	struct xfs_inode	*ip)
 338{
 339	struct xfs_mount	*mp = ip->i_mount;
 340
 341	if (!XFS_IS_QUOTA_ON(mp))
 342		return false;
 343	if (!XFS_NOT_DQATTACHED(mp, ip))
 344		return false;
 345	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 346		return false;
 347	if (xfs_is_metadir_inode(ip))
 348		return false;
 349	return true;
 350}
 351
 352/*
 353 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 354 * into account.
 355 * If @doalloc is true, the dquot(s) will be allocated if needed.
 356 * Inode may get unlocked and relocked in here, and the caller must deal with
 357 * the consequences.
 358 */
 359int
 360xfs_qm_dqattach_locked(
 361	xfs_inode_t	*ip,
 362	bool		doalloc)
 363{
 364	xfs_mount_t	*mp = ip->i_mount;
 365	int		error = 0;
 366
 367	if (!xfs_qm_need_dqattach(ip))
 368		return 0;
 369
 370	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 371	ASSERT(!xfs_is_metadir_inode(ip));
 372
 373	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 374		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 375				doalloc, &ip->i_udquot);
 376		if (error)
 377			goto done;
 378		ASSERT(ip->i_udquot);
 379	}
 380
 381	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 382		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 383				doalloc, &ip->i_gdquot);
 384		if (error)
 385			goto done;
 386		ASSERT(ip->i_gdquot);
 387	}
 388
 389	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 390		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 391				doalloc, &ip->i_pdquot);
 392		if (error)
 393			goto done;
 394		ASSERT(ip->i_pdquot);
 395	}
 396
 397done:
 398	/*
 399	 * Don't worry about the dquots that we may have attached before any
 400	 * error - they'll get detached later if it has not already been done.
 401	 */
 402	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
 403	return error;
 404}
 405
 406int
 407xfs_qm_dqattach(
 408	struct xfs_inode	*ip)
 409{
 410	int			error;
 411
 412	if (!xfs_qm_need_dqattach(ip))
 413		return 0;
 414
 415	xfs_ilock(ip, XFS_ILOCK_EXCL);
 416	error = xfs_qm_dqattach_locked(ip, false);
 417	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 418
 419	return error;
 420}
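
     /*
      * A minimal usage sketch (hypothetical caller, error handling
      * abbreviated): dquots must be attached before quota can be reserved
      * or modified for the inode in a transaction, e.g.:
      *
      *	error = xfs_qm_dqattach(ip);
      *	if (error)
      *		return error;
      *	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, 0, false);
      */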
 421
 422/*
 423 * Release dquots (and their references) if any.
  424 * The inode should be locked EXCL except when this is called by
 425 * xfs_ireclaim.
 426 */
 427void
 428xfs_qm_dqdetach(
 429	xfs_inode_t	*ip)
 430{
 431	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 432		return;
 433
 434	trace_xfs_dquot_dqdetach(ip);
 435
 436	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 437	if (ip->i_udquot) {
 438		xfs_qm_dqrele(ip->i_udquot);
 439		ip->i_udquot = NULL;
 440	}
 441	if (ip->i_gdquot) {
 442		xfs_qm_dqrele(ip->i_gdquot);
 443		ip->i_gdquot = NULL;
 444	}
 445	if (ip->i_pdquot) {
 446		xfs_qm_dqrele(ip->i_pdquot);
 447		ip->i_pdquot = NULL;
 448	}
 449}
 450
 451struct xfs_qm_isolate {
 452	struct list_head	buffers;
 453	struct list_head	dispose;
 454};
 455
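     /*
      * LRU walk callback for the dquot shrinker.  Returns LRU_REMOVED when
      * the dquot has been taken off the LRU (reclaimed, or found to have
      * gained a reference), LRU_SKIP for a busy dquot, and LRU_RETRY after
      * the LRU lock was dropped to flush a dirty dquot.
      */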
 456static enum lru_status
 457xfs_qm_dquot_isolate(
 458	struct list_head	*item,
 459	struct list_lru_one	*lru,
 460	void			*arg)
 461		__releases(&lru->lock) __acquires(&lru->lock)
 462{
 463	struct xfs_dquot	*dqp = container_of(item,
 464						struct xfs_dquot, q_lru);
 465	struct xfs_qm_isolate	*isol = arg;
 466
 467	if (!xfs_dqlock_nowait(dqp))
 468		goto out_miss_busy;
 469
 470	/*
 471	 * If something else is freeing this dquot and hasn't yet removed it
 472	 * from the LRU, leave it for the freeing task to complete the freeing
  473	 * process rather than risk it being freed from under us here.
 474	 */
 475	if (dqp->q_flags & XFS_DQFLAG_FREEING)
 476		goto out_miss_unlock;
 477
 478	/*
  479	 * This dquot has acquired a reference in the meantime; remove it from
 480	 * the freelist and try again.
 481	 */
 482	if (dqp->q_nrefs) {
 483		xfs_dqunlock(dqp);
 484		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 485
 486		trace_xfs_dqreclaim_want(dqp);
 487		list_lru_isolate(lru, &dqp->q_lru);
 488		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 489		return LRU_REMOVED;
 490	}
 491
 492	/*
 493	 * If the dquot is dirty, flush it. If it's already being flushed, just
 494	 * skip it so there is time for the IO to complete before we try to
 495	 * reclaim it again on the next LRU pass.
 496	 */
 497	if (!xfs_dqflock_nowait(dqp))
 498		goto out_miss_unlock;
 499
 500	if (XFS_DQ_IS_DIRTY(dqp)) {
 501		struct xfs_buf	*bp = NULL;
 502		int		error;
 503
 504		trace_xfs_dqreclaim_dirty(dqp);
 505
 506		/* we have to drop the LRU lock to flush the dquot */
 507		spin_unlock(&lru->lock);
 508
 509		error = xfs_dquot_use_attached_buf(dqp, &bp);
 510		if (!bp || error == -EAGAIN) {
 511			xfs_dqfunlock(dqp);
 512			goto out_unlock_dirty;
 513		}
 514
 515		/*
 516		 * dqflush completes dqflock on error, and the delwri ioend
 517		 * does it on success.
 518		 */
 519		error = xfs_qm_dqflush(dqp, bp);
 520		if (error)
 521			goto out_unlock_dirty;
 522
 523		xfs_buf_delwri_queue(bp, &isol->buffers);
 524		xfs_buf_relse(bp);
 525		goto out_unlock_dirty;
 526	}
 527
 528	xfs_dquot_detach_buf(dqp);
 529	xfs_dqfunlock(dqp);
 530
 531	/*
 532	 * Prevent lookups now that we are past the point of no return.
 533	 */
 534	dqp->q_flags |= XFS_DQFLAG_FREEING;
 535	xfs_dqunlock(dqp);
 536
 537	ASSERT(dqp->q_nrefs == 0);
 538	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 539	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 540	trace_xfs_dqreclaim_done(dqp);
 541	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 542	return LRU_REMOVED;
 543
 544out_miss_unlock:
 545	xfs_dqunlock(dqp);
 546out_miss_busy:
 547	trace_xfs_dqreclaim_busy(dqp);
 548	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 549	return LRU_SKIP;
 550
 551out_unlock_dirty:
 552	trace_xfs_dqreclaim_busy(dqp);
 553	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 554	xfs_dqunlock(dqp);
 555	return LRU_RETRY;
 556}
 557
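     /*
      * Shrinker scan: isolate reclaimable dquots onto a private dispose
      * list (queueing the buffers of dirty ones for delwri writeback),
      * submit those buffers, then free everything that was isolated.
      */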
 558static unsigned long
 559xfs_qm_shrink_scan(
 560	struct shrinker		*shrink,
 561	struct shrink_control	*sc)
 562{
 563	struct xfs_quotainfo	*qi = shrink->private_data;
 564	struct xfs_qm_isolate	isol;
 565	unsigned long		freed;
 566	int			error;
 567
 568	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 569		return 0;
 570
 571	INIT_LIST_HEAD(&isol.buffers);
 572	INIT_LIST_HEAD(&isol.dispose);
 573
 574	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 575				     xfs_qm_dquot_isolate, &isol);
 576
 577	error = xfs_buf_delwri_submit(&isol.buffers);
 578	if (error)
 579		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 580
 581	while (!list_empty(&isol.dispose)) {
 582		struct xfs_dquot	*dqp;
 583
 584		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 585		list_del_init(&dqp->q_lru);
 586		xfs_qm_dqfree_one(dqp);
 587	}
 588
 589	return freed;
 590}
 591
 592static unsigned long
 593xfs_qm_shrink_count(
 594	struct shrinker		*shrink,
 595	struct shrink_control	*sc)
 596{
 597	struct xfs_quotainfo	*qi = shrink->private_data;
 598
 599	return list_lru_shrink_count(&qi->qi_lru, sc);
 600}
 601
 602STATIC void
 603xfs_qm_set_defquota(
 604	struct xfs_mount	*mp,
 605	xfs_dqtype_t		type,
 606	struct xfs_quotainfo	*qinf)
 607{
 608	struct xfs_dquot	*dqp;
 609	struct xfs_def_quota	*defq;
 610	int			error;
 611
 612	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 613	if (error)
 614		return;
 615
 616	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 617
 618	/*
 619	 * Timers and warnings have been already set, let's just set the
 620	 * default limits for this quota type
 621	 */
 622	defq->blk.hard = dqp->q_blk.hardlimit;
 623	defq->blk.soft = dqp->q_blk.softlimit;
 624	defq->ino.hard = dqp->q_ino.hardlimit;
 625	defq->ino.soft = dqp->q_ino.softlimit;
 626	defq->rtb.hard = dqp->q_rtb.hardlimit;
 627	defq->rtb.soft = dqp->q_rtb.softlimit;
 628	xfs_qm_dqdestroy(dqp);
 629}
 630
 631/* Initialize quota time limits from the root dquot. */
 632static void
 633xfs_qm_init_timelimits(
 634	struct xfs_mount	*mp,
 635	xfs_dqtype_t		type)
 636{
 637	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 638	struct xfs_def_quota	*defq;
 639	struct xfs_dquot	*dqp;
 640	int			error;
 641
 642	defq = xfs_get_defquota(qinf, type);
 643
 644	defq->blk.time = XFS_QM_BTIMELIMIT;
 645	defq->ino.time = XFS_QM_ITIMELIMIT;
 646	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 647
 648	/*
 649	 * We try to get the limits from the superuser's limits fields.
 650	 * This is quite hacky, but it is standard quota practice.
 651	 *
 652	 * Since we may not have done a quotacheck by this point, just read
 653	 * the dquot without attaching it to any hashtables or lists.
 654	 */
 655	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 656	if (error)
 657		return;
 658
 659	/*
 660	 * The warnings and timers set the grace period given to
  661	 * a user or group before their writes start to be rejected.
  662	 * If it is zero, a default is used.
 663	 */
 664	if (dqp->q_blk.timer)
 665		defq->blk.time = dqp->q_blk.timer;
 666	if (dqp->q_ino.timer)
 667		defq->ino.time = dqp->q_ino.timer;
 668	if (dqp->q_rtb.timer)
 669		defq->rtb.time = dqp->q_rtb.timer;
 670
 671	xfs_qm_dqdestroy(dqp);
 672}
 673
 674static int
 675xfs_qm_load_metadir_qinos(
 676	struct xfs_mount	*mp,
 677	struct xfs_quotainfo	*qi)
 678{
 679	struct xfs_trans	*tp;
 680	int			error;
 681
 682	error = xfs_trans_alloc_empty(mp, &tp);
 683	if (error)
 684		return error;
 685
 686	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
 687	if (error == -ENOENT) {
 688		/* no quota dir directory, but we'll create one later */
 689		error = 0;
 690		goto out_trans;
 691	}
 692	if (error)
 693		goto out_trans;
 694
 695	if (XFS_IS_UQUOTA_ON(mp)) {
 696		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
 697				&qi->qi_uquotaip);
 698		if (error && error != -ENOENT)
 699			goto out_trans;
 700	}
 701
 702	if (XFS_IS_GQUOTA_ON(mp)) {
 703		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
 704				&qi->qi_gquotaip);
 705		if (error && error != -ENOENT)
 706			goto out_trans;
 707	}
 708
 709	if (XFS_IS_PQUOTA_ON(mp)) {
 710		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
 711				&qi->qi_pquotaip);
 712		if (error && error != -ENOENT)
 713			goto out_trans;
 714	}
 715
 716	error = 0;
 717out_trans:
 718	xfs_trans_cancel(tp);
 719	return error;
 720}
 721
 722/* Create quota inodes in the metadata directory tree. */
 723STATIC int
 724xfs_qm_create_metadir_qinos(
 725	struct xfs_mount	*mp,
 726	struct xfs_quotainfo	*qi)
 727{
 728	int			error;
 729
 730	if (!qi->qi_dirip) {
 731		error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
 732		if (error && error != -EEXIST)
 733			return error;
 734		/*
 735		 * If the /quotas dirent points to an inode that isn't
 736		 * loadable, qi_dirip will be NULL but mkdir_parent will return
 737		 * -EEXIST.  In this case the metadir is corrupt, so bail out.
 738		 */
 739		if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
 740			return -EFSCORRUPTED;
 741	}
 742
 743	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
 744		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 745				XFS_DQTYPE_USER, &qi->qi_uquotaip);
 746		if (error)
 747			return error;
 748	}
 749
 750	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
 751		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 752				XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
 753		if (error)
 754			return error;
 755	}
 756
 757	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
 758		error = xfs_dqinode_metadir_create(qi->qi_dirip,
 759				XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
 760		if (error)
 761			return error;
 762	}
 763
 764	return 0;
 765}
 766
 767/*
 768 * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
 769 * creating quota files on a metadir filesystem.
 770 */
 771STATIC int
 772xfs_qm_prep_metadir_sb(
 773	struct xfs_mount	*mp)
 774{
 775	struct xfs_trans	*tp;
 776	int			error;
 777
 778	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
 779	if (error)
 780		return error;
 781
 782	spin_lock(&mp->m_sb_lock);
 783
 784	xfs_add_quota(mp);
 785
 786	/* qflags will get updated fully _after_ quotacheck */
 787	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 788
 789	spin_unlock(&mp->m_sb_lock);
 790	xfs_log_sb(tp);
 791
 792	return xfs_trans_commit(tp);
 793}
 794
 795/*
 796 * Load existing quota inodes or create them.  Since this is a V5 filesystem,
 797 * we don't have to deal with the grp/prjquota switcheroo thing from V4.
 798 */
 799STATIC int
 800xfs_qm_init_metadir_qinos(
 801	struct xfs_mount	*mp)
 802{
 803	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 804	int			error;
 805
 806	if (!xfs_has_quota(mp)) {
 807		error = xfs_qm_prep_metadir_sb(mp);
 808		if (error)
 809			return error;
 810	}
 811
 812	error = xfs_qm_load_metadir_qinos(mp, qi);
 813	if (error)
 814		goto out_err;
 815
 816	error = xfs_qm_create_metadir_qinos(mp, qi);
 817	if (error)
 818		goto out_err;
 819
 820	/* The only user of the quota dir inode is online fsck */
 821#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
 822	xfs_irele(qi->qi_dirip);
 823	qi->qi_dirip = NULL;
 824#endif
 825	return 0;
 826out_err:
 827	xfs_qm_destroy_quotainos(mp->m_quotainfo);
 828	return error;
 829}
 830
 831/*
 832 * This initializes all the quota information that's kept in the
 833 * mount structure
 834 */
 835STATIC int
 836xfs_qm_init_quotainfo(
 837	struct xfs_mount	*mp)
 838{
 839	struct xfs_quotainfo	*qinf;
 840	int			error;
 841
 842	ASSERT(XFS_IS_QUOTA_ON(mp));
 843
 844	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
 845					GFP_KERNEL | __GFP_NOFAIL);
 846
 847	error = list_lru_init(&qinf->qi_lru);
 848	if (error)
 849		goto out_free_qinf;
 850
 851	/*
 852	 * See if quotainodes are setup, and if not, allocate them,
 853	 * and change the superblock accordingly.
 854	 */
 855	if (xfs_has_metadir(mp))
 856		error = xfs_qm_init_metadir_qinos(mp);
 857	else
 858		error = xfs_qm_init_quotainos(mp);
 859	if (error)
 860		goto out_free_lru;
 861
 862	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
 863	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
 864	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
 865	mutex_init(&qinf->qi_tree_lock);
 866
 867	/* mutex used to serialize quotaoffs */
 868	mutex_init(&qinf->qi_quotaofflock);
 869
 870	/* Precalc some constants */
 871	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 872	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 873	if (xfs_has_bigtime(mp)) {
 874		qinf->qi_expiry_min =
 875			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 876		qinf->qi_expiry_max =
 877			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 878	} else {
 879		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 880		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 881	}
 882	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 883			qinf->qi_expiry_max);
 884
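     	/* Carry the on-disk quotachecked state over into the incore flags. */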
 885	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 886
 887	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 888	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 889	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 890
 891	if (XFS_IS_UQUOTA_ON(mp))
 892		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 893	if (XFS_IS_GQUOTA_ON(mp))
 894		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 895	if (XFS_IS_PQUOTA_ON(mp))
 896		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 897
 898	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
 899					   mp->m_super->s_id);
 900	if (!qinf->qi_shrinker) {
 901		error = -ENOMEM;
 902		goto out_free_inos;
 903	}
 904
 905	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
 906	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
 907	qinf->qi_shrinker->private_data = qinf;
 908
 909	shrinker_register(qinf->qi_shrinker);
 910
 911	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
 912	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
 913
 914	return 0;
 915
 916out_free_inos:
 917	mutex_destroy(&qinf->qi_quotaofflock);
 918	mutex_destroy(&qinf->qi_tree_lock);
 919	xfs_qm_destroy_quotainos(qinf);
 920out_free_lru:
 921	list_lru_destroy(&qinf->qi_lru);
 922out_free_qinf:
 923	kfree(qinf);
 924	mp->m_quotainfo = NULL;
 925	return error;
 926}
 927
 928/*
 929 * Gets called when unmounting a filesystem or when all quotas get
 930 * turned off.
 931 * This purges the quota inodes, destroys locks and frees itself.
 932 */
 933void
 934xfs_qm_destroy_quotainfo(
 935	struct xfs_mount	*mp)
 936{
 937	struct xfs_quotainfo	*qi;
 938
 939	qi = mp->m_quotainfo;
 940	ASSERT(qi != NULL);
 941
 942	shrinker_free(qi->qi_shrinker);
 943	list_lru_destroy(&qi->qi_lru);
 944	xfs_qm_destroy_quotainos(qi);
 945	mutex_destroy(&qi->qi_tree_lock);
 946	mutex_destroy(&qi->qi_quotaofflock);
 947	kfree(qi);
 948	mp->m_quotainfo = NULL;
 949}
 950
 951static inline enum xfs_metafile_type
 952xfs_qm_metafile_type(
 953	unsigned int		flags)
 954{
 955	if (flags & XFS_QMOPT_UQUOTA)
 956		return XFS_METAFILE_USRQUOTA;
 957	else if (flags & XFS_QMOPT_GQUOTA)
 958		return XFS_METAFILE_GRPQUOTA;
 959	return XFS_METAFILE_PRJQUOTA;
 960}
 961
 962/*
  963 * Create an inode and return with a reference already taken, but unlocked.
  964 * This is how we create quota inodes.
 965 */
 966STATIC int
 967xfs_qm_qino_alloc(
 968	struct xfs_mount	*mp,
 969	struct xfs_inode	**ipp,
 970	unsigned int		flags)
 971{
 972	struct xfs_trans	*tp;
 973	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
 974	int			error;
 975	bool			need_alloc = true;
 976
 977	*ipp = NULL;
 978	/*
  979	 * With a superblock that doesn't have a separate pquotino, we
 980	 * share an inode between gquota and pquota. If the on-disk
 981	 * superblock has GQUOTA and the filesystem is now mounted
 982	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 983	 * vice-versa.
 984	 */
 985	if (!xfs_has_pquotino(mp) &&
 986			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 987		xfs_ino_t ino = NULLFSINO;
 988
 989		if ((flags & XFS_QMOPT_PQUOTA) &&
 990			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 991			ino = mp->m_sb.sb_gquotino;
 992			if (XFS_IS_CORRUPT(mp,
 993					   mp->m_sb.sb_pquotino != NULLFSINO)) {
 994				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
 995				return -EFSCORRUPTED;
 996			}
 997		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 998			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 999			ino = mp->m_sb.sb_pquotino;
1000			if (XFS_IS_CORRUPT(mp,
1001					   mp->m_sb.sb_gquotino != NULLFSINO)) {
1002				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
1003				return -EFSCORRUPTED;
1004			}
1005		}
1006		if (ino != NULLFSINO) {
1007			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
1008			if (error)
1009				return error;
1010
1011			mp->m_sb.sb_gquotino = NULLFSINO;
1012			mp->m_sb.sb_pquotino = NULLFSINO;
1013			need_alloc = false;
1014		}
1015	}
1016
1017	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
1018			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
1019			0, 0, &tp);
1020	if (error)
1021		return error;
1022
1023	if (need_alloc) {
1024		struct xfs_icreate_args	args = {
1025			.mode		= S_IFREG,
1026			.flags		= XFS_ICREATE_UNLINKABLE,
1027		};
1028		xfs_ino_t	ino;
1029
1030		error = xfs_dialloc(&tp, &args, &ino);
1031		if (!error)
1032			error = xfs_icreate(tp, ino, &args, ipp);
1033		if (error) {
1034			xfs_trans_cancel(tp);
1035			return error;
1036		}
1037		if (xfs_has_metadir(mp))
1038			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
1039	}
1040
1041	/*
1042	 * Make the changes in the superblock, and log those too.
1043	 * sbfields arg may contain fields other than *QUOTINO;
1044	 * VERSIONNUM for example.
1045	 */
1046	spin_lock(&mp->m_sb_lock);
1047	if (flags & XFS_QMOPT_SBVERSION) {
1048		ASSERT(!xfs_has_quota(mp));
1049
1050		xfs_add_quota(mp);
1051		mp->m_sb.sb_uquotino = NULLFSINO;
1052		mp->m_sb.sb_gquotino = NULLFSINO;
1053		mp->m_sb.sb_pquotino = NULLFSINO;
1054
1055		/* qflags will get updated fully _after_ quotacheck */
1056		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1057	}
1058	if (flags & XFS_QMOPT_UQUOTA)
1059		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
1060	else if (flags & XFS_QMOPT_GQUOTA)
1061		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
1062	else
1063		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
1064	spin_unlock(&mp->m_sb_lock);
1065	xfs_log_sb(tp);
1066
1067	error = xfs_trans_commit(tp);
1068	if (error) {
1069		ASSERT(xfs_is_shutdown(mp));
1070		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1071	}
1072	if (need_alloc) {
1073		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
1074		xfs_finish_inode_setup(*ipp);
1075	}
1076	return error;
1077}
1078
1079
1080STATIC void
1081xfs_qm_reset_dqcounts(
1082	struct xfs_mount	*mp,
1083	struct xfs_buf		*bp,
1084	xfs_dqid_t		id,
1085	xfs_dqtype_t		type)
1086{
1087	struct xfs_dqblk	*dqb;
1088	int			j;
1089
1090	trace_xfs_reset_dqcounts(bp, _RET_IP_);
1091
1092	/*
1093	 * Reset all counters and timers. They'll be
1094	 * started afresh by xfs_qm_quotacheck.
1095	 */
1096#ifdef DEBUG
1097	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
1098		sizeof(struct xfs_dqblk);
1099	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1100#endif
1101	dqb = bp->b_addr;
1102	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1103		struct xfs_disk_dquot	*ddq;
1104
1105		ddq = (struct xfs_disk_dquot *)&dqb[j];
1106
1107		/*
1108		 * Do a sanity check, and if needed, repair the dqblk. Don't
1109		 * output any warnings because it's perfectly possible to
1110		 * find uninitialised dquot blks. See comment in
1111		 * xfs_dquot_verify.
1112		 */
1113		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
1114		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
1115			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
1116
1117		/*
1118		 * Reset type in case we are reusing group quota file for
1119		 * project quotas or vice versa
1120		 */
1121		ddq->d_type = type;
1122		ddq->d_bcount = 0;
1123		ddq->d_icount = 0;
1124		ddq->d_rtbcount = 0;
1125
1126		/*
1127		 * dquot id 0 stores the default grace period and the maximum
1128		 * warning limit that were set by the administrator, so we
1129		 * should not reset them.
1130		 */
1131		if (ddq->d_id != 0) {
1132			ddq->d_btimer = 0;
1133			ddq->d_itimer = 0;
1134			ddq->d_rtbtimer = 0;
1135			ddq->d_bwarns = 0;
1136			ddq->d_iwarns = 0;
1137			ddq->d_rtbwarns = 0;
1138			if (xfs_has_bigtime(mp))
1139				ddq->d_type |= XFS_DQTYPE_BIGTIME;
1140		}
1141
1142		if (xfs_has_crc(mp)) {
1143			xfs_update_cksum((char *)&dqb[j],
1144					 sizeof(struct xfs_dqblk),
1145					 XFS_DQUOT_CRC_OFF);
1146		}
1147	}
1148}
1149
1150STATIC int
1151xfs_qm_reset_dqcounts_all(
1152	struct xfs_mount	*mp,
1153	xfs_dqid_t		firstid,
1154	xfs_fsblock_t		bno,
1155	xfs_filblks_t		blkcnt,
1156	xfs_dqtype_t		type,
1157	struct list_head	*buffer_list)
1158{
1159	struct xfs_buf		*bp;
1160	int			error = 0;
1161
1162	ASSERT(blkcnt > 0);
1163
1164	/*
1165	 * Blkcnt arg can be a very big number, and might even be
1166	 * larger than the log itself. So, we have to break it up into
1167	 * manageable-sized transactions.
1168	 * Note that we don't start a permanent transaction here; we might
1169	 * not be able to get a log reservation for the whole thing up front,
1170	 * and we don't really care to either, because we just discard
1171	 * everything if we were to crash in the middle of this loop.
1172	 */
1173	while (blkcnt--) {
1174		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1175			      XFS_FSB_TO_DADDR(mp, bno),
1176			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1177			      &xfs_dquot_buf_ops);
1178
1179		/*
 1180		 * CRC and validation errors will return an EFSCORRUPTED here. If
1181		 * this occurs, re-read without CRC validation so that we can
1182		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1183		 * will leave a trace in the log indicating corruption has
1184		 * been detected.
1185		 */
1186		if (error == -EFSCORRUPTED) {
1187			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1188				      XFS_FSB_TO_DADDR(mp, bno),
1189				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1190				      NULL);
1191		}
1192
1193		if (error)
1194			break;
1195
1196		/*
1197		 * A corrupt buffer might not have a verifier attached, so
1198		 * make sure we have the correct one attached before writeback
1199		 * occurs.
1200		 */
1201		bp->b_ops = &xfs_dquot_buf_ops;
1202		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1203		xfs_buf_delwri_queue(bp, buffer_list);
1204		xfs_buf_relse(bp);
1205
 1206		/* go to the next block. */
1207		bno++;
1208		firstid += mp->m_quotainfo->qi_dqperchunk;
1209	}
1210
1211	return error;
1212}
1213
1214/*
1215 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
1216 * counters for every chunk of dquots that we find.
1217 */
1218STATIC int
1219xfs_qm_reset_dqcounts_buf(
1220	struct xfs_mount	*mp,
1221	struct xfs_inode	*qip,
1222	xfs_dqtype_t		type,
1223	struct list_head	*buffer_list)
1224{
1225	struct xfs_bmbt_irec	*map;
1226	int			i, nmaps;	/* number of map entries */
1227	int			error;		/* return value */
1228	xfs_fileoff_t		lblkno;
1229	xfs_filblks_t		maxlblkcnt;
1230	xfs_dqid_t		firstid;
1231	xfs_fsblock_t		rablkno;
1232	xfs_filblks_t		rablkcnt;
1233
1234	error = 0;
1235	/*
1236	 * This looks racy, but we can't keep an inode lock across a
1237	 * trans_reserve. But, this gets called during quotacheck, and that
1238	 * happens only at mount time which is single threaded.
1239	 */
1240	if (qip->i_nblocks == 0)
1241		return 0;
1242
1243	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1244			GFP_KERNEL | __GFP_NOFAIL);
1245
1246	lblkno = 0;
1247	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1248	do {
1249		uint		lock_mode;
1250
1251		nmaps = XFS_DQITER_MAP_SIZE;
1252		/*
1253		 * We aren't changing the inode itself. Just changing
1254		 * some of its data. No new blocks are added here, and
1255		 * the inode is never added to the transaction.
1256		 */
1257		lock_mode = xfs_ilock_data_map_shared(qip);
1258		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1259				       map, &nmaps, 0);
1260		xfs_iunlock(qip, lock_mode);
1261		if (error)
1262			break;
1263
1264		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1265		for (i = 0; i < nmaps; i++) {
1266			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1267			ASSERT(map[i].br_blockcount);
1268
1269
1270			lblkno += map[i].br_blockcount;
1271
1272			if (map[i].br_startblock == HOLESTARTBLOCK)
1273				continue;
1274
1275			firstid = (xfs_dqid_t) map[i].br_startoff *
1276				mp->m_quotainfo->qi_dqperchunk;
1277			/*
1278			 * Do a read-ahead on the next extent.
1279			 */
1280			if ((i+1 < nmaps) &&
1281			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1282				rablkcnt =  map[i+1].br_blockcount;
1283				rablkno = map[i+1].br_startblock;
1284				while (rablkcnt--) {
1285					xfs_buf_readahead(mp->m_ddev_targp,
1286					       XFS_FSB_TO_DADDR(mp, rablkno),
1287					       mp->m_quotainfo->qi_dqchunklen,
1288					       &xfs_dquot_buf_ops);
1289					rablkno++;
1290				}
1291			}
1292			/*
 1293			 * Iterate through all the blocks in the extent and
1294			 * reset the counters of all the dquots inside them.
1295			 */
1296			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1297						   map[i].br_startblock,
1298						   map[i].br_blockcount,
1299						   type, buffer_list);
1300			if (error)
1301				goto out;
1302		}
1303	} while (nmaps > 0);
1304
1305out:
1306	kfree(map);
1307	return error;
1308}
1309
1310/*
1311 * Called by dqusage_adjust in doing a quotacheck.
1312 *
 1313 * Given the inode and a dquot id, this updates both the incore dquot and
 1314 * the buffer copy. This is so that once the quotacheck is done, we can
1315 * just log all the buffers, as opposed to logging numerous updates to
1316 * individual dquots.
1317 */
1318STATIC int
1319xfs_qm_quotacheck_dqadjust(
1320	struct xfs_inode	*ip,
1321	xfs_dqtype_t		type,
1322	xfs_qcnt_t		nblks,
1323	xfs_qcnt_t		rtblks)
1324{
1325	struct xfs_mount	*mp = ip->i_mount;
1326	struct xfs_dquot	*dqp;
1327	xfs_dqid_t		id;
1328	int			error;
1329
1330	id = xfs_qm_id_for_quotatype(ip, type);
1331	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1332	if (error) {
1333		/*
1334		 * Shouldn't be able to turn off quotas here.
1335		 */
1336		ASSERT(error != -ESRCH);
1337		ASSERT(error != -ENOENT);
1338		return error;
1339	}
1340
1341	error = xfs_dquot_attach_buf(NULL, dqp);
1342	if (error)
1343		return error;
1344
1345	trace_xfs_dqadjust(dqp);
1346
1347	/*
1348	 * Adjust the inode count and the block count to reflect this inode's
1349	 * resource usage.
1350	 */
1351	dqp->q_ino.count++;
1352	dqp->q_ino.reserved++;
1353	if (nblks) {
1354		dqp->q_blk.count += nblks;
1355		dqp->q_blk.reserved += nblks;
1356	}
1357	if (rtblks) {
1358		dqp->q_rtb.count += rtblks;
1359		dqp->q_rtb.reserved += rtblks;
1360	}
1361
1362	/*
1363	 * Set default limits, adjust timers (since we changed usages)
1364	 *
1365	 * There are no timers for the default values set in the root dquot.
1366	 */
1367	if (dqp->q_id) {
1368		xfs_qm_adjust_dqlimits(dqp);
1369		xfs_qm_adjust_dqtimers(dqp);
1370	}
1371
1372	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1373	xfs_qm_dqput(dqp);
1374	return 0;
1375}
1376
1377/*
1378 * callback routine supplied to bulkstat(). Given an inumber, find its
1379 * dquots and update them to account for resources taken by that inode.
1380 */
1381/* ARGSUSED */
1382STATIC int
1383xfs_qm_dqusage_adjust(
1384	struct xfs_mount	*mp,
1385	struct xfs_trans	*tp,
1386	xfs_ino_t		ino,
1387	void			*data)
1388{
1389	struct xfs_inode	*ip;
1390	xfs_filblks_t		nblks, rtblks;
1391	unsigned int		lock_mode;
1392	int			error;
1393
1394	ASSERT(XFS_IS_QUOTA_ON(mp));
1395
1396	/*
1397	 * rootino must have its resources accounted for, not so with the quota
1398	 * inodes.
1399	 */
1400	if (xfs_is_quota_inode(&mp->m_sb, ino))
1401		return 0;
1402
1403	/*
1404	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1405	 * at mount time and therefore nobody will be racing chown/chproj.
1406	 */
1407	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1408	if (error == -EINVAL || error == -ENOENT)
1409		return 0;
1410	if (error)
1411		return error;
1412
1413	/*
1414	 * Reload the incore unlinked list to avoid failure in inodegc.
1415	 * Use an unlocked check here because unrecovered unlinked inodes
1416	 * should be somewhat rare.
1417	 */
1418	if (xfs_inode_unlinked_incomplete(ip)) {
1419		error = xfs_inode_reload_unlinked(ip);
1420		if (error) {
1421			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1422			goto error0;
1423		}
1424	}
1425
1426	/* Metadata directory files are not accounted to user-visible quotas. */
1427	if (xfs_is_metadir_inode(ip))
1428		goto error0;
1429
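     	/*
     	 * Quotacheck runs at mount time before any writes are admitted,
     	 * so no delayed allocation blocks can exist yet.
     	 */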
1430	ASSERT(ip->i_delayed_blks == 0);
1431
1432	lock_mode = xfs_ilock_data_map_shared(ip);
1433	if (XFS_IS_REALTIME_INODE(ip)) {
1434		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1435		if (error) {
1436			xfs_iunlock(ip, lock_mode);
1437			goto error0;
1438		}
1439	}
1440	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
1441	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1442	xfs_iunlock(ip, lock_mode);
1443
1444	/*
1445	 * Add the (disk blocks and inode) resources occupied by this
1446	 * inode to its dquots. We do this adjustment in the incore dquot,
1447	 * and also copy the changes to its buffer.
1448	 * We don't care about putting these changes in a transaction
1449	 * envelope because if we crash in the middle of a 'quotacheck'
1450	 * we have to start from the beginning anyway.
1451	 * Once we're done, we'll log all the dquot bufs.
1452	 *
1453	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1454	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1455	 */
1456	if (XFS_IS_UQUOTA_ON(mp)) {
1457		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1458				rtblks);
1459		if (error)
1460			goto error0;
1461	}
1462
1463	if (XFS_IS_GQUOTA_ON(mp)) {
1464		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1465				rtblks);
1466		if (error)
1467			goto error0;
1468	}
1469
1470	if (XFS_IS_PQUOTA_ON(mp)) {
1471		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1472				rtblks);
1473		if (error)
1474			goto error0;
1475	}
1476
1477error0:
1478	xfs_irele(ip);
1479	return error;
1480}
1481
1482STATIC int
1483xfs_qm_flush_one(
1484	struct xfs_dquot	*dqp,
1485	void			*data)
1486{
1487	struct xfs_mount	*mp = dqp->q_mount;
1488	struct list_head	*buffer_list = data;
1489	struct xfs_buf		*bp = NULL;
1490	int			error = 0;
1491
1492	xfs_dqlock(dqp);
1493	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1494		goto out_unlock;
1495	if (!XFS_DQ_IS_DIRTY(dqp))
1496		goto out_unlock;
1497
1498	/*
1499	 * The only way the dquot is already flush locked by the time quotacheck
1500	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1501	 * it for the final time. Quotacheck collects all dquot bufs in the
1502	 * local delwri queue before dquots are dirtied, so reclaim can't have
1503	 * possibly queued it for I/O. The only way out is to push the buffer to
1504	 * cycle the flush lock.
1505	 */
1506	if (!xfs_dqflock_nowait(dqp)) {
1507		/* buf is pinned in-core by delwri list */
1508		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1509				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1510		if (error)
1511			goto out_unlock;
1512
1513		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1514			error = -EAGAIN;
1515			xfs_buf_relse(bp);
1516			goto out_unlock;
1517		}
1518		xfs_buf_unlock(bp);
1519
1520		xfs_buf_delwri_pushbuf(bp, buffer_list);
1521		xfs_buf_rele(bp);
1522
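     		/* Let xfs_qm_dquot_walk() retry us after the push completes. */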
1523		error = -EAGAIN;
1524		goto out_unlock;
1525	}
1526
1527	error = xfs_dquot_use_attached_buf(dqp, &bp);
1528	if (error)
1529		goto out_unlock;
1530	if (!bp) {
1531		error = -EFSCORRUPTED;
1532		goto out_unlock;
1533	}
1534
1535	error = xfs_qm_dqflush(dqp, bp);
1536	if (!error)
1537		xfs_buf_delwri_queue(bp, buffer_list);
1538	xfs_buf_relse(bp);
1539out_unlock:
1540	xfs_dqunlock(dqp);
1541	return error;
1542}
1543
1544/*
 1545 * Walk through all the filesystem inodes and construct a consistent view
1546 * of the disk quota world. If the quotacheck fails, disable quotas.
1547 */
1548STATIC int
1549xfs_qm_quotacheck(
1550	xfs_mount_t	*mp)
1551{
1552	int			error, error2;
1553	uint			flags;
1554	LIST_HEAD		(buffer_list);
1555	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1556	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1557	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1558
1559	flags = 0;
1560
1561	ASSERT(uip || gip || pip);
1562	ASSERT(XFS_IS_QUOTA_ON(mp));
1563
1564	xfs_notice(mp, "Quotacheck needed: Please wait.");
1565
1566	/*
1567	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1568	 * their counters to zero. We need a clean slate.
1569	 * We don't log our changes till later.
1570	 */
1571	if (uip) {
1572		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1573					 &buffer_list);
1574		if (error)
1575			goto error_return;
1576		flags |= XFS_UQUOTA_CHKD;
1577	}
1578
1579	if (gip) {
1580		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1581					 &buffer_list);
1582		if (error)
1583			goto error_return;
1584		flags |= XFS_GQUOTA_CHKD;
1585	}
1586
1587	if (pip) {
1588		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1589					 &buffer_list);
1590		if (error)
1591			goto error_return;
1592		flags |= XFS_PQUOTA_CHKD;
1593	}
1594
1595	xfs_set_quotacheck_running(mp);
1596	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1597			NULL);
1598	xfs_clear_quotacheck_running(mp);
1599
1600	/*
1601	 * On error, the inode walk may have partially populated the dquot
1602	 * caches.  We must purge them before disabling quota and tearing down
1603	 * the quotainfo, or else the dquots will leak.
1604	 */
1605	if (error)
1606		goto error_purge;
1607
1608	/*
1609	 * We've made all the changes that we need to make incore.  Flush them
1610	 * down to disk buffers if everything was updated successfully.
1611	 */
1612	if (XFS_IS_UQUOTA_ON(mp)) {
1613		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1614					  &buffer_list);
1615	}
1616	if (XFS_IS_GQUOTA_ON(mp)) {
1617		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1618					   &buffer_list);
1619		if (!error)
1620			error = error2;
1621	}
1622	if (XFS_IS_PQUOTA_ON(mp)) {
1623		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1624					   &buffer_list);
1625		if (!error)
1626			error = error2;
1627	}
1628
1629	error2 = xfs_buf_delwri_submit(&buffer_list);
1630	if (!error)
1631		error = error2;
1632
1633	/*
1634	 * We can get this error if we couldn't do a dquot allocation inside
1635	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1636	 * dirty dquots that might be cached, we just want to get rid of them
1637	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1638	 * at this point (because we intentionally didn't in dqget_noattach).
1639	 */
1640	if (error)
1641		goto error_purge;
1642
1643	/*
 1644	 * If one type of quota is off, then it will lose its
1645	 * quotachecked status, since we won't be doing accounting for
1646	 * that type anymore.
1647	 */
1648	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1649	mp->m_qflags |= flags;
1650
1651error_return:
1652	xfs_buf_delwri_cancel(&buffer_list);
1653
1654	if (error) {
1655		xfs_warn(mp,
1656	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1657			error);
1658		/*
1659		 * We must turn off quotas.
1660		 */
1661		ASSERT(mp->m_quotainfo != NULL);
1662		xfs_qm_destroy_quotainfo(mp);
1663		if (xfs_mount_reset_sbqflags(mp)) {
1664			xfs_warn(mp,
1665				"Quotacheck: Failed to reset quota flags.");
1666		}
1667		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1668	} else {
1669		xfs_notice(mp, "Quotacheck: Done.");
1670		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1671	}
1672
1673	return error;
1674
1675error_purge:
1676	/*
1677	 * On error, we may have inodes queued for inactivation. This may try
1678	 * to attach dquots to the inode before running cleanup operations on
1679	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1680	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1681	 * pending inodegc operations before we purge the dquots from memory,
1682	 * ensuring that background inactivation is idle whilst we turn off
1683	 * quotas.
1684	 */
1685	xfs_inodegc_flush(mp);
1686	xfs_qm_dqpurge_all(mp);
1687	goto error_return;
1688
1689}
1690
1691/*
1692 * This is called from xfs_mountfs to start quotas and initialize all
1693 * necessary data structures like quotainfo.  This is also responsible for
1694 * running a quotacheck as necessary.  We are guaranteed that the superblock
1695 * is consistently read in at this point.
1696 *
1697 * If we fail here, the mount will continue with quota turned off. We don't
 1698 * need to indicate success or failure at all.
1699 */
1700void
1701xfs_qm_mount_quotas(
1702	struct xfs_mount	*mp)
1703{
1704	int			error = 0;
1705	uint			sbf;
1706
1707	/*
1708	 * If quotas on realtime volumes is not supported, disable quotas
1709	 * immediately.  We only support rtquota if rtgroups are enabled to
1710	 * avoid problems with older kernels.
1711	 */
1712	if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
1713		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1714		mp->m_qflags = 0;
1715		goto write_changes;
1716	}
1717
1718	ASSERT(XFS_IS_QUOTA_ON(mp));
1719
1720	/*
1721	 * Allocate the quotainfo structure inside the mount struct, and
1722	 * create quotainode(s), and change/rev superblock if necessary.
1723	 */
1724	error = xfs_qm_init_quotainfo(mp);
1725	if (error) {
1726		/*
1727		 * We must turn off quotas.
1728		 */
1729		ASSERT(mp->m_quotainfo == NULL);
1730		mp->m_qflags = 0;
1731		goto write_changes;
1732	}
1733	/*
1734	 * If any of the quotas are not consistent, do a quotacheck.
1735	 */
1736	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1737		error = xfs_qm_quotacheck(mp);
1738		if (error) {
1739			/* Quotacheck failed and disabled quotas. */
1740			return;
1741		}
1742	}
1743	/*
 1744	 * If one type of quota is off, then it will lose its
1745	 * quotachecked status, since we won't be doing accounting for
1746	 * that type anymore.
1747	 */
1748	if (!XFS_IS_UQUOTA_ON(mp))
1749		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1750	if (!XFS_IS_GQUOTA_ON(mp))
1751		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1752	if (!XFS_IS_PQUOTA_ON(mp))
1753		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1754
1755 write_changes:
1756	/*
1757	 * We actually don't have to acquire the m_sb_lock at all.
1758	 * This can only be called from mount, and that's single threaded. XXX
1759	 */
1760	spin_lock(&mp->m_sb_lock);
1761	sbf = mp->m_sb.sb_qflags;
1762	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1763	spin_unlock(&mp->m_sb_lock);
1764
1765	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1766		if (xfs_sync_sb(mp, false)) {
1767			/*
1768			 * We could only have been turning quotas off.
1769			 * We aren't in very good shape actually because
1770			 * the incore structures are convinced that quotas are
 1771			 * off, but the on-disk superblock doesn't know that!
1772			 */
1773			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1774			xfs_alert(mp, "%s: Superblock update failed!",
1775				__func__);
1776		}
1777	}
1778
1779	if (error) {
1780		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
1781		return;
1782	}
1783}
1784
1785/*
1786 * Load the inode for a given type of quota, assuming that the sb fields have
1787 * been sorted out.  This is not true when switching quota types on a V4
1788 * filesystem, so do not use this function for that.
1789 *
1790 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
1791 * success; or a negative errno.
1792 */
1793int
1794xfs_qm_qino_load(
1795	struct xfs_mount	*mp,
1796	xfs_dqtype_t		type,
1797	struct xfs_inode	**ipp)
1798{
1799	struct xfs_trans	*tp;
1800	struct xfs_inode	*dp = NULL;
1801	int			error;
1802
1803	error = xfs_trans_alloc_empty(mp, &tp);
1804	if (error)
1805		return error;
1806
1807	if (xfs_has_metadir(mp)) {
1808		error = xfs_dqinode_load_parent(tp, &dp);
1809		if (error)
1810			goto out_cancel;
1811	}
1812
1813	error = xfs_dqinode_load(tp, dp, type, ipp);
1814	if (dp)
1815		xfs_irele(dp);
1816out_cancel:
1817	xfs_trans_cancel(tp);
1818	return error;
1819}
1820
1821/*
1822 * This is called after the superblock has been read in and we're ready to
1823 * iget the quota inodes.
1824 */
1825STATIC int
1826xfs_qm_init_quotainos(
1827	xfs_mount_t	*mp)
1828{
1829	struct xfs_inode	*uip = NULL;
1830	struct xfs_inode	*gip = NULL;
1831	struct xfs_inode	*pip = NULL;
1832	int			error;
1833	uint			flags = 0;
1834
1835	ASSERT(mp->m_quotainfo);
1836
1837	/*
1838	 * Get the uquota and gquota inodes
1839	 */
1840	if (xfs_has_quota(mp)) {
1841		if (XFS_IS_UQUOTA_ON(mp) &&
1842		    mp->m_sb.sb_uquotino != NULLFSINO) {
1843			ASSERT(mp->m_sb.sb_uquotino > 0);
1844			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
1845			if (error)
1846				return error;
1847		}
1848		if (XFS_IS_GQUOTA_ON(mp) &&
1849		    mp->m_sb.sb_gquotino != NULLFSINO) {
1850			ASSERT(mp->m_sb.sb_gquotino > 0);
1851			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
1852			if (error)
1853				goto error_rele;
1854		}
1855		if (XFS_IS_PQUOTA_ON(mp) &&
1856		    mp->m_sb.sb_pquotino != NULLFSINO) {
1857			ASSERT(mp->m_sb.sb_pquotino > 0);
1858			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
1859			if (error)
1860				goto error_rele;
1861		}
1862	} else {
1863		flags |= XFS_QMOPT_SBVERSION;
1864	}
1865
1866	/*
1867	 * Create the three inodes, if they don't exist already. The changes
1868	 * made above will get added to a transaction and logged in one of
1869	 * the qino_alloc calls below.  If the device is readonly,
1870	 * temporarily switch to read-write to do this.
1871	 */
1872	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1873		error = xfs_qm_qino_alloc(mp, &uip,
1874					      flags | XFS_QMOPT_UQUOTA);
1875		if (error)
1876			goto error_rele;
1877
1878		flags &= ~XFS_QMOPT_SBVERSION;
1879	}
1880	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1881		error = xfs_qm_qino_alloc(mp, &gip,
1882					  flags | XFS_QMOPT_GQUOTA);
1883		if (error)
1884			goto error_rele;
1885
1886		flags &= ~XFS_QMOPT_SBVERSION;
1887	}
1888	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1889		error = xfs_qm_qino_alloc(mp, &pip,
1890					  flags | XFS_QMOPT_PQUOTA);
1891		if (error)
1892			goto error_rele;
1893	}
1894
1895	mp->m_quotainfo->qi_uquotaip = uip;
1896	mp->m_quotainfo->qi_gquotaip = gip;
1897	mp->m_quotainfo->qi_pquotaip = pip;
1898
1899	return 0;
1900
1901error_rele:
1902	if (uip)
1903		xfs_irele(uip);
1904	if (gip)
1905		xfs_irele(gip);
1906	if (pip)
1907		xfs_irele(pip);
1908	return error;
1909}
1910
1911STATIC void
1912xfs_qm_dqfree_one(
1913	struct xfs_dquot	*dqp)
1914{
1915	struct xfs_mount	*mp = dqp->q_mount;
1916	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1917
1918	mutex_lock(&qi->qi_tree_lock);
1919	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1920
1921	qi->qi_dquots--;
1922	mutex_unlock(&qi->qi_tree_lock);
1923
1924	xfs_qm_dqdestroy(dqp);
1925}
1926
1927/* --------------- utility functions for vnodeops ---------------- */
1928
1929
1930/*
 1931 * Given an inode, a uid, gid and prid, make sure that we have
1932 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1933 * quotas by creating this file.
1934 * This also attaches dquot(s) to the given inode after locking it,
1935 * and returns the dquots corresponding to the uid and/or gid.
1936 *
1937 * in	: inode (unlocked)
1938 * out	: udquot, gdquot with references taken and unlocked
1939 */
1940int
1941xfs_qm_vop_dqalloc(
1942	struct xfs_inode	*ip,
1943	kuid_t			uid,
1944	kgid_t			gid,
1945	prid_t			prid,
1946	uint			flags,
1947	struct xfs_dquot	**O_udqpp,
1948	struct xfs_dquot	**O_gdqpp,
1949	struct xfs_dquot	**O_pdqpp)
1950{
1951	struct xfs_mount	*mp = ip->i_mount;
1952	struct inode		*inode = VFS_I(ip);
1953	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1954	struct xfs_dquot	*uq = NULL;
1955	struct xfs_dquot	*gq = NULL;
1956	struct xfs_dquot	*pq = NULL;
1957	int			error;
1958	uint			lockflags;
1959
1960	if (!XFS_IS_QUOTA_ON(mp))
1961		return 0;
1962
1963	ASSERT(!xfs_is_metadir_inode(ip));
1964
1965	lockflags = XFS_ILOCK_EXCL;
1966	xfs_ilock(ip, lockflags);
1967
1968	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1969		gid = inode->i_gid;
1970
1971	/*
1972	 * Attach the dquot(s) to this inode, doing a dquot allocation
1973	 * if necessary. The dquot(s) will not be locked.
1974	 */
1975	if (XFS_NOT_DQATTACHED(mp, ip)) {
1976		error = xfs_qm_dqattach_locked(ip, true);
1977		if (error) {
1978			xfs_iunlock(ip, lockflags);
1979			return error;
1980		}
1981	}
1982
1983	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1984		ASSERT(O_udqpp);
1985		if (!uid_eq(inode->i_uid, uid)) {
1986			/*
1987			 * What we need is the dquot that has this uid, and
1988			 * if we send the inode to dqget, the uid of the inode
1989			 * takes priority over what's sent in the uid argument.
1990			 * We must unlock inode here before calling dqget if
1991			 * we're not sending the inode, because otherwise
1992			 * we'll deadlock by doing trans_reserve while
1993			 * holding ilock.
1994			 */
1995			xfs_iunlock(ip, lockflags);
1996			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1997					XFS_DQTYPE_USER, true, &uq);
1998			if (error) {
1999				ASSERT(error != -ENOENT);
2000				return error;
2001			}
2002			/*
2003			 * Get the ilock in the right order.
2004			 */
2005			xfs_dqunlock(uq);
2006			lockflags = XFS_ILOCK_SHARED;
2007			xfs_ilock(ip, lockflags);
2008		} else {
2009			/*
2010			 * Take an extra reference, because we'll return
2011			 * this to caller
2012			 */
2013			ASSERT(ip->i_udquot);
2014			uq = xfs_qm_dqhold(ip->i_udquot);
2015		}
2016	}
2017	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
2018		ASSERT(O_gdqpp);
2019		if (!gid_eq(inode->i_gid, gid)) {
2020			xfs_iunlock(ip, lockflags);
2021			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
2022					XFS_DQTYPE_GROUP, true, &gq);
2023			if (error) {
2024				ASSERT(error != -ENOENT);
2025				goto error_rele;
2026			}
2027			xfs_dqunlock(gq);
2028			lockflags = XFS_ILOCK_SHARED;
2029			xfs_ilock(ip, lockflags);
2030		} else {
2031			ASSERT(ip->i_gdquot);
2032			gq = xfs_qm_dqhold(ip->i_gdquot);
2033		}
2034	}
2035	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
2036		ASSERT(O_pdqpp);
2037		if (ip->i_projid != prid) {
2038			xfs_iunlock(ip, lockflags);
2039			error = xfs_qm_dqget(mp, prid,
2040					XFS_DQTYPE_PROJ, true, &pq);
2041			if (error) {
2042				ASSERT(error != -ENOENT);
2043				goto error_rele;
2044			}
2045			xfs_dqunlock(pq);
2046			lockflags = XFS_ILOCK_SHARED;
2047			xfs_ilock(ip, lockflags);
2048		} else {
2049			ASSERT(ip->i_pdquot);
2050			pq = xfs_qm_dqhold(ip->i_pdquot);
2051		}
2052	}
2053	trace_xfs_dquot_dqalloc(ip);
2054
2055	xfs_iunlock(ip, lockflags);
2056	if (O_udqpp)
2057		*O_udqpp = uq;
2058	else
2059		xfs_qm_dqrele(uq);
2060	if (O_gdqpp)
2061		*O_gdqpp = gq;
2062	else
2063		xfs_qm_dqrele(gq);
2064	if (O_pdqpp)
2065		*O_pdqpp = pq;
2066	else
2067		xfs_qm_dqrele(pq);
2068	return 0;
2069
2070error_rele:
2071	xfs_qm_dqrele(gq);
2072	xfs_qm_dqrele(uq);
2073	return error;
2074}
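To make the calling contract in the header comment above concrete, here is a minimal, hypothetical sketch of a create-path caller (not part of xfs_qm.c): it takes dquot references up front, attaches them to the new inode inside its transaction via xfs_qm_vop_create_dqattach(), and then drops its own references. The helper name and the elided transaction steps are illustrative assumptions, not code from this file.

static int
example_reserve_quota_for_create(
	struct xfs_inode	*dp,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid)
{
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
	int			error;

	/* Take references to (and allocate, if needed) all three dquots. */
	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/*
	 * ... allocate a transaction, create the inode, and attach the
	 * dquots to it inside the transaction (hypothetical step):
	 *
	 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
	 */

	/* The new inode holds its own references now; drop ours. */
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
	return 0;
}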
2075
2076/*
2077 * Actually transfer ownership, and do dquot modifications.
2078 * These were already reserved.
2079 */
2080struct xfs_dquot *
2081xfs_qm_vop_chown(
2082	struct xfs_trans	*tp,
2083	struct xfs_inode	*ip,
2084	struct xfs_dquot	**IO_olddq,
2085	struct xfs_dquot	*newdq)
2086{
2087	struct xfs_dquot	*prevdq;
2088	xfs_filblks_t		dblocks, rblocks;
2089	bool			isrt = XFS_IS_REALTIME_INODE(ip);
2090
2091	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2092	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
2093	ASSERT(!xfs_is_metadir_inode(ip));
2094
2095	/* old dquot */
2096	prevdq = *IO_olddq;
2097	ASSERT(prevdq);
2098	ASSERT(prevdq != newdq);
2099
2100	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
2101
2102	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
2103			-(xfs_qcnt_t)dblocks);
2104	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
2105			-(xfs_qcnt_t)rblocks);
2106	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2107
2108	/* the sparkling new dquot */
2109	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
2110	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
2111	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2112
2113	/*
2114	 * Back when we made quota reservations for the chown, we reserved the
2115	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
2116	 * switched the dquots, decrease the new dquot's block reservation
2117	 * (having already bumped up the real counter) so that we don't have
2118	 * any reservation to give back when we commit.
2119	 */
2120	xfs_trans_mod_dquot(tp, newdq,
2121			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
2122			-ip->i_delayed_blks);
2123
2124	/*
2125	 * Give the incore reservation for delalloc blocks back to the old
2126	 * dquot.  We don't normally handle delalloc quota reservations
2127	 * transactionally, so just lock the dquot and subtract from the
2128	 * reservation.  Dirty the transaction because it's too late to turn
2129	 * back now.
2130	 */
2131	tp->t_flags |= XFS_TRANS_DIRTY;
2132	xfs_dqlock(prevdq);
2133	if (isrt) {
2134		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
2135		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
2136	} else {
2137		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
2138		prevdq->q_blk.reserved -= ip->i_delayed_blks;
2139	}
2140	xfs_dqunlock(prevdq);
2141
2142	/*
2143	 * Take an extra reference, because the inode is going to keep
2144	 * this dquot pointer even after the trans_commit.
2145	 */
2146	*IO_olddq = xfs_qm_dqhold(newdq);
2147
2148	return prevdq;
2149}
2150
2151int
2152xfs_qm_vop_rename_dqattach(
2153	struct xfs_inode	**i_tab)
2154{
2155	struct xfs_mount	*mp = i_tab[0]->i_mount;
2156	int			i;
2157
2158	if (!XFS_IS_QUOTA_ON(mp))
2159		return 0;
2160
2161	for (i = 0; (i < 4 && i_tab[i]); i++) {
2162		struct xfs_inode	*ip = i_tab[i];
2163		int			error;
2164
2165		/*
2166		 * Watch out for duplicate entries in the table.
2167		 */
2168		if (i == 0 || ip != i_tab[i-1]) {
2169			if (XFS_NOT_DQATTACHED(mp, ip)) {
2170				error = xfs_qm_dqattach(ip);
2171				if (error)
2172					return error;
2173			}
2174		}
2175	}
2176	return 0;
2177}
2178
2179void
2180xfs_qm_vop_create_dqattach(
2181	struct xfs_trans	*tp,
2182	struct xfs_inode	*ip,
2183	struct xfs_dquot	*udqp,
2184	struct xfs_dquot	*gdqp,
2185	struct xfs_dquot	*pdqp)
2186{
2187	struct xfs_mount	*mp = tp->t_mountp;
2188
2189	if (!XFS_IS_QUOTA_ON(mp))
2190		return;
2191
2192	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2193	ASSERT(!xfs_is_metadir_inode(ip));
2194
2195	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2196		ASSERT(ip->i_udquot == NULL);
2197		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
2198
2199		ip->i_udquot = xfs_qm_dqhold(udqp);
2200	}
2201	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2202		ASSERT(ip->i_gdquot == NULL);
2203		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
2204
2205		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2206	}
2207	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2208		ASSERT(ip->i_pdquot == NULL);
2209		ASSERT(ip->i_projid == pdqp->q_id);
2210
2211		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2212	}
2213
2214	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
2215}
2216
2217/* Decide if this inode's dquot is near an enforcement boundary. */
2218bool
2219xfs_inode_near_dquot_enforcement(
2220	struct xfs_inode	*ip,
2221	xfs_dqtype_t		type)
2222{
2223	struct xfs_dquot	*dqp;
2224	struct xfs_dquot_res	*res;
2225	struct xfs_dquot_pre	*pre;
2226	int64_t			freesp;
2227
2228	/* We only care for quotas that are enabled and enforced. */
2229	dqp = xfs_inode_dquot(ip, type);
2230	if (!dqp || !xfs_dquot_is_enforced(dqp))
2231		return false;
2232
2233	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
2234	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
2235	    xfs_dquot_res_over_limits(&dqp->q_rtb))
2236		return true;
2237
2238	if (XFS_IS_REALTIME_INODE(ip)) {
2239		res = &dqp->q_rtb;
2240		pre = &dqp->q_rtb_prealloc;
2241	} else {
2242		res = &dqp->q_blk;
2243		pre = &dqp->q_blk_prealloc;
2244	}
2245
2246	/* For space on the data device, check the various thresholds. */
2247	if (!pre->q_prealloc_hi_wmark)
2248		return false;
2249
2250	if (res->reserved < pre->q_prealloc_lo_wmark)
2251		return false;
2252
2253	if (res->reserved >= pre->q_prealloc_hi_wmark)
2254		return true;
2255
2256	freesp = pre->q_prealloc_hi_wmark - res->reserved;
2257	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
2258		return true;
2259
2260	return false;
2261}
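The watermark checks above boil down to a little arithmetic on the reservation against the prealloc limits. A self-contained sketch of the same decision, using invented numbers rather than real XFS defaults (the last argument stands in for q_low_space[XFS_QLOWSP_5_PCNT]):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
near_enforcement(int64_t reserved, int64_t lo_wmark, int64_t hi_wmark,
		 int64_t low_space_5pct)
{
	if (!hi_wmark)			/* no hard watermark configured */
		return false;
	if (reserved < lo_wmark)	/* comfortably below the low watermark */
		return false;
	if (reserved >= hi_wmark)	/* at or past the hard watermark */
		return true;
	/* otherwise: near the boundary if less than 5% headroom remains */
	return (hi_wmark - reserved) < low_space_5pct;
}

int main(void)
{
	/* 960 of 1000 reserved, 40 blocks headroom < 50: near the boundary */
	printf("%d\n", near_enforcement(960, 500, 1000, 50));
	/* 400 of 1000 reserved, still under the low watermark: not near */
	printf("%d\n", near_enforcement(400, 500, 1000, 50));
	return 0;
}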
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_iwalk.h"
  17#include "xfs_quota.h"
  18#include "xfs_bmap.h"
  19#include "xfs_bmap_util.h"
  20#include "xfs_trans.h"
  21#include "xfs_trans_space.h"
  22#include "xfs_qm.h"
  23#include "xfs_trace.h"
  24#include "xfs_icache.h"
  25#include "xfs_error.h"
  26#include "xfs_ag.h"
  27#include "xfs_ialloc.h"
  28
  29/*
  30 * The global quota manager. There is only one of these for the entire
  31 * system, _not_ one per file system. XQM keeps track of the overall
  32 * quota functionality, including maintaining the freelist and hash
  33 * tables of dquots.
  34 */
  35STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
  36STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
  37
  38STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
  39STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  40/*
  41 * We use the batch lookup interface to iterate over the dquots as it
  42 * currently is the only interface into the radix tree code that allows
  43 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
  44 * operations is fine as all callers are used either during mount/umount
  45 * or quotaoff.
  46 */
  47#define XFS_DQ_LOOKUP_BATCH	32
  48
  49STATIC int
  50xfs_qm_dquot_walk(
  51	struct xfs_mount	*mp,
  52	xfs_dqtype_t		type,
  53	int			(*execute)(struct xfs_dquot *dqp, void *data),
  54	void			*data)
  55{
  56	struct xfs_quotainfo	*qi = mp->m_quotainfo;
  57	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
  58	uint32_t		next_index;
  59	int			last_error = 0;
  60	int			skipped;
  61	int			nr_found;
  62
  63restart:
  64	skipped = 0;
  65	next_index = 0;
  66	nr_found = 0;
  67
  68	while (1) {
  69		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  70		int		error = 0;
  71		int		i;
  72
  73		mutex_lock(&qi->qi_tree_lock);
  74		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  75					next_index, XFS_DQ_LOOKUP_BATCH);
  76		if (!nr_found) {
  77			mutex_unlock(&qi->qi_tree_lock);
  78			break;
  79		}
  80
  81		for (i = 0; i < nr_found; i++) {
  82			struct xfs_dquot *dqp = batch[i];
  83
  84			next_index = dqp->q_id + 1;
  85
  86			error = execute(batch[i], data);
  87			if (error == -EAGAIN) {
  88				skipped++;
  89				continue;
  90			}
  91			if (error && last_error != -EFSCORRUPTED)
  92				last_error = error;
  93		}
  94
  95		mutex_unlock(&qi->qi_tree_lock);
  96
  97		/* bail out if the filesystem is corrupted.  */
  98		if (last_error == -EFSCORRUPTED) {
  99			skipped = 0;
 100			break;
 101		}
 102		/* we're done if id overflows back to zero */
 103		if (!next_index)
 104			break;
 105	}
 106
 107	if (skipped) {
 108		delay(1);
 109		goto restart;
 110	}
 111
 112	return last_error;
 113}
 114
 115
 116/*
 117 * Purge a dquot from all tracking data structures and free it.
 118 */
 119STATIC int
 120xfs_qm_dqpurge(
 121	struct xfs_dquot	*dqp,
 122	void			*data)
 123{
 124	struct xfs_mount	*mp = dqp->q_mount;
 125	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 126	int			error = -EAGAIN;
 127
 128	xfs_dqlock(dqp);
 129	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
 130		goto out_unlock;
 131
 132	dqp->q_flags |= XFS_DQFLAG_FREEING;
 133
 134	xfs_dqflock(dqp);
 135
 136	/*
 137	 * If we are turning this type of quotas off, we don't care
 138	 * about the dirty metadata sitting in this dquot. OTOH, if
 139	 * we're unmounting, we do care, so we flush it and wait.
 140	 */
 141	if (XFS_DQ_IS_DIRTY(dqp)) {
 142		struct xfs_buf	*bp = NULL;
 143
 144		/*
 145		 * We don't care about getting disk errors here. We need
 146		 * to purge this dquot anyway, so we go ahead regardless.
 147		 */
 148		error = xfs_qm_dqflush(dqp, &bp);
 149		if (!error) {
 150			error = xfs_bwrite(bp);
 151			xfs_buf_relse(bp);
 152		} else if (error == -EAGAIN) {
 153			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
 154			goto out_unlock;
 155		}
 156		xfs_dqflock(dqp);
 157	}
 158
 159	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 160	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 161		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
 162
 163	xfs_dqfunlock(dqp);
 164	xfs_dqunlock(dqp);
 165
 166	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
 167	qi->qi_dquots--;
 168
 169	/*
 170	 * We move dquots to the freelist as soon as their reference count
 171	 * hits zero, so it really should be on the freelist here.
 172	 */
 173	ASSERT(!list_empty(&dqp->q_lru));
 174	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 175	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
 176
 177	xfs_qm_dqdestroy(dqp);
 178	return 0;
 179
 180out_unlock:
 181	xfs_dqunlock(dqp);
 182	return error;
 183}
 184
 185/*
 186 * Purge the dquot cache.
 187 */
 188void
 189xfs_qm_dqpurge_all(
 190	struct xfs_mount	*mp,
 191	uint			flags)
 192{
 193	if (flags & XFS_QMOPT_UQUOTA)
 194		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
 195	if (flags & XFS_QMOPT_GQUOTA)
 196		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
 197	if (flags & XFS_QMOPT_PQUOTA)
 198		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
 199}
 200
 201/*
 202 * Just destroy the quotainfo structure.
 203 */
 204void
 205xfs_qm_unmount(
 206	struct xfs_mount	*mp)
 207{
 208	if (mp->m_quotainfo) {
 209		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
 210		xfs_qm_destroy_quotainfo(mp);
 211	}
 212}
 213
 214/*
 215 * Called from the vfsops layer.
 216 */
 217void
 218xfs_qm_unmount_quotas(
 219	xfs_mount_t	*mp)
 220{
 221	/*
 222	 * Release the dquots that the root inode, et al, might be holding,
 223	 * before we flush quotas and blow away the quotainfo structure.
 224	 */
 225	ASSERT(mp->m_rootip);
 226	xfs_qm_dqdetach(mp->m_rootip);
 227	if (mp->m_rbmip)
 228		xfs_qm_dqdetach(mp->m_rbmip);
 229	if (mp->m_rsumip)
 230		xfs_qm_dqdetach(mp->m_rsumip);
 231
 232	/*
 233	 * Release the quota inodes.
 234	 */
 235	if (mp->m_quotainfo) {
 236		if (mp->m_quotainfo->qi_uquotaip) {
 237			xfs_irele(mp->m_quotainfo->qi_uquotaip);
 238			mp->m_quotainfo->qi_uquotaip = NULL;
 239		}
 240		if (mp->m_quotainfo->qi_gquotaip) {
 241			xfs_irele(mp->m_quotainfo->qi_gquotaip);
 242			mp->m_quotainfo->qi_gquotaip = NULL;
 243		}
 244		if (mp->m_quotainfo->qi_pquotaip) {
 245			xfs_irele(mp->m_quotainfo->qi_pquotaip);
 246			mp->m_quotainfo->qi_pquotaip = NULL;
 247		}
 248	}
 249}
 250
 251STATIC int
 252xfs_qm_dqattach_one(
 253	struct xfs_inode	*ip,
 254	xfs_dqtype_t		type,
 255	bool			doalloc,
 256	struct xfs_dquot	**IO_idqpp)
 257{
 258	struct xfs_dquot	*dqp;
 259	int			error;
 260
 261	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 262	error = 0;
 263
 264	/*
 265	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
 266	 * or &i_gdquot. This made the code look weird, but made the logic a lot
 267	 * simpler.
 268	 */
 269	dqp = *IO_idqpp;
 270	if (dqp) {
 271		trace_xfs_dqattach_found(dqp);
 272		return 0;
 273	}
 274
 275	/*
 276	 * Find the dquot from somewhere. This bumps the reference count of
 277	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
 278	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
 279	 * turned off suddenly.
 280	 */
 281	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
 282	if (error)
 283		return error;
 284
 285	trace_xfs_dqattach_get(dqp);
 286
 287	/*
 288	 * dqget may have dropped and re-acquired the ilock, but it guarantees
 289	 * that the dquot returned is the one that should go in the inode.
 290	 */
 291	*IO_idqpp = dqp;
 292	xfs_dqunlock(dqp);
 293	return 0;
 294}
 295
 296static bool
 297xfs_qm_need_dqattach(
 298	struct xfs_inode	*ip)
 299{
 300	struct xfs_mount	*mp = ip->i_mount;
 301
 302	if (!XFS_IS_QUOTA_RUNNING(mp))
 303		return false;
 304	if (!XFS_IS_QUOTA_ON(mp))
 305		return false;
 306	if (!XFS_NOT_DQATTACHED(mp, ip))
 307		return false;
 308	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 309		return false;
 310	return true;
 311}
 312
 313/*
 314 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 315 * into account.
 316 * If @doalloc is true, the dquot(s) will be allocated if needed.
 317 * Inode may get unlocked and relocked in here, and the caller must deal with
 318 * the consequences.
 319 */
 320int
 321xfs_qm_dqattach_locked(
 322	xfs_inode_t	*ip,
 323	bool		doalloc)
 324{
 325	xfs_mount_t	*mp = ip->i_mount;
 326	int		error = 0;
 327
 328	if (!xfs_qm_need_dqattach(ip))
 329		return 0;
 330
 331	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 332
 333	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 334		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
 335				doalloc, &ip->i_udquot);
 336		if (error)
 337			goto done;
 338		ASSERT(ip->i_udquot);
 339	}
 340
 341	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 342		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
 343				doalloc, &ip->i_gdquot);
 344		if (error)
 345			goto done;
 346		ASSERT(ip->i_gdquot);
 347	}
 348
 349	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 350		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
 351				doalloc, &ip->i_pdquot);
 352		if (error)
 353			goto done;
 354		ASSERT(ip->i_pdquot);
 355	}
 356
 357done:
 358	/*
 359	 * Don't worry about the dquots that we may have attached before any
 360	 * error - they'll get detached later if it has not already been done.
 361	 */
 362	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 363	return error;
 364}
 365
 366int
 367xfs_qm_dqattach(
 368	struct xfs_inode	*ip)
 369{
 370	int			error;
 371
 372	if (!xfs_qm_need_dqattach(ip))
 373		return 0;
 374
 375	xfs_ilock(ip, XFS_ILOCK_EXCL);
 376	error = xfs_qm_dqattach_locked(ip, false);
 377	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 378
 379	return error;
 380}
 381
 382/*
 383 * Release dquots (and their references) if any.
 384 * The inode should be locked EXCL except when this is called by
 385 * xfs_ireclaim.
 386 */
 387void
 388xfs_qm_dqdetach(
 389	xfs_inode_t	*ip)
 390{
 391	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
 392		return;
 393
 394	trace_xfs_dquot_dqdetach(ip);
 395
 396	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
 397	if (ip->i_udquot) {
 398		xfs_qm_dqrele(ip->i_udquot);
 399		ip->i_udquot = NULL;
 400	}
 401	if (ip->i_gdquot) {
 402		xfs_qm_dqrele(ip->i_gdquot);
 403		ip->i_gdquot = NULL;
 404	}
 405	if (ip->i_pdquot) {
 406		xfs_qm_dqrele(ip->i_pdquot);
 407		ip->i_pdquot = NULL;
 408	}
 409}
 410
 411struct xfs_qm_isolate {
 412	struct list_head	buffers;
 413	struct list_head	dispose;
 414};
 415
 416static enum lru_status
 417xfs_qm_dquot_isolate(
 418	struct list_head	*item,
 419	struct list_lru_one	*lru,
 420	spinlock_t		*lru_lock,
 421	void			*arg)
 422		__releases(lru_lock) __acquires(lru_lock)
 423{
 424	struct xfs_dquot	*dqp = container_of(item,
 425						struct xfs_dquot, q_lru);
 426	struct xfs_qm_isolate	*isol = arg;
 427
 428	if (!xfs_dqlock_nowait(dqp))
 429		goto out_miss_busy;
 430
 431	/*
 432	 * This dquot has acquired a reference in the meantime; remove it from
 433	 * the freelist and try again.
 434	 */
 435	if (dqp->q_nrefs) {
 436		xfs_dqunlock(dqp);
 437		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 438
 439		trace_xfs_dqreclaim_want(dqp);
 440		list_lru_isolate(lru, &dqp->q_lru);
 441		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 442		return LRU_REMOVED;
 443	}
 444
 445	/*
 446	 * If the dquot is dirty, flush it. If it's already being flushed, just
 447	 * skip it so there is time for the IO to complete before we try to
 448	 * reclaim it again on the next LRU pass.
 449	 */
 450	if (!xfs_dqflock_nowait(dqp)) {
 451		xfs_dqunlock(dqp);
 452		goto out_miss_busy;
 453	}
 454
 455	if (XFS_DQ_IS_DIRTY(dqp)) {
 456		struct xfs_buf	*bp = NULL;
 457		int		error;
 458
 459		trace_xfs_dqreclaim_dirty(dqp);
 460
 461		/* we have to drop the LRU lock to flush the dquot */
 462		spin_unlock(lru_lock);
 463
 464		error = xfs_qm_dqflush(dqp, &bp);
 465		if (error)
 466			goto out_unlock_dirty;
 467
 468		xfs_buf_delwri_queue(bp, &isol->buffers);
 469		xfs_buf_relse(bp);
 470		goto out_unlock_dirty;
 471	}
 472	xfs_dqfunlock(dqp);
 473
 474	/*
 475	 * Prevent lookups now that we are past the point of no return.
 476	 */
 477	dqp->q_flags |= XFS_DQFLAG_FREEING;
 478	xfs_dqunlock(dqp);
 479
 480	ASSERT(dqp->q_nrefs == 0);
 481	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
 482	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
 483	trace_xfs_dqreclaim_done(dqp);
 484	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
 485	return LRU_REMOVED;
 486
 487out_miss_busy:
 488	trace_xfs_dqreclaim_busy(dqp);
 489	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 490	return LRU_SKIP;
 491
 492out_unlock_dirty:
 493	trace_xfs_dqreclaim_busy(dqp);
 494	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
 495	xfs_dqunlock(dqp);
 496	spin_lock(lru_lock);
 497	return LRU_RETRY;
 498}
 499
 500static unsigned long
 501xfs_qm_shrink_scan(
 502	struct shrinker		*shrink,
 503	struct shrink_control	*sc)
 504{
 505	struct xfs_quotainfo	*qi = container_of(shrink,
 506					struct xfs_quotainfo, qi_shrinker);
 507	struct xfs_qm_isolate	isol;
 508	unsigned long		freed;
 509	int			error;
 510
 511	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
 512		return 0;
 513
 514	INIT_LIST_HEAD(&isol.buffers);
 515	INIT_LIST_HEAD(&isol.dispose);
 516
 517	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
 518				     xfs_qm_dquot_isolate, &isol);
 519
 520	error = xfs_buf_delwri_submit(&isol.buffers);
 521	if (error)
 522		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
 523
 524	while (!list_empty(&isol.dispose)) {
 525		struct xfs_dquot	*dqp;
 526
 527		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
 528		list_del_init(&dqp->q_lru);
 529		xfs_qm_dqfree_one(dqp);
 530	}
 531
 532	return freed;
 533}
 534
 535static unsigned long
 536xfs_qm_shrink_count(
 537	struct shrinker		*shrink,
 538	struct shrink_control	*sc)
 539{
 540	struct xfs_quotainfo	*qi = container_of(shrink,
 541					struct xfs_quotainfo, qi_shrinker);
 542
 543	return list_lru_shrink_count(&qi->qi_lru, sc);
 544}
 545
 546STATIC void
 547xfs_qm_set_defquota(
 548	struct xfs_mount	*mp,
 549	xfs_dqtype_t		type,
 550	struct xfs_quotainfo	*qinf)
 551{
 552	struct xfs_dquot	*dqp;
 553	struct xfs_def_quota	*defq;
 554	int			error;
 555
 556	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 557	if (error)
 558		return;
 559
 560	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
 561
 562	/*
 563	 * Timers and warnings have already been set, let's just set the
 564	 * default limits for this quota type
 565	 */
 566	defq->blk.hard = dqp->q_blk.hardlimit;
 567	defq->blk.soft = dqp->q_blk.softlimit;
 568	defq->ino.hard = dqp->q_ino.hardlimit;
 569	defq->ino.soft = dqp->q_ino.softlimit;
 570	defq->rtb.hard = dqp->q_rtb.hardlimit;
 571	defq->rtb.soft = dqp->q_rtb.softlimit;
 572	xfs_qm_dqdestroy(dqp);
 573}
 574
 575/* Initialize quota time limits from the root dquot. */
 576static void
 577xfs_qm_init_timelimits(
 578	struct xfs_mount	*mp,
 579	xfs_dqtype_t		type)
 580{
 581	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
 582	struct xfs_def_quota	*defq;
 583	struct xfs_dquot	*dqp;
 584	int			error;
 585
 586	defq = xfs_get_defquota(qinf, type);
 587
 588	defq->blk.time = XFS_QM_BTIMELIMIT;
 589	defq->ino.time = XFS_QM_ITIMELIMIT;
 590	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
 591	defq->blk.warn = XFS_QM_BWARNLIMIT;
 592	defq->ino.warn = XFS_QM_IWARNLIMIT;
 593	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
 594
 595	/*
 596	 * We try to get the limits from the superuser's limits fields.
 597	 * This is quite hacky, but it is standard quota practice.
 598	 *
 599	 * Since we may not have done a quotacheck by this point, just read
 600	 * the dquot without attaching it to any hashtables or lists.
 601	 */
 602	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
 603	if (error)
 604		return;
 605
 606	/*
 607	 * The warnings and timers set the grace period given to
 608	 * a user or group before he or she can no longer write.
 609	 * If it is zero, a default is used.
 610	 */
 611	if (dqp->q_blk.timer)
 612		defq->blk.time = dqp->q_blk.timer;
 613	if (dqp->q_ino.timer)
 614		defq->ino.time = dqp->q_ino.timer;
 615	if (dqp->q_rtb.timer)
 616		defq->rtb.time = dqp->q_rtb.timer;
 617	if (dqp->q_blk.warnings)
 618		defq->blk.warn = dqp->q_blk.warnings;
 619	if (dqp->q_ino.warnings)
 620		defq->ino.warn = dqp->q_ino.warnings;
 621	if (dqp->q_rtb.warnings)
 622		defq->rtb.warn = dqp->q_rtb.warnings;
 623
 624	xfs_qm_dqdestroy(dqp);
 625}
 626
 627/*
 628 * This initializes all the quota information that's kept in the
 629 * mount structure
 630 */
 631STATIC int
 632xfs_qm_init_quotainfo(
 633	struct xfs_mount	*mp)
 634{
 635	struct xfs_quotainfo	*qinf;
 636	int			error;
 637
 638	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 639
 640	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
 641
 642	error = list_lru_init(&qinf->qi_lru);
 643	if (error)
 644		goto out_free_qinf;
 645
 646	/*
 647	 * See if quotainodes are set up, and if not, allocate them,
 648	 * and change the superblock accordingly.
 649	 */
 650	error = xfs_qm_init_quotainos(mp);
 651	if (error)
 652		goto out_free_lru;
 653
 654	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
 655	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
 656	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 657	mutex_init(&qinf->qi_tree_lock);
 658
 659	/* mutex used to serialize quotaoffs */
 660	mutex_init(&qinf->qi_quotaofflock);
 661
 662	/* Precalc some constants */
 663	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
 664	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
 665	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
 666		qinf->qi_expiry_min =
 667			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
 668		qinf->qi_expiry_max =
 669			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
 670	} else {
 671		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
 672		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
 673	}
 674	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
 675			qinf->qi_expiry_max);
 676
 677	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
 678
 679	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
 680	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
 681	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
 682
 683	if (XFS_IS_UQUOTA_RUNNING(mp))
 684		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
 685	if (XFS_IS_GQUOTA_RUNNING(mp))
 686		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
 687	if (XFS_IS_PQUOTA_RUNNING(mp))
 688		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 689
 690	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
 691	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 692	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 693	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 694
 695	error = register_shrinker(&qinf->qi_shrinker);
 696	if (error)
 697		goto out_free_inos;
 698
 699	return 0;
 700
 701out_free_inos:
 702	mutex_destroy(&qinf->qi_quotaofflock);
 703	mutex_destroy(&qinf->qi_tree_lock);
 704	xfs_qm_destroy_quotainos(qinf);
 705out_free_lru:
 706	list_lru_destroy(&qinf->qi_lru);
 707out_free_qinf:
 708	kmem_free(qinf);
 709	mp->m_quotainfo = NULL;
 710	return error;
 711}
 712
 713/*
 714 * Gets called when unmounting a filesystem or when all quotas get
 715 * turned off.
 716 * This purges the quota inodes, destroys locks and frees itself.
 717 */
 718void
 719xfs_qm_destroy_quotainfo(
 720	struct xfs_mount	*mp)
 721{
 722	struct xfs_quotainfo	*qi;
 723
 724	qi = mp->m_quotainfo;
 725	ASSERT(qi != NULL);
 726
 727	unregister_shrinker(&qi->qi_shrinker);
 728	list_lru_destroy(&qi->qi_lru);
 729	xfs_qm_destroy_quotainos(qi);
 730	mutex_destroy(&qi->qi_tree_lock);
 731	mutex_destroy(&qi->qi_quotaofflock);
 732	kmem_free(qi);
 733	mp->m_quotainfo = NULL;
 734}
 735
 736/*
 737 * Create an inode and return with a reference already taken, but unlocked.
 738 * This is how we create quota inodes.
 739 */
 740STATIC int
 741xfs_qm_qino_alloc(
 742	struct xfs_mount	*mp,
 743	struct xfs_inode	**ipp,
 744	unsigned int		flags)
 745{
 746	struct xfs_trans	*tp;
 747	int			error;
 748	bool			need_alloc = true;
 749
 750	*ipp = NULL;
 751	/*
 752	 * With a superblock that doesn't have a separate pquotino, we
 753	 * share an inode between gquota and pquota. If the on-disk
 754	 * superblock has GQUOTA and the filesystem is now mounted
 755	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
 756	 * vice-versa.
 757	 */
 758	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
 759			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
 760		xfs_ino_t ino = NULLFSINO;
 761
 762		if ((flags & XFS_QMOPT_PQUOTA) &&
 763			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
 764			ino = mp->m_sb.sb_gquotino;
 765			if (XFS_IS_CORRUPT(mp,
 766					   mp->m_sb.sb_pquotino != NULLFSINO))
 767				return -EFSCORRUPTED;
 768		} else if ((flags & XFS_QMOPT_GQUOTA) &&
 769			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
 770			ino = mp->m_sb.sb_pquotino;
 771			if (XFS_IS_CORRUPT(mp,
 772					   mp->m_sb.sb_gquotino != NULLFSINO))
 773				return -EFSCORRUPTED;
 774		}
 775		if (ino != NULLFSINO) {
 776			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
 777			if (error)
 778				return error;
 779			mp->m_sb.sb_gquotino = NULLFSINO;
 780			mp->m_sb.sb_pquotino = NULLFSINO;
 781			need_alloc = false;
 782		}
 783	}
 784
 785	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
 786			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
 787			0, 0, &tp);
 788	if (error)
 789		return error;
 790
 791	if (need_alloc) {
 792		xfs_ino_t	ino;
 793
 794		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
 795		if (!error)
 796			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
 797					S_IFREG, 1, 0, 0, false, ipp);
 798		if (error) {
 799			xfs_trans_cancel(tp);
 800			return error;
 801		}
 802	}
 803
 804	/*
 805	 * Make the changes in the superblock, and log those too.
 806	 * sbfields arg may contain fields other than *QUOTINO;
 807	 * VERSIONNUM for example.
 808	 */
 809	spin_lock(&mp->m_sb_lock);
 810	if (flags & XFS_QMOPT_SBVERSION) {
 811		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
 812
 813		xfs_sb_version_addquota(&mp->m_sb);
 814		mp->m_sb.sb_uquotino = NULLFSINO;
 815		mp->m_sb.sb_gquotino = NULLFSINO;
 816		mp->m_sb.sb_pquotino = NULLFSINO;
 817
 818		/* qflags will get updated fully _after_ quotacheck */
 819		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
 820	}
 821	if (flags & XFS_QMOPT_UQUOTA)
 822		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
 823	else if (flags & XFS_QMOPT_GQUOTA)
 824		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
 825	else
 826		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
 827	spin_unlock(&mp->m_sb_lock);
 828	xfs_log_sb(tp);
 829
 830	error = xfs_trans_commit(tp);
 831	if (error) {
 832		ASSERT(XFS_FORCED_SHUTDOWN(mp));
 833		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
 834	}
 835	if (need_alloc)
 836		xfs_finish_inode_setup(*ipp);
 837	return error;
 838}
 839
 840
 841STATIC void
 842xfs_qm_reset_dqcounts(
 843	struct xfs_mount	*mp,
 844	struct xfs_buf		*bp,
 845	xfs_dqid_t		id,
 846	xfs_dqtype_t		type)
 847{
 848	struct xfs_dqblk	*dqb;
 849	int			j;
 850
 851	trace_xfs_reset_dqcounts(bp, _RET_IP_);
 852
 853	/*
 854	 * Reset all counters and timers. They'll be
 855	 * started afresh by xfs_qm_quotacheck.
 856	 */
 857#ifdef DEBUG
 858	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
 859		sizeof(xfs_dqblk_t);
 860	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 861#endif
 862	dqb = bp->b_addr;
 863	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
 864		struct xfs_disk_dquot	*ddq;
 865
 866		ddq = (struct xfs_disk_dquot *)&dqb[j];
 867
 868		/*
 869		 * Do a sanity check, and if needed, repair the dqblk. Don't
 870		 * output any warnings because it's perfectly possible to
 871		 * find uninitialised dquot blks. See comment in
 872		 * xfs_dquot_verify.
 873		 */
 874		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
 875		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
 876			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 877
 878		/*
 879		 * Reset type in case we are reusing group quota file for
 880		 * project quotas or vice versa
 881		 */
 882		ddq->d_type = type;
 883		ddq->d_bcount = 0;
 884		ddq->d_icount = 0;
 885		ddq->d_rtbcount = 0;
 886
 887		/*
 888		 * dquot id 0 stores the default grace period and the maximum
 889		 * warning limit that were set by the administrator, so we
 890		 * should not reset them.
 891		 */
 892		if (ddq->d_id != 0) {
 893			ddq->d_btimer = 0;
 894			ddq->d_itimer = 0;
 895			ddq->d_rtbtimer = 0;
 896			ddq->d_bwarns = 0;
 897			ddq->d_iwarns = 0;
 898			ddq->d_rtbwarns = 0;
 899			if (xfs_sb_version_hasbigtime(&mp->m_sb))
 900				ddq->d_type |= XFS_DQTYPE_BIGTIME;
 901		}
 902
 903		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 904			xfs_update_cksum((char *)&dqb[j],
 905					 sizeof(struct xfs_dqblk),
 906					 XFS_DQUOT_CRC_OFF);
 907		}
 908	}
 909}
 910
 911STATIC int
 912xfs_qm_reset_dqcounts_all(
 913	struct xfs_mount	*mp,
 914	xfs_dqid_t		firstid,
 915	xfs_fsblock_t		bno,
 916	xfs_filblks_t		blkcnt,
 917	xfs_dqtype_t		type,
 918	struct list_head	*buffer_list)
 919{
 920	struct xfs_buf		*bp;
 921	int			error = 0;
 922
 923	ASSERT(blkcnt > 0);
 924
 925	/*
 926	 * Blkcnt arg can be a very big number, and might even be
 927	 * larger than the log itself. So, we have to break it up into
 928	 * manageable-sized transactions.
 929	 * Note that we don't start a permanent transaction here; we might
 930	 * not be able to get a log reservation for the whole thing up front,
 931	 * and we don't really care to either, because we just discard
 932	 * everything if we were to crash in the middle of this loop.
 933	 */
 934	while (blkcnt--) {
 935		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 936			      XFS_FSB_TO_DADDR(mp, bno),
 937			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 938			      &xfs_dquot_buf_ops);
 939
 940		/*
 941		 * CRC and validation errors will return an EFSCORRUPTED here. If
 942		 * this occurs, re-read without CRC validation so that we can
 943		 * repair the damage via xfs_qm_reset_dqcounts(). This process
 944		 * will leave a trace in the log indicating corruption has
 945		 * been detected.
 946		 */
 947		if (error == -EFSCORRUPTED) {
 948			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
 949				      XFS_FSB_TO_DADDR(mp, bno),
 950				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 951				      NULL);
 952		}
 953
 954		if (error)
 955			break;
 956
 957		/*
 958		 * A corrupt buffer might not have a verifier attached, so
 959		 * make sure we have the correct one attached before writeback
 960		 * occurs.
 961		 */
 962		bp->b_ops = &xfs_dquot_buf_ops;
 963		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 964		xfs_buf_delwri_queue(bp, buffer_list);
 965		xfs_buf_relse(bp);
 966
 967		/* goto the next block. */
 968		bno++;
 969		firstid += mp->m_quotainfo->qi_dqperchunk;
 970	}
 971
 972	return error;
 973}
 974
 975/*
 976 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 977 * counters for every chunk of dquots that we find.
 978 */
 979STATIC int
 980xfs_qm_reset_dqcounts_buf(
 981	struct xfs_mount	*mp,
 982	struct xfs_inode	*qip,
 983	xfs_dqtype_t		type,
 984	struct list_head	*buffer_list)
 985{
 986	struct xfs_bmbt_irec	*map;
 987	int			i, nmaps;	/* number of map entries */
 988	int			error;		/* return value */
 989	xfs_fileoff_t		lblkno;
 990	xfs_filblks_t		maxlblkcnt;
 991	xfs_dqid_t		firstid;
 992	xfs_fsblock_t		rablkno;
 993	xfs_filblks_t		rablkcnt;
 994
 995	error = 0;
 996	/*
 997	 * This looks racy, but we can't keep an inode lock across a
 998	 * trans_reserve. But, this gets called during quotacheck, and that
 999	 * happens only at mount time which is single threaded.
1000	 */
1001	if (qip->i_nblocks == 0)
1002		return 0;
1003
1004	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1005
1006	lblkno = 0;
1007	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1008	do {
1009		uint		lock_mode;
1010
1011		nmaps = XFS_DQITER_MAP_SIZE;
1012		/*
1013		 * We aren't changing the inode itself. Just changing
1014		 * some of its data. No new blocks are added here, and
1015		 * the inode is never added to the transaction.
1016		 */
1017		lock_mode = xfs_ilock_data_map_shared(qip);
1018		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1019				       map, &nmaps, 0);
1020		xfs_iunlock(qip, lock_mode);
1021		if (error)
1022			break;
1023
1024		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1025		for (i = 0; i < nmaps; i++) {
1026			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1027			ASSERT(map[i].br_blockcount);
1028
1029
1030			lblkno += map[i].br_blockcount;
1031
1032			if (map[i].br_startblock == HOLESTARTBLOCK)
1033				continue;
1034
1035			firstid = (xfs_dqid_t) map[i].br_startoff *
1036				mp->m_quotainfo->qi_dqperchunk;
1037			/*
1038			 * Do a read-ahead on the next extent.
1039			 */
1040			if ((i+1 < nmaps) &&
1041			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1042				rablkcnt =  map[i+1].br_blockcount;
1043				rablkno = map[i+1].br_startblock;
1044				while (rablkcnt--) {
1045					xfs_buf_readahead(mp->m_ddev_targp,
1046					       XFS_FSB_TO_DADDR(mp, rablkno),
1047					       mp->m_quotainfo->qi_dqchunklen,
1048					       &xfs_dquot_buf_ops);
1049					rablkno++;
1050				}
1051			}
1052			/*
1053			 * Iterate thru all the blks in the extent and
1054			 * reset the counters of all the dquots inside them.
1055			 */
1056			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1057						   map[i].br_startblock,
1058						   map[i].br_blockcount,
1059						   type, buffer_list);
1060			if (error)
1061				goto out;
1062		}
1063	} while (nmaps > 0);
1064
1065out:
1066	kmem_free(map);
1067	return error;
1068}
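The firstid computation above maps a block offset inside the quota inode to the first dquot id stored in that block, and each subsequent block advances the id by qi_dqperchunk. A standalone sketch of that arithmetic, using a hypothetical dquots-per-chunk value (the real one comes from xfs_calc_dquots_per_chunk() at mount time):

#include <stdio.h>

int main(void)
{
	unsigned long long dqperchunk = 30;	/* hypothetical qi_dqperchunk */
	unsigned long long br_startoff = 7;	/* extent offset in the quota inode, in blocks */
	unsigned long long firstid = br_startoff * dqperchunk;

	/* block 7 would hold dquot ids 210..239 under this assumption */
	printf("block %llu holds dquot ids %llu..%llu\n",
	       br_startoff, firstid, firstid + dqperchunk - 1);
	return 0;
}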
1069
1070/*
1071 * Called by dqusage_adjust in doing a quotacheck.
1072 *
1073 * Given the inode and a dquot id, this updates both the incore dquot as well
1074 * as the buffer copy. This is so that once the quotacheck is done, we can
1075 * just log all the buffers, as opposed to logging numerous updates to
1076 * individual dquots.
1077 */
1078STATIC int
1079xfs_qm_quotacheck_dqadjust(
1080	struct xfs_inode	*ip,
1081	xfs_dqtype_t		type,
1082	xfs_qcnt_t		nblks,
1083	xfs_qcnt_t		rtblks)
1084{
1085	struct xfs_mount	*mp = ip->i_mount;
1086	struct xfs_dquot	*dqp;
1087	xfs_dqid_t		id;
1088	int			error;
1089
1090	id = xfs_qm_id_for_quotatype(ip, type);
1091	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1092	if (error) {
1093		/*
1094		 * Shouldn't be able to turn off quotas here.
1095		 */
1096		ASSERT(error != -ESRCH);
1097		ASSERT(error != -ENOENT);
1098		return error;
1099	}
1100
1101	trace_xfs_dqadjust(dqp);
1102
1103	/*
1104	 * Adjust the inode count and the block count to reflect this inode's
1105	 * resource usage.
1106	 */
1107	dqp->q_ino.count++;
1108	dqp->q_ino.reserved++;
1109	if (nblks) {
1110		dqp->q_blk.count += nblks;
1111		dqp->q_blk.reserved += nblks;
1112	}
1113	if (rtblks) {
1114		dqp->q_rtb.count += rtblks;
1115		dqp->q_rtb.reserved += rtblks;
1116	}
1117
1118	/*
1119	 * Set default limits, adjust timers (since we changed usages)
1120	 *
1121	 * There are no timers for the default values set in the root dquot.
1122	 */
1123	if (dqp->q_id) {
1124		xfs_qm_adjust_dqlimits(dqp);
1125		xfs_qm_adjust_dqtimers(dqp);
1126	}
1127
1128	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1129	xfs_qm_dqput(dqp);
1130	return 0;
1131}
1132
1133/*
1134 * callback routine supplied to bulkstat(). Given an inumber, find its
1135 * dquots and update them to account for resources taken by that inode.
1136 */
1137/* ARGSUSED */
1138STATIC int
1139xfs_qm_dqusage_adjust(
1140	struct xfs_mount	*mp,
1141	struct xfs_trans	*tp,
1142	xfs_ino_t		ino,
1143	void			*data)
1144{
1145	struct xfs_inode	*ip;
1146	xfs_qcnt_t		nblks;
1147	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1148	int			error;
1149
1150	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1151
1152	/*
1153	 * rootino must have its resources accounted for, not so with the quota
1154	 * inodes.
1155	 */
1156	if (xfs_is_quota_inode(&mp->m_sb, ino))
1157		return 0;
1158
1159	/*
1160	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1161	 * at mount time and therefore nobody will be racing chown/chproj.
1162	 */
1163	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1164	if (error == -EINVAL || error == -ENOENT)
1165		return 0;
1166	if (error)
1167		return error;
1168
1169	ASSERT(ip->i_delayed_blks == 0);
1170
1171	if (XFS_IS_REALTIME_INODE(ip)) {
1172		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1173
1174		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1175		if (error)
1176			goto error0;
1177
1178		xfs_bmap_count_leaves(ifp, &rtblks);
1179	}
1180
1181	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1182
1183	/*
1184	 * Add the (disk blocks and inode) resources occupied by this
1185	 * inode to its dquots. We do this adjustment in the incore dquot,
1186	 * and also copy the changes to its buffer.
1187	 * We don't care about putting these changes in a transaction
1188	 * envelope because if we crash in the middle of a 'quotacheck'
1189	 * we have to start from the beginning anyway.
1190	 * Once we're done, we'll log all the dquot bufs.
1191	 *
1192	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1193	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1194	 */
1195	if (XFS_IS_UQUOTA_ON(mp)) {
1196		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1197				rtblks);
1198		if (error)
1199			goto error0;
1200	}
1201
1202	if (XFS_IS_GQUOTA_ON(mp)) {
1203		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1204				rtblks);
1205		if (error)
1206			goto error0;
1207	}
1208
1209	if (XFS_IS_PQUOTA_ON(mp)) {
1210		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1211				rtblks);
1212		if (error)
1213			goto error0;
1214	}
1215
1216error0:
1217	xfs_irele(ip);
1218	return error;
1219}
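As a worked example of the adjustment performed above (all numbers invented for illustration): an inode owning 100 blocks in total, 40 of them realtime, contributes 60 blocks to q_blk, 40 blocks to q_rtb and one inode to q_ino on every enabled quota type's dquot; the matching reserved counters are bumped by the same amounts. A standalone sketch of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t i_nblocks = 100;	/* all blocks attributed to the inode */
	uint64_t rtblks = 40;		/* realtime blocks counted from the data fork */
	uint64_t nblks = i_nblocks - rtblks;

	/* per enabled quota type, quotacheck bumps that type's dquot: */
	uint64_t q_ino_count = 0, q_blk_count = 0, q_rtb_count = 0;

	q_ino_count += 1;		/* one inode */
	q_blk_count += nblks;		/* 60 data-device blocks */
	q_rtb_count += rtblks;		/* 40 realtime blocks */

	assert(q_blk_count + q_rtb_count == i_nblocks);
	return 0;
}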
1220
1221STATIC int
1222xfs_qm_flush_one(
1223	struct xfs_dquot	*dqp,
1224	void			*data)
1225{
1226	struct xfs_mount	*mp = dqp->q_mount;
1227	struct list_head	*buffer_list = data;
1228	struct xfs_buf		*bp = NULL;
1229	int			error = 0;
1230
1231	xfs_dqlock(dqp);
1232	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1233		goto out_unlock;
1234	if (!XFS_DQ_IS_DIRTY(dqp))
1235		goto out_unlock;
1236
1237	/*
1238	 * The only way the dquot is already flush locked by the time quotacheck
1239	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1240	 * it for the final time. Quotacheck collects all dquot bufs in the
1241	 * local delwri queue before dquots are dirtied, so reclaim can't have
1242	 * possibly queued it for I/O. The only way out is to push the buffer to
1243	 * cycle the flush lock.
1244	 */
1245	if (!xfs_dqflock_nowait(dqp)) {
1246		/* buf is pinned in-core by delwri list */
1247		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1248				mp->m_quotainfo->qi_dqchunklen, 0);
1249		if (!bp) {
1250			error = -EINVAL;
1251			goto out_unlock;
1252		}
1253		xfs_buf_unlock(bp);
1254
1255		xfs_buf_delwri_pushbuf(bp, buffer_list);
1256		xfs_buf_rele(bp);
1257
1258		error = -EAGAIN;
1259		goto out_unlock;
1260	}
1261
1262	error = xfs_qm_dqflush(dqp, &bp);
1263	if (error)
1264		goto out_unlock;
1265
1266	xfs_buf_delwri_queue(bp, buffer_list);
1267	xfs_buf_relse(bp);
1268out_unlock:
1269	xfs_dqunlock(dqp);
1270	return error;
1271}
1272
1273/*
1274 * Walk thru all the filesystem inodes and construct a consistent view
1275 * of the disk quota world. If the quotacheck fails, disable quotas.
1276 */
1277STATIC int
1278xfs_qm_quotacheck(
1279	xfs_mount_t	*mp)
1280{
1281	int			error, error2;
1282	uint			flags;
1283	LIST_HEAD		(buffer_list);
1284	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1285	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1286	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1287
1288	flags = 0;
1289
1290	ASSERT(uip || gip || pip);
1291	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1292
1293	xfs_notice(mp, "Quotacheck needed: Please wait.");
1294
1295	/*
1296	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1297	 * their counters to zero. We need a clean slate.
1298	 * We don't log our changes till later.
1299	 */
1300	if (uip) {
1301		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1302					 &buffer_list);
1303		if (error)
1304			goto error_return;
1305		flags |= XFS_UQUOTA_CHKD;
1306	}
1307
1308	if (gip) {
1309		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1310					 &buffer_list);
1311		if (error)
1312			goto error_return;
1313		flags |= XFS_GQUOTA_CHKD;
1314	}
1315
1316	if (pip) {
1317		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1318					 &buffer_list);
1319		if (error)
1320			goto error_return;
1321		flags |= XFS_PQUOTA_CHKD;
1322	}
1323
1324	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1325			NULL);
1326	if (error)
1327		goto error_return;
1328
1329	/*
1330	 * We've made all the changes that we need to make incore.  Flush them
1331	 * down to disk buffers if everything was updated successfully.
1332	 */
1333	if (XFS_IS_UQUOTA_ON(mp)) {
1334		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1335					  &buffer_list);
1336	}
1337	if (XFS_IS_GQUOTA_ON(mp)) {
1338		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1339					   &buffer_list);
1340		if (!error)
1341			error = error2;
1342	}
1343	if (XFS_IS_PQUOTA_ON(mp)) {
1344		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1345					   &buffer_list);
1346		if (!error)
1347			error = error2;
1348	}
1349
1350	error2 = xfs_buf_delwri_submit(&buffer_list);
1351	if (!error)
1352		error = error2;
1353
1354	/*
1355	 * We can get this error if we couldn't do a dquot allocation inside
1356	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1357	 * dirty dquots that might be cached, we just want to get rid of them
1358	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1359	 * at this point (because we intentionally didn't in dqget_noattach).
1360	 */
1361	if (error) {
1362		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1363		goto error_return;
1364	}
1365
1366	/*
1367	 * If one type of quotas is off, then it will lose its
1368	 * quotachecked status, since we won't be doing accounting for
1369	 * that type anymore.
1370	 */
1371	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1372	mp->m_qflags |= flags;
1373
1374 error_return:
1375	xfs_buf_delwri_cancel(&buffer_list);
1376
1377	if (error) {
1378		xfs_warn(mp,
1379	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1380			error);
1381		/*
1382		 * We must turn off quotas.
1383		 */
1384		ASSERT(mp->m_quotainfo != NULL);
1385		xfs_qm_destroy_quotainfo(mp);
1386		if (xfs_mount_reset_sbqflags(mp)) {
1387			xfs_warn(mp,
1388				"Quotacheck: Failed to reset quota flags.");
1389		}
1390	} else
1391		xfs_notice(mp, "Quotacheck: Done.");
1392	return error;
1393}
1394
1395/*
1396 * This is called from xfs_mountfs to start quotas and initialize all
1397 * necessary data structures like quotainfo.  This is also responsible for
1398 * running a quotacheck as necessary.  We are guaranteed that the superblock
1399 * is consistently read in at this point.
1400 *
1401 * If we fail here, the mount will continue with quota turned off. We don't
1402 * need to indicate success or failure at all.
1403 */
1404void
1405xfs_qm_mount_quotas(
1406	struct xfs_mount	*mp)
1407{
1408	int			error = 0;
1409	uint			sbf;
1410
1411	/*
1412	 * If quotas on realtime volumes are not supported, we disable
1413	 * quotas immediately.
1414	 */
1415	if (mp->m_sb.sb_rextents) {
1416		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1417		mp->m_qflags = 0;
1418		goto write_changes;
1419	}
1420
1421	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1422
1423	/*
1424	 * Allocate the quotainfo structure inside the mount struct, and
1425	 * create quotainode(s), and change/rev superblock if necessary.
1426	 */
1427	error = xfs_qm_init_quotainfo(mp);
1428	if (error) {
1429		/*
1430		 * We must turn off quotas.
1431		 */
1432		ASSERT(mp->m_quotainfo == NULL);
1433		mp->m_qflags = 0;
1434		goto write_changes;
1435	}
1436	/*
1437	 * If any of the quotas are not consistent, do a quotacheck.
1438	 */
1439	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1440		error = xfs_qm_quotacheck(mp);
1441		if (error) {
1442			/* Quotacheck failed and disabled quotas. */
1443			return;
1444		}
1445	}
1446	/*
1447	 * If one type of quotas is off, then it will lose its
1448	 * quotachecked status, since we won't be doing accounting for
1449	 * that type anymore.
1450	 */
1451	if (!XFS_IS_UQUOTA_ON(mp))
1452		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1453	if (!XFS_IS_GQUOTA_ON(mp))
1454		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1455	if (!XFS_IS_PQUOTA_ON(mp))
1456		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1457
1458 write_changes:
1459	/*
1460	 * We actually don't have to acquire the m_sb_lock at all.
1461	 * This can only be called from mount, and that's single threaded. XXX
1462	 */
1463	spin_lock(&mp->m_sb_lock);
1464	sbf = mp->m_sb.sb_qflags;
1465	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1466	spin_unlock(&mp->m_sb_lock);
1467
1468	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1469		if (xfs_sync_sb(mp, false)) {
1470			/*
1471			 * We could only have been turning quotas off.
1472			 * We aren't in very good shape actually because
1473			 * the incore structures are convinced that quotas are
1474			 * off, but the on-disk superblock doesn't know that!
1475			 */
1476			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1477			xfs_alert(mp, "%s: Superblock update failed!",
1478				__func__);
1479		}
1480	}
1481
1482	if (error) {
1483		xfs_warn(mp, "Failed to initialize disk quotas.");
1484		return;
1485	}
1486}
1487
1488/*
1489 * This is called after the superblock has been read in and we're ready to
1490 * iget the quota inodes.
1491 */
1492STATIC int
1493xfs_qm_init_quotainos(
1494	xfs_mount_t	*mp)
1495{
1496	struct xfs_inode	*uip = NULL;
1497	struct xfs_inode	*gip = NULL;
1498	struct xfs_inode	*pip = NULL;
1499	int			error;
1500	uint			flags = 0;
1501
1502	ASSERT(mp->m_quotainfo);
1503
1504	/*
1505	 * Get the uquota and gquota inodes
1506	 */
1507	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1508		if (XFS_IS_UQUOTA_ON(mp) &&
1509		    mp->m_sb.sb_uquotino != NULLFSINO) {
1510			ASSERT(mp->m_sb.sb_uquotino > 0);
1511			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1512					     0, 0, &uip);
1513			if (error)
1514				return error;
1515		}
1516		if (XFS_IS_GQUOTA_ON(mp) &&
1517		    mp->m_sb.sb_gquotino != NULLFSINO) {
1518			ASSERT(mp->m_sb.sb_gquotino > 0);
1519			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1520					     0, 0, &gip);
1521			if (error)
1522				goto error_rele;
1523		}
1524		if (XFS_IS_PQUOTA_ON(mp) &&
1525		    mp->m_sb.sb_pquotino != NULLFSINO) {
1526			ASSERT(mp->m_sb.sb_pquotino > 0);
1527			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1528					     0, 0, &pip);
1529			if (error)
1530				goto error_rele;
1531		}
1532	} else {
1533		flags |= XFS_QMOPT_SBVERSION;
1534	}
1535
1536	/*
1537	 * Create the three inodes, if they don't exist already. The changes
1538	 * made above will get added to a transaction and logged in one of
1539	 * the qino_alloc calls below.  If the device is readonly,
1540	 * temporarily switch to read-write to do this.
1541	 */
1542	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1543		error = xfs_qm_qino_alloc(mp, &uip,
1544					      flags | XFS_QMOPT_UQUOTA);
1545		if (error)
1546			goto error_rele;
1547
1548		flags &= ~XFS_QMOPT_SBVERSION;
1549	}
1550	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1551		error = xfs_qm_qino_alloc(mp, &gip,
1552					  flags | XFS_QMOPT_GQUOTA);
1553		if (error)
1554			goto error_rele;
1555
1556		flags &= ~XFS_QMOPT_SBVERSION;
1557	}
1558	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1559		error = xfs_qm_qino_alloc(mp, &pip,
1560					  flags | XFS_QMOPT_PQUOTA);
1561		if (error)
1562			goto error_rele;
1563	}
1564
1565	mp->m_quotainfo->qi_uquotaip = uip;
1566	mp->m_quotainfo->qi_gquotaip = gip;
1567	mp->m_quotainfo->qi_pquotaip = pip;
1568
1569	return 0;
1570
1571error_rele:
1572	if (uip)
1573		xfs_irele(uip);
1574	if (gip)
1575		xfs_irele(gip);
1576	if (pip)
1577		xfs_irele(pip);
1578	return error;
1579}
1580
1581STATIC void
1582xfs_qm_destroy_quotainos(
1583	struct xfs_quotainfo	*qi)
1584{
1585	if (qi->qi_uquotaip) {
1586		xfs_irele(qi->qi_uquotaip);
1587		qi->qi_uquotaip = NULL; /* paranoia */
1588	}
1589	if (qi->qi_gquotaip) {
1590		xfs_irele(qi->qi_gquotaip);
1591		qi->qi_gquotaip = NULL;
1592	}
1593	if (qi->qi_pquotaip) {
1594		xfs_irele(qi->qi_pquotaip);
1595		qi->qi_pquotaip = NULL;
1596	}
1597}
1598
1599STATIC void
1600xfs_qm_dqfree_one(
1601	struct xfs_dquot	*dqp)
1602{
1603	struct xfs_mount	*mp = dqp->q_mount;
1604	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1605
1606	mutex_lock(&qi->qi_tree_lock);
1607	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1608
1609	qi->qi_dquots--;
1610	mutex_unlock(&qi->qi_tree_lock);
1611
1612	xfs_qm_dqdestroy(dqp);
1613}
1614
1615/* --------------- utility functions for vnodeops ---------------- */
1616
1617
1618/*
1619 * Given an inode, a uid, gid and prid, make sure that we have
1620 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1621 * quotas by creating this file.
1622 * This also attaches dquot(s) to the given inode after locking it,
1623 * and returns the dquots corresponding to the uid and/or gid.
1624 *
1625 * in	: inode (unlocked)
1626 * out	: udquot, gdquot with references taken and unlocked
1627 */
1628int
1629xfs_qm_vop_dqalloc(
1630	struct xfs_inode	*ip,
1631	kuid_t			uid,
1632	kgid_t			gid,
1633	prid_t			prid,
1634	uint			flags,
1635	struct xfs_dquot	**O_udqpp,
1636	struct xfs_dquot	**O_gdqpp,
1637	struct xfs_dquot	**O_pdqpp)
1638{
1639	struct xfs_mount	*mp = ip->i_mount;
1640	struct inode		*inode = VFS_I(ip);
1641	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1642	struct xfs_dquot	*uq = NULL;
1643	struct xfs_dquot	*gq = NULL;
1644	struct xfs_dquot	*pq = NULL;
1645	int			error;
1646	uint			lockflags;
1647
1648	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1649		return 0;
1650
1651	lockflags = XFS_ILOCK_EXCL;
1652	xfs_ilock(ip, lockflags);
1653
1654	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1655		gid = inode->i_gid;
1656
1657	/*
1658	 * Attach the dquot(s) to this inode, doing a dquot allocation
1659	 * if necessary. The dquot(s) will not be locked.
1660	 */
1661	if (XFS_NOT_DQATTACHED(mp, ip)) {
1662		error = xfs_qm_dqattach_locked(ip, true);
1663		if (error) {
1664			xfs_iunlock(ip, lockflags);
1665			return error;
1666		}
1667	}
1668
1669	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1670		ASSERT(O_udqpp);
1671		if (!uid_eq(inode->i_uid, uid)) {
1672			/*
1673			 * What we need is the dquot that has this uid, and
1674			 * if we send the inode to dqget, the uid of the inode
1675			 * takes priority over what's sent in the uid argument.
1676			 * We must unlock inode here before calling dqget if
1677			 * we're not sending the inode, because otherwise
1678			 * we'll deadlock by doing trans_reserve while
1679			 * holding ilock.
1680			 */
1681			xfs_iunlock(ip, lockflags);
1682			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1683					XFS_DQTYPE_USER, true, &uq);
1684			if (error) {
1685				ASSERT(error != -ENOENT);
1686				return error;
1687			}
1688			/*
1689			 * Get the ilock in the right order.
1690			 */
1691			xfs_dqunlock(uq);
1692			lockflags = XFS_ILOCK_SHARED;
1693			xfs_ilock(ip, lockflags);
1694		} else {
1695			/*
1696			 * Take an extra reference, because we'll return
1697			 * this to caller
1698			 */
1699			ASSERT(ip->i_udquot);
1700			uq = xfs_qm_dqhold(ip->i_udquot);
1701		}
1702	}
1703	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1704		ASSERT(O_gdqpp);
1705		if (!gid_eq(inode->i_gid, gid)) {
1706			xfs_iunlock(ip, lockflags);
1707			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1708					XFS_DQTYPE_GROUP, true, &gq);
1709			if (error) {
1710				ASSERT(error != -ENOENT);
1711				goto error_rele;
1712			}
1713			xfs_dqunlock(gq);
1714			lockflags = XFS_ILOCK_SHARED;
1715			xfs_ilock(ip, lockflags);
1716		} else {
1717			ASSERT(ip->i_gdquot);
1718			gq = xfs_qm_dqhold(ip->i_gdquot);
1719		}
1720	}
1721	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722		ASSERT(O_pdqpp);
1723		if (ip->i_projid != prid) {
1724			xfs_iunlock(ip, lockflags);
1725			error = xfs_qm_dqget(mp, prid,
1726					XFS_DQTYPE_PROJ, true, &pq);
1727			if (error) {
1728				ASSERT(error != -ENOENT);
1729				goto error_rele;
1730			}
1731			xfs_dqunlock(pq);
1732			lockflags = XFS_ILOCK_SHARED;
1733			xfs_ilock(ip, lockflags);
1734		} else {
1735			ASSERT(ip->i_pdquot);
1736			pq = xfs_qm_dqhold(ip->i_pdquot);
1737		}
1738	}
1739	trace_xfs_dquot_dqalloc(ip);
1740
1741	xfs_iunlock(ip, lockflags);
1742	if (O_udqpp)
1743		*O_udqpp = uq;
1744	else
1745		xfs_qm_dqrele(uq);
1746	if (O_gdqpp)
1747		*O_gdqpp = gq;
1748	else
1749		xfs_qm_dqrele(gq);
1750	if (O_pdqpp)
1751		*O_pdqpp = pq;
1752	else
1753		xfs_qm_dqrele(pq);
1754	return 0;
1755
1756error_rele:
1757	xfs_qm_dqrele(gq);
1758	xfs_qm_dqrele(uq);
1759	return error;
1760}
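
/*
 * Rough sketch of how a typical creation-time caller is expected to use
 * xfs_qm_vop_dqalloc() (illustrative only; the real callers also derive
 * uid/gid/prid from the idmapping and the parent directory, and reserve
 * quota as part of the creation transaction):
 *
 *	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *			&udqp, &gdqp, &pdqp);
 *	if (error)
 *		return error;
 *
 *	... allocate a transaction, reserve quota, create the inode ...
 *
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *
 *	... commit the transaction ...
 *
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */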
1761
1762/*
1763 * Actually transfer ownership, and do the dquot modifications.
1764 * The necessary quota reservations were already taken by the caller.
1765 */
1766struct xfs_dquot *
1767xfs_qm_vop_chown(
1768	struct xfs_trans	*tp,
1769	struct xfs_inode	*ip,
1770	struct xfs_dquot	**IO_olddq,
1771	struct xfs_dquot	*newdq)
1772{
1773	struct xfs_dquot	*prevdq;
1774	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1775				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776
1777
1778	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780
1781	/* old dquot */
1782	prevdq = *IO_olddq;
1783	ASSERT(prevdq);
1784	ASSERT(prevdq != newdq);
1785
1786	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1787	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789	/* the sparkling new dquot */
1790	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1791	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792
1793	/*
1794	 * Back when we made quota reservations for the chown, we reserved the
1795	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
1796	 * switched the dquots, decrease the new dquot's block reservation
1797	 * (having already bumped up the real counter) so that we don't have
1798	 * any reservation to give back when we commit.
1799	 */
1800	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1801			-ip->i_delayed_blks);
1802
1803	/*
1804	 * Give the incore reservation for delalloc blocks back to the old
1805	 * dquot.  We don't normally handle delalloc quota reservations
1806	 * transactionally, so just lock the dquot and subtract from the
1807	 * reservation.  Dirty the transaction because it's too late to turn
1808	 * back now.
1809	 */
1810	tp->t_flags |= XFS_TRANS_DIRTY;
1811	xfs_dqlock(prevdq);
1812	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1813	prevdq->q_blk.reserved -= ip->i_delayed_blks;
1814	xfs_dqunlock(prevdq);
1815
1816	/*
1817	 * Take an extra reference, because the inode is going to keep
1818	 * this dquot pointer even after the trans_commit.
1819	 */
1820	*IO_olddq = xfs_qm_dqhold(newdq);
1821
1822	return prevdq;
1823}
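
/*
 * Illustrative walk-through of the accounting above, with invented
 * numbers: suppose the inode owns 100 ondisk blocks and has 20 delalloc
 * blocks outstanding, so the caller reserved 120 blocks against newdq
 * before calling in.  The function then
 *
 *   - moves the 100 real blocks and one inode from prevdq to newdq,
 *   - trims 20 blocks from the transaction's reservation against newdq,
 *     so the remaining 100 are exactly consumed by the real-count bump
 *     at commit and nothing is handed back, which leaves 20 blocks of
 *     incore reservation on newdq to keep covering the delalloc extents,
 *   - and subtracts the 20-block delalloc reservation directly from
 *     prevdq, which no longer has to back those blocks.
 */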
1824
1825int
1826xfs_qm_vop_rename_dqattach(
1827	struct xfs_inode	**i_tab)
1828{
1829	struct xfs_mount	*mp = i_tab[0]->i_mount;
1830	int			i;
1831
1832	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1833		return 0;
1834
1835	for (i = 0; (i < 4 && i_tab[i]); i++) {
1836		struct xfs_inode	*ip = i_tab[i];
1837		int			error;
1838
1839		/*
1840		 * Watch out for duplicate entries in the table.
1841		 */
1842		if (i == 0 || ip != i_tab[i-1]) {
1843			if (XFS_NOT_DQATTACHED(mp, ip)) {
1844				error = xfs_qm_dqattach(ip);
1845				if (error)
1846					return error;
1847			}
1848		}
1849	}
1850	return 0;
1851}
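
/*
 * Minimal sketch of the calling convention assumed above (not lifted
 * from the rename code itself): the caller passes an array of up to
 * four inode pointers, NULL terminated if there are fewer, e.g.
 *
 *	struct xfs_inode	*i_tab[4] = { src_dp, target_dp, src_ip, NULL };
 *
 *	error = xfs_qm_vop_rename_dqattach(i_tab);
 *	if (error)
 *		goto out;
 *
 * Adjacent duplicate entries are skipped, and attaching twice would be
 * harmless anyway because an already-attached inode fails the
 * XFS_NOT_DQATTACHED() check.
 */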
1852
1853void
1854xfs_qm_vop_create_dqattach(
1855	struct xfs_trans	*tp,
1856	struct xfs_inode	*ip,
1857	struct xfs_dquot	*udqp,
1858	struct xfs_dquot	*gdqp,
1859	struct xfs_dquot	*pdqp)
1860{
1861	struct xfs_mount	*mp = tp->t_mountp;
1862
1863	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1864		return;
1865
1866	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1867
1868	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1869		ASSERT(ip->i_udquot == NULL);
1870		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1871
1872		ip->i_udquot = xfs_qm_dqhold(udqp);
1873		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1874	}
1875	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1876		ASSERT(ip->i_gdquot == NULL);
1877		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1878
1879		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1880		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1881	}
1882	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1883		ASSERT(ip->i_pdquot == NULL);
1884		ASSERT(ip->i_projid == pdqp->q_id);
1885
1886		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1887		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1888	}
1889}
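
/*
 * Contract assumed by xfs_qm_vop_create_dqattach(), as implied by the
 * asserts above rather than taken from any one caller: the freshly
 * created inode is held ILOCK_EXCL inside the creation transaction, its
 * uid/gid/projid already match the dquots handed back earlier by
 * xfs_qm_vop_dqalloc(), and each dquot that gets attached picks up both
 * an inode-count delta in the transaction and an extra reference that
 * stays with the inode.
 */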
1890