v4.6
   1/*
   2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_format.h"
  21#include "xfs_log_format.h"
  22#include "xfs_shared.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_mount.h"
  26#include "xfs_inode.h"
  27#include "xfs_bmap.h"
  28#include "xfs_bmap_util.h"
  29#include "xfs_alloc.h"
  30#include "xfs_quota.h"
  31#include "xfs_error.h"
  32#include "xfs_trans.h"
  33#include "xfs_buf_item.h"
  34#include "xfs_trans_space.h"
  35#include "xfs_trans_priv.h"
  36#include "xfs_qm.h"
  37#include "xfs_cksum.h"
  38#include "xfs_trace.h"
  39#include "xfs_log.h"
  40#include "xfs_bmap_btree.h"
  41
  42/*
  43 * Lock order:
  44 *
  45 * ip->i_lock
  46 *   qi->qi_tree_lock
  47 *     dquot->q_qlock (xfs_dqlock() and friends)
  48 *       dquot->q_flush (xfs_dqflock() and friends)
  49 *       qi->qi_lru_lock
  50 *
   51 * If two dquots need to be locked, the order is user before group/project,
   52 * otherwise by the lowest id first; see xfs_dqlock2.
  53 */
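
/*
 * Illustrative sketch (assumed caller pattern, not part of this file):
 * a caller holding two dquots, e.g. the user and group dquots during a
 * chown, takes both locks through xfs_dqlock2() (defined near the end
 * of this file) rather than open-coding the ordering rule above:
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	... modify both dquots ...
 *	xfs_dqunlock(gdqp);
 *	xfs_dqunlock(udqp);
 */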
  54
  55#ifdef DEBUG
  56xfs_buftarg_t *xfs_dqerror_target;
  57int xfs_do_dqerror;
  58int xfs_dqreq_num;
  59int xfs_dqerror_mod = 33;
  60#endif
  61
  62struct kmem_zone		*xfs_qm_dqtrxzone;
  63static struct kmem_zone		*xfs_qm_dqzone;
  64
  65static struct lock_class_key xfs_dquot_group_class;
  66static struct lock_class_key xfs_dquot_project_class;
  67
  68/*
  69 * This is called to free all the memory associated with a dquot
  70 */
  71void
  72xfs_qm_dqdestroy(
  73	xfs_dquot_t	*dqp)
  74{
  75	ASSERT(list_empty(&dqp->q_lru));
  76
  77	mutex_destroy(&dqp->q_qlock);
  78
  79	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
  80	kmem_zone_free(xfs_qm_dqzone, dqp);
  81}
  82
  83/*
  84 * If default limits are in force, push them into the dquot now.
  85 * We overwrite the dquot limits only if they are zero and this
  86 * is not the root dquot.
  87 */
  88void
  89xfs_qm_adjust_dqlimits(
  90	struct xfs_mount	*mp,
  91	struct xfs_dquot	*dq)
  92{
  93	struct xfs_quotainfo	*q = mp->m_quotainfo;
  94	struct xfs_disk_dquot	*d = &dq->q_core;
  95	struct xfs_def_quota	*defq;
  96	int			prealloc = 0;
  97
  98	ASSERT(d->d_id);
  99	defq = xfs_get_defquota(dq, q);
 100
 101	if (defq->bsoftlimit && !d->d_blk_softlimit) {
 102		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
 103		prealloc = 1;
 104	}
 105	if (defq->bhardlimit && !d->d_blk_hardlimit) {
 106		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
 107		prealloc = 1;
 108	}
 109	if (defq->isoftlimit && !d->d_ino_softlimit)
 110		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
 111	if (defq->ihardlimit && !d->d_ino_hardlimit)
 112		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
 113	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
 114		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
 115	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
 116		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 117
 118	if (prealloc)
 119		xfs_dquot_set_prealloc_limits(dq);
 120}
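
/*
 * Worked example (values assumed for illustration): if the id-0 dquot
 * carries a default block soft limit of 100 and this dquot was read in
 * with d_blk_softlimit == 0, xfs_qm_adjust_dqlimits() stores
 * cpu_to_be64(100) into d_blk_softlimit and recomputes the prealloc
 * watermarks; limits that are already non-zero are left untouched.
 */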
 121
 122/*
 123 * Check the limits and timers of a dquot and start or reset timers
 124 * if necessary.
 125 * This gets called even when quota enforcement is OFF, which makes our
 126 * life a little less complicated. (We just don't reject any quota
 127 * reservations in that case, when enforcement is off).
 128 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 129 * enforcement's off.
 130 * In contrast, warnings are a little different in that they don't
 131 * 'automatically' get started when limits get exceeded.  They do
 132 * get reset to zero, however, when we find the count to be under
 133 * the soft limit (they are only ever set non-zero via userspace).
 134 */
 135void
 136xfs_qm_adjust_dqtimers(
 137	xfs_mount_t		*mp,
 138	xfs_disk_dquot_t	*d)
 139{
 140	ASSERT(d->d_id);
 141
 142#ifdef DEBUG
 143	if (d->d_blk_hardlimit)
 144		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
 145		       be64_to_cpu(d->d_blk_hardlimit));
 146	if (d->d_ino_hardlimit)
 147		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
 148		       be64_to_cpu(d->d_ino_hardlimit));
 149	if (d->d_rtb_hardlimit)
 150		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
 151		       be64_to_cpu(d->d_rtb_hardlimit));
 152#endif
 153
 154	if (!d->d_btimer) {
 155		if ((d->d_blk_softlimit &&
 156		     (be64_to_cpu(d->d_bcount) >
 157		      be64_to_cpu(d->d_blk_softlimit))) ||
 158		    (d->d_blk_hardlimit &&
 159		     (be64_to_cpu(d->d_bcount) >
 160		      be64_to_cpu(d->d_blk_hardlimit)))) {
 161			d->d_btimer = cpu_to_be32(get_seconds() +
 162					mp->m_quotainfo->qi_btimelimit);
 163		} else {
 164			d->d_bwarns = 0;
 165		}
 166	} else {
 167		if ((!d->d_blk_softlimit ||
 168		     (be64_to_cpu(d->d_bcount) <=
 169		      be64_to_cpu(d->d_blk_softlimit))) &&
 170		    (!d->d_blk_hardlimit ||
 171		    (be64_to_cpu(d->d_bcount) <=
 172		     be64_to_cpu(d->d_blk_hardlimit)))) {
 173			d->d_btimer = 0;
 174		}
 175	}
 176
 177	if (!d->d_itimer) {
 178		if ((d->d_ino_softlimit &&
 179		     (be64_to_cpu(d->d_icount) >
 180		      be64_to_cpu(d->d_ino_softlimit))) ||
 181		    (d->d_ino_hardlimit &&
 182		     (be64_to_cpu(d->d_icount) >
 183		      be64_to_cpu(d->d_ino_hardlimit)))) {
 184			d->d_itimer = cpu_to_be32(get_seconds() +
 185					mp->m_quotainfo->qi_itimelimit);
 186		} else {
 187			d->d_iwarns = 0;
 188		}
 189	} else {
 190		if ((!d->d_ino_softlimit ||
 191		     (be64_to_cpu(d->d_icount) <=
 192		      be64_to_cpu(d->d_ino_softlimit)))  &&
 193		    (!d->d_ino_hardlimit ||
 194		     (be64_to_cpu(d->d_icount) <=
 195		      be64_to_cpu(d->d_ino_hardlimit)))) {
 196			d->d_itimer = 0;
 197		}
 198	}
 199
 200	if (!d->d_rtbtimer) {
 201		if ((d->d_rtb_softlimit &&
 202		     (be64_to_cpu(d->d_rtbcount) >
 203		      be64_to_cpu(d->d_rtb_softlimit))) ||
 204		    (d->d_rtb_hardlimit &&
 205		     (be64_to_cpu(d->d_rtbcount) >
 206		      be64_to_cpu(d->d_rtb_hardlimit)))) {
 207			d->d_rtbtimer = cpu_to_be32(get_seconds() +
 208					mp->m_quotainfo->qi_rtbtimelimit);
 209		} else {
 210			d->d_rtbwarns = 0;
 211		}
 212	} else {
 213		if ((!d->d_rtb_softlimit ||
 214		     (be64_to_cpu(d->d_rtbcount) <=
 215		      be64_to_cpu(d->d_rtb_softlimit))) &&
 216		    (!d->d_rtb_hardlimit ||
 217		     (be64_to_cpu(d->d_rtbcount) <=
 218		      be64_to_cpu(d->d_rtb_hardlimit)))) {
 219			d->d_rtbtimer = 0;
 220		}
 221	}
 222}
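
/*
 * Worked example (values assumed for illustration): the first time
 * d_bcount exceeds d_blk_softlimit while d_btimer == 0, the timer is
 * armed to get_seconds() + qi_btimelimit, i.e. "now" plus the block
 * grace period (seven days unless the administrator changed it); once
 * usage drops back under both limits the timer is cleared to 0 again.
 */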
 223
 224/*
 225 * initialize a buffer full of dquots and log the whole thing
 226 */
 227STATIC void
 228xfs_qm_init_dquot_blk(
 229	xfs_trans_t	*tp,
 230	xfs_mount_t	*mp,
 231	xfs_dqid_t	id,
 232	uint		type,
 233	xfs_buf_t	*bp)
 234{
 235	struct xfs_quotainfo	*q = mp->m_quotainfo;
 236	xfs_dqblk_t	*d;
 237	xfs_dqid_t	curid;
 238	int		i;
 239
 240	ASSERT(tp);
 241	ASSERT(xfs_buf_islocked(bp));
 242
 243	d = bp->b_addr;
 244
 245	/*
  246	 * ID of the first dquot in the block - ids are zero based.
 247	 */
 248	curid = id - (id % q->qi_dqperchunk);
 249	memset(d, 0, BBTOB(q->qi_dqchunklen));
 250	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 251		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
 252		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 253		d->dd_diskdq.d_id = cpu_to_be32(curid);
 254		d->dd_diskdq.d_flags = type;
 255		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 256			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 257			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 258					 XFS_DQUOT_CRC_OFF);
 259		}
 260	}
 261
 262	xfs_trans_dquot_buf(tp, bp,
 263			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
 264			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
 265			     XFS_BLF_GDQUOT_BUF)));
 266	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 267}
 268
 269/*
 270 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 271 * watermarks correspond to the soft and hard limits by default. If a soft limit
 272 * is not specified, we use 95% of the hard limit.
 273 */
 274void
 275xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 276{
 277	__uint64_t space;
 278
 279	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
 280	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
 281	if (!dqp->q_prealloc_lo_wmark) {
 282		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
 283		do_div(dqp->q_prealloc_lo_wmark, 100);
 284		dqp->q_prealloc_lo_wmark *= 95;
 285	}
 286
 287	space = dqp->q_prealloc_hi_wmark;
 288
 289	do_div(space, 100);
 290	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
 291	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
 292	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 293}
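
/*
 * Worked example (hard limit assumed for illustration): with
 * d_blk_hardlimit == 1000 blocks and no soft limit,
 * q_prealloc_hi_wmark = 1000 and q_prealloc_lo_wmark = 1000/100*95 = 950,
 * while q_low_space[] becomes { 10, 30, 50 }, i.e. 1%, 3% and 5% of the
 * hard limit: the thresholds the write path uses to throttle
 * speculative preallocation as a quota fills up.
 */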
 294
 295/*
 296 * Allocate a block and fill it with dquots.
 297 * This is called when the bmapi finds a hole.
 298 */
 299STATIC int
 300xfs_qm_dqalloc(
 301	xfs_trans_t	**tpp,
 302	xfs_mount_t	*mp,
 303	xfs_dquot_t	*dqp,
 304	xfs_inode_t	*quotip,
 305	xfs_fileoff_t	offset_fsb,
 306	xfs_buf_t	**O_bpp)
 307{
 308	xfs_fsblock_t	firstblock;
 309	xfs_bmap_free_t flist;
 310	xfs_bmbt_irec_t map;
 311	int		nmaps, error;
 312	xfs_buf_t	*bp;
 313	xfs_trans_t	*tp = *tpp;
 314
 315	ASSERT(tp != NULL);
 316
 317	trace_xfs_dqalloc(dqp);
 318
 319	/*
 320	 * Initialize the bmap freelist prior to calling bmapi code.
 321	 */
 322	xfs_bmap_init(&flist, &firstblock);
 323	xfs_ilock(quotip, XFS_ILOCK_EXCL);
 324	/*
  325	 * Return if this type of quota was turned off while we didn't
  326	 * hold the inode lock.
 327	 */
 328	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 329		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 330		return -ESRCH;
 331	}
 332
 333	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
 334	nmaps = 1;
 335	error = xfs_bmapi_write(tp, quotip, offset_fsb,
 336				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
 337				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
 338				&map, &nmaps, &flist);
 339	if (error)
 340		goto error0;
 341	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
 342	ASSERT(nmaps == 1);
 343	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
 344	       (map.br_startblock != HOLESTARTBLOCK));
 345
 346	/*
 347	 * Keep track of the blkno to save a lookup later
 348	 */
 349	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 350
 351	/* now we can just get the buffer (there's nothing to read yet) */
 352	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 353			       dqp->q_blkno,
 354			       mp->m_quotainfo->qi_dqchunklen,
 355			       0);
 356	if (!bp) {
 357		error = -ENOMEM;
 358		goto error1;
 359	}
 360	bp->b_ops = &xfs_dquot_buf_ops;
 361
 362	/*
 363	 * Make a chunk of dquots out of this buffer and log
 364	 * the entire thing.
 365	 */
 366	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
 367			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
 368
 369	/*
 370	 * xfs_bmap_finish() may commit the current transaction and
 371	 * start a second transaction if the freelist is not empty.
 372	 *
 373	 * Since we still want to modify this buffer, we need to
 374	 * ensure that the buffer is not released on commit of
 375	 * the first transaction and ensure the buffer is added to the
 376	 * second transaction.
 377	 *
 378	 * If there is only one transaction then don't stop the buffer
 379	 * from being released when it commits later on.
 380	 */
 381
 382	xfs_trans_bhold(tp, bp);
 383
 384	error = xfs_bmap_finish(tpp, &flist, NULL);
 385	if (error)
 386		goto error1;
 387
 388	/* Transaction was committed? */
 389	if (*tpp != tp) {
 390		tp = *tpp;
 391		xfs_trans_bjoin(tp, bp);
 392	} else {
 393		xfs_trans_bhold_release(tp, bp);
 394	}
 395
 396	*O_bpp = bp;
 397	return 0;
 398
 399error1:
 400	xfs_bmap_cancel(&flist);
 401error0:
 402	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 403
 404	return error;
 405}
 406
 407STATIC int
 408xfs_qm_dqrepair(
 409	struct xfs_mount	*mp,
 410	struct xfs_trans	*tp,
 411	struct xfs_dquot	*dqp,
 412	xfs_dqid_t		firstid,
 413	struct xfs_buf		**bpp)
 414{
 415	int			error;
 416	struct xfs_disk_dquot	*ddq;
 417	struct xfs_dqblk	*d;
 418	int			i;
 419
 420	/*
 421	 * Read the buffer without verification so we get the corrupted
  422	 * buffer returned to us. Make sure we verify it on write, though.
 423	 */
 424	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
 425				   mp->m_quotainfo->qi_dqchunklen,
 426				   0, bpp, NULL);
 427
 428	if (error) {
 429		ASSERT(*bpp == NULL);
 430		return error;
 431	}
 432	(*bpp)->b_ops = &xfs_dquot_buf_ops;
 433
 434	ASSERT(xfs_buf_islocked(*bpp));
 435	d = (struct xfs_dqblk *)(*bpp)->b_addr;
 436
 437	/* Do the actual repair of dquots in this buffer */
 438	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
 439		ddq = &d[i].dd_diskdq;
 440		error = xfs_dqcheck(mp, ddq, firstid + i,
 441				       dqp->dq_flags & XFS_DQ_ALLTYPES,
 442				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
 443		if (error) {
 444			/* repair failed, we're screwed */
 445			xfs_trans_brelse(tp, *bpp);
 446			return -EIO;
 447		}
 448	}
 449
 450	return 0;
 451}
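
/*
 * Note (illustrative, based on xfs_dqcheck() behaviour): with
 * XFS_QMOPT_DQREPAIR a bad dquot is re-initialized in place, so after
 * the loop above every dquot in the buffer has a valid magic, version
 * and id again, and the write verifier installed on *bpp will accept it.
 */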
 452
 453/*
 454 * Maps a dquot to the buffer containing its on-disk version.
 455 * This returns a ptr to the buffer containing the on-disk dquot
 456 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 457 */
 458STATIC int
 459xfs_qm_dqtobp(
 460	xfs_trans_t		**tpp,
 461	xfs_dquot_t		*dqp,
 462	xfs_disk_dquot_t	**O_ddpp,
 463	xfs_buf_t		**O_bpp,
 464	uint			flags)
 465{
 466	struct xfs_bmbt_irec	map;
 467	int			nmaps = 1, error;
 468	struct xfs_buf		*bp;
 469	struct xfs_inode	*quotip;
 470	struct xfs_mount	*mp = dqp->q_mount;
 471	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 472	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
 473	uint			lock_mode;
 474
 475	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
 476	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 477
 478	lock_mode = xfs_ilock_data_map_shared(quotip);
 479	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 480		/*
  481		 * Return if this type of quota was turned off while we
  482		 * didn't hold the quota inode lock.
 483		 */
 484		xfs_iunlock(quotip, lock_mode);
 485		return -ESRCH;
 486	}
 487
 488	/*
 489	 * Find the block map; no allocations yet
 490	 */
 491	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 492			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 493
 494	xfs_iunlock(quotip, lock_mode);
 495	if (error)
 496		return error;
 497
 498	ASSERT(nmaps == 1);
 499	ASSERT(map.br_blockcount == 1);
 500
 501	/*
 502	 * Offset of dquot in the (fixed sized) dquot chunk.
 503	 */
 504	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
 505		sizeof(xfs_dqblk_t);
 506
 507	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
 508	if (map.br_startblock == HOLESTARTBLOCK) {
 509		/*
 510		 * We don't allocate unless we're asked to
 511		 */
 512		if (!(flags & XFS_QMOPT_DQALLOC))
 513			return -ENOENT;
 514
 515		ASSERT(tp);
 516		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
 517					dqp->q_fileoffset, &bp);
 518		if (error)
 519			return error;
 520		tp = *tpp;
 521	} else {
 522		trace_xfs_dqtobp_read(dqp);
 523
 524		/*
 525		 * store the blkno etc so that we don't have to do the
 526		 * mapping all the time
 527		 */
 528		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 529
 530		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 531					   dqp->q_blkno,
 532					   mp->m_quotainfo->qi_dqchunklen,
 533					   0, &bp, &xfs_dquot_buf_ops);
 534
 535		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
 536			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
 537						mp->m_quotainfo->qi_dqperchunk;
 538			ASSERT(bp == NULL);
 539			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
 540		}
 541
 542		if (error) {
 543			ASSERT(bp == NULL);
 544			return error;
 545		}
 546	}
 547
 548	ASSERT(xfs_buf_islocked(bp));
 549	*O_bpp = bp;
 550	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
 551
 552	return 0;
 553}
 554
 555
 556/*
 557 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 558 * and release the buffer immediately.
 559 *
  560 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 561 */
 562int
 563xfs_qm_dqread(
 564	struct xfs_mount	*mp,
 565	xfs_dqid_t		id,
 566	uint			type,
 567	uint			flags,
 568	struct xfs_dquot	**O_dqpp)
 569{
 570	struct xfs_dquot	*dqp;
 571	struct xfs_disk_dquot	*ddqp;
 572	struct xfs_buf		*bp;
 573	struct xfs_trans	*tp = NULL;
 574	int			error;
 575
 576	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
 577
 578	dqp->dq_flags = type;
 579	dqp->q_core.d_id = cpu_to_be32(id);
 580	dqp->q_mount = mp;
 581	INIT_LIST_HEAD(&dqp->q_lru);
 582	mutex_init(&dqp->q_qlock);
 583	init_waitqueue_head(&dqp->q_pinwait);
 584
 585	/*
 586	 * Because we want to use a counting completion, complete
 587	 * the flush completion once to allow a single access to
 588	 * the flush completion without blocking.
 589	 */
 590	init_completion(&dqp->q_flush);
 591	complete(&dqp->q_flush);
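
	/*
	 * Sketch of the flush-lock helpers layered on this completion
	 * (they live in xfs_dquot.h; shown here only for illustration):
	 *
	 *	xfs_dqflock(dqp)        -> wait_for_completion(&dqp->q_flush)
	 *	xfs_dqflock_nowait(dqp) -> try_wait_for_completion(&dqp->q_flush)
	 *	xfs_dqfunlock(dqp)      -> complete(&dqp->q_flush)
	 *
	 * The complete() above makes the "lock" start out unlocked.
	 */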
 592
 593	/*
 594	 * Make sure group quotas have a different lock class than user
 595	 * quotas.
 596	 */
 597	switch (type) {
 598	case XFS_DQ_USER:
 599		/* uses the default lock class */
 600		break;
 601	case XFS_DQ_GROUP:
 602		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
 603		break;
 604	case XFS_DQ_PROJ:
 605		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
 606		break;
 607	default:
 608		ASSERT(0);
 609		break;
 610	}
 611
 612	XFS_STATS_INC(mp, xs_qm_dquot);
 613
 614	trace_xfs_dqread(dqp);
 615
 616	if (flags & XFS_QMOPT_DQALLOC) {
 617		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
 618		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
 619					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
 620		if (error)
 621			goto error1;
 622	}
 623
 624	/*
  625	 * get a pointer to the on-disk dquot and the buffer containing it;
 626	 * dqp already knows its own type (GROUP/USER).
 627	 */
 628	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
 629	if (error) {
 630		/*
 631		 * This can happen if quotas got turned off (ESRCH),
 632		 * or if the dquot didn't exist on disk and we ask to
 633		 * allocate (ENOENT).
 634		 */
 635		trace_xfs_dqread_fail(dqp);
 636		goto error1;
 637	}
 638
 639	/* copy everything from disk dquot to the incore dquot */
 640	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
 641	xfs_qm_dquot_logitem_init(dqp);
 642
 643	/*
 644	 * Reservation counters are defined as reservation plus current usage
 645	 * to avoid having to add every time.
 646	 */
 647	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
 648	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
 649	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
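
	/*
	 * Worked example (values assumed for illustration): with
	 * d_bcount == 100 on disk, q_res_bcount starts at 100; a
	 * transaction reserving 20 more blocks raises it to 120, so
	 * enforcement can compare q_res_bcount against the limits
	 * directly instead of summing usage and reservation each time.
	 */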
 650
 651	/* initialize the dquot speculative prealloc thresholds */
 652	xfs_dquot_set_prealloc_limits(dqp);
 653
 654	/* Mark the buf so that this will stay incore a little longer */
 655	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 656
 657	/*
  658	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
  659	 * so we need to release it with xfs_trans_brelse().
 660	 * The strategy here is identical to that of inodes; we lock
 661	 * the dquot in xfs_qm_dqget() before making it accessible to
 662	 * others. This is because dquots, like inodes, need a good level of
 663	 * concurrency, and we don't want to take locks on the entire buffers
 664	 * for dquot accesses.
 665	 * Note also that the dquot buffer may even be dirty at this point, if
 666	 * this particular dquot was repaired. We still aren't afraid to
 667	 * brelse it because we have the changes incore.
 668	 */
 669	ASSERT(xfs_buf_islocked(bp));
 670	xfs_trans_brelse(tp, bp);
 671
 672	if (tp) {
 673		error = xfs_trans_commit(tp);
 674		if (error)
 675			goto error0;
 676	}
 677
 678	*O_dqpp = dqp;
 679	return error;
 680
 681error1:
 682	if (tp)
 683		xfs_trans_cancel(tp);
 684error0:
 685	xfs_qm_dqdestroy(dqp);
 686	*O_dqpp = NULL;
 687	return error;
 688}
 689
 690/*
 691 * Advance to the next id in the current chunk, or if at the
  692 * end of the chunk, skip ahead to the first id in the next allocated chunk
 693 * using the SEEK_DATA interface.
 694 */
 695int
 696xfs_dq_get_next_id(
 697	xfs_mount_t		*mp,
 698	uint			type,
 699	xfs_dqid_t		*id,
 700	loff_t			eof)
 701{
 702	struct xfs_inode	*quotip;
 703	xfs_fsblock_t		start;
 704	loff_t			offset;
 705	uint			lock;
 706	xfs_dqid_t		next_id;
 707	int			error = 0;
 708
 709	/* Simple advance */
 710	next_id = *id + 1;
 711
 712	/* If new ID is within the current chunk, advancing it sufficed */
 713	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
 714		*id = next_id;
 715		return 0;
 716	}
 717
 718	/* Nope, next_id is now past the current chunk, so find the next one */
 719	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
 720
 721	quotip = xfs_quota_inode(mp, type);
 722	lock = xfs_ilock_data_map_shared(quotip);
 723
 724	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
 725				      eof, SEEK_DATA);
 726	if (offset < 0)
 727		error = offset;
 728
 729	xfs_iunlock(quotip, lock);
 730
 731	/* -ENXIO is essentially "no more data" */
 732	if (error)
 733		return (error == -ENXIO ? -ENOENT: error);
 734
 735	/* Convert next data offset back to a quota id */
 736	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
 737	return 0;
 738}
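
/*
 * Worked example (4k blocks and v5 dquots assumed, which gives
 * qi_dqperchunk == 30): advancing from id 28 to 29 stays inside the
 * current chunk and returns immediately; advancing from id 29 to 30
 * crosses a chunk boundary, so we SEEK_DATA from file block
 * 30 / 30 == 1 and convert the next data offset found back into the
 * first id of that chunk.
 */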
 739
 740/*
  741 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
 742 * a locked dquot, doing an allocation (if requested) as needed.
 743 * When both an inode and an id are given, the inode's id takes precedence.
 744 * That is, if the id changes while we don't hold the ilock inside this
 745 * function, the new dquot is returned, not necessarily the one requested
 746 * in the id argument.
 747 */
 748int
 749xfs_qm_dqget(
 750	xfs_mount_t	*mp,
 751	xfs_inode_t	*ip,	  /* locked inode (optional) */
 752	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
 753	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
 754	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
 755	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
 756{
 757	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 758	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
 759	struct xfs_dquot	*dqp;
 760	loff_t			eof = 0;
 761	int			error;
 762
 763	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 764	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
 765	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
 766	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
 767		return -ESRCH;
 768	}
 769
 770#ifdef DEBUG
 771	if (xfs_do_dqerror) {
 772		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
 773		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
 774			xfs_debug(mp, "Returning error in dqget");
 775			return -EIO;
 776		}
 777	}
 778
 779	ASSERT(type == XFS_DQ_USER ||
 780	       type == XFS_DQ_PROJ ||
 781	       type == XFS_DQ_GROUP);
 782	if (ip) {
 783		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 784		ASSERT(xfs_inode_dquot(ip, type) == NULL);
 785	}
 786#endif
 787
 788	/* Get the end of the quota file if we need it */
 789	if (flags & XFS_QMOPT_DQNEXT) {
 790		struct xfs_inode	*quotip;
 791		xfs_fileoff_t		last;
 792		uint			lock_mode;
 793
 794		quotip = xfs_quota_inode(mp, type);
 795		lock_mode = xfs_ilock_data_map_shared(quotip);
 796		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
 797		xfs_iunlock(quotip, lock_mode);
 798		if (error)
 799			return error;
 800		eof = XFS_FSB_TO_B(mp, last);
 801	}
 802
 803restart:
 804	mutex_lock(&qi->qi_tree_lock);
 805	dqp = radix_tree_lookup(tree, id);
 806	if (dqp) {
 807		xfs_dqlock(dqp);
 808		if (dqp->dq_flags & XFS_DQ_FREEING) {
 809			xfs_dqunlock(dqp);
 810			mutex_unlock(&qi->qi_tree_lock);
 811			trace_xfs_dqget_freeing(dqp);
 812			delay(1);
 813			goto restart;
 814		}
 815
 816		/* uninit / unused quota found in radix tree, keep looking  */
 817		if (flags & XFS_QMOPT_DQNEXT) {
 818			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 819				xfs_dqunlock(dqp);
 820				mutex_unlock(&qi->qi_tree_lock);
 821				error = xfs_dq_get_next_id(mp, type, &id, eof);
 822				if (error)
 823					return error;
 824				goto restart;
 825			}
 826		}
 827
 828		dqp->q_nrefs++;
 829		mutex_unlock(&qi->qi_tree_lock);
 830
 831		trace_xfs_dqget_hit(dqp);
 832		XFS_STATS_INC(mp, xs_qm_dqcachehits);
 833		*O_dqpp = dqp;
 834		return 0;
 835	}
 836	mutex_unlock(&qi->qi_tree_lock);
 837	XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 838
 839	/*
 840	 * Dquot cache miss. We don't want to keep the inode lock across
 841	 * a (potential) disk read. Also we don't want to deal with the lock
 842	 * ordering between quotainode and this inode. OTOH, dropping the inode
 843	 * lock here means dealing with a chown that can happen before
 844	 * we re-acquire the lock.
 845	 */
 846	if (ip)
 847		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 848
 849	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
 850
 851	if (ip)
 852		xfs_ilock(ip, XFS_ILOCK_EXCL);
 853
 854	/* If we are asked to find next active id, keep looking */
 855	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
 856		error = xfs_dq_get_next_id(mp, type, &id, eof);
 857		if (!error)
 858			goto restart;
 859	}
 860
 861	if (error)
 862		return error;
 863
 864	if (ip) {
 865		/*
 866		 * A dquot could be attached to this inode by now, since
 867		 * we had dropped the ilock.
 868		 */
 869		if (xfs_this_quota_on(mp, type)) {
 870			struct xfs_dquot	*dqp1;
 871
 872			dqp1 = xfs_inode_dquot(ip, type);
 873			if (dqp1) {
 874				xfs_qm_dqdestroy(dqp);
 875				dqp = dqp1;
 876				xfs_dqlock(dqp);
 877				goto dqret;
 878			}
 879		} else {
 880			/* inode stays locked on return */
 881			xfs_qm_dqdestroy(dqp);
 882			return -ESRCH;
 883		}
 884	}
 885
 886	mutex_lock(&qi->qi_tree_lock);
 887	error = radix_tree_insert(tree, id, dqp);
 888	if (unlikely(error)) {
 889		WARN_ON(error != -EEXIST);
 890
 891		/*
 892		 * Duplicate found. Just throw away the new dquot and start
 893		 * over.
 894		 */
 895		mutex_unlock(&qi->qi_tree_lock);
 896		trace_xfs_dqget_dup(dqp);
 897		xfs_qm_dqdestroy(dqp);
 898		XFS_STATS_INC(mp, xs_qm_dquot_dups);
 899		goto restart;
 900	}
 901
 902	/*
 903	 * We return a locked dquot to the caller, with a reference taken
 904	 */
 905	xfs_dqlock(dqp);
 906	dqp->q_nrefs = 1;
 907
 908	qi->qi_dquots++;
 909	mutex_unlock(&qi->qi_tree_lock);
 910
 911	/* If we are asked to find next active id, keep looking */
 912	if (flags & XFS_QMOPT_DQNEXT) {
 913		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 914			xfs_qm_dqput(dqp);
 915			error = xfs_dq_get_next_id(mp, type, &id, eof);
 916			if (error)
 917				return error;
 918			goto restart;
 919		}
 920	}
 921
 922 dqret:
 923	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 924	trace_xfs_dqget_miss(dqp);
 925	*O_dqpp = dqp;
 926	return 0;
 927}
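
/*
 * Illustrative caller sketch (assumed usage, not part of this file):
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, NULL, uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &dqp);
 *	if (!error) {
 *		... use dqp, returned locked with one reference held ...
 *		xfs_qm_dqput(dqp);
 *	}
 *
 * xfs_qm_dqput() below unlocks the dquot and drops the reference.
 */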
 928
 929/*
 930 * Release a reference to the dquot (decrement ref-count) and unlock it.
 931 *
 932 * If there is a group quota attached to this dquot, carefully release that
 933 * too without tripping over deadlocks'n'stuff.
 934 */
 935void
 936xfs_qm_dqput(
 937	struct xfs_dquot	*dqp)
 938{
 939	ASSERT(dqp->q_nrefs > 0);
 940	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 941
 942	trace_xfs_dqput(dqp);
 943
 944	if (--dqp->q_nrefs == 0) {
 945		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
 946		trace_xfs_dqput_free(dqp);
 947
 948		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
 949			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
 950	}
 951	xfs_dqunlock(dqp);
 952}
 953
 954/*
 955 * Release a dquot. Flush it if dirty, then dqput() it.
 956 * dquot must not be locked.
 957 */
 958void
 959xfs_qm_dqrele(
 960	xfs_dquot_t	*dqp)
 961{
 962	if (!dqp)
 963		return;
 964
 965	trace_xfs_dqrele(dqp);
 966
 967	xfs_dqlock(dqp);
 968	/*
 969	 * We don't care to flush it if the dquot is dirty here.
 970	 * That will create stutters that we want to avoid.
 971	 * Instead we do a delayed write when we try to reclaim
 972	 * a dirty dquot. Also xfs_sync will take part of the burden...
 973	 */
 974	xfs_qm_dqput(dqp);
 975}
 976
 977/*
 978 * This is the dquot flushing I/O completion routine.  It is called
 979 * from interrupt level when the buffer containing the dquot is
 980 * flushed to disk.  It is responsible for removing the dquot logitem
 981 * from the AIL if it has not been re-logged, and unlocking the dquot's
  982 * flush lock. This behavior is very similar to that of inodes.
 983 */
 984STATIC void
 985xfs_qm_dqflush_done(
 986	struct xfs_buf		*bp,
 987	struct xfs_log_item	*lip)
 988{
 989	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
 990	xfs_dquot_t		*dqp = qip->qli_dquot;
 991	struct xfs_ail		*ailp = lip->li_ailp;
 992
 993	/*
 994	 * We only want to pull the item from the AIL if its
 995	 * location in the log has not changed since we started the flush.
 996	 * Thus, we only bother if the dquot's lsn has
 997	 * not changed. First we check the lsn outside the lock
 998	 * since it's cheaper, and then we recheck while
 999	 * holding the lock before removing the dquot from the AIL.
1000	 */
1001	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1002	    lip->li_lsn == qip->qli_flush_lsn) {
1003
1004		/* xfs_trans_ail_delete() drops the AIL lock. */
1005		spin_lock(&ailp->xa_lock);
1006		if (lip->li_lsn == qip->qli_flush_lsn)
1007			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1008		else
1009			spin_unlock(&ailp->xa_lock);
1010	}
1011
1012	/*
1013	 * Release the dq's flush lock since we're done with it.
1014	 */
1015	xfs_dqfunlock(dqp);
1016}
1017
1018/*
1019 * Write a modified dquot to disk.
 1020 * The dquot must be locked and the flush lock held by the caller.
1021 * The flush lock will not be unlocked until the dquot reaches the disk,
1022 * but the dquot is free to be unlocked and modified by the caller
1023 * in the interim. Dquot is still locked on return. This behavior is
1024 * identical to that of inodes.
1025 */
1026int
1027xfs_qm_dqflush(
1028	struct xfs_dquot	*dqp,
1029	struct xfs_buf		**bpp)
1030{
1031	struct xfs_mount	*mp = dqp->q_mount;
1032	struct xfs_buf		*bp;
1033	struct xfs_disk_dquot	*ddqp;
1034	int			error;
1035
1036	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1037	ASSERT(!completion_done(&dqp->q_flush));
1038
1039	trace_xfs_dqflush(dqp);
1040
1041	*bpp = NULL;
1042
1043	xfs_qm_dqunpin_wait(dqp);
1044
1045	/*
1046	 * This may have been unpinned because the filesystem is shutting
1047	 * down forcibly. If that's the case we must not write this dquot
1048	 * to disk, because the log record didn't make it to disk.
1049	 *
1050	 * We also have to remove the log item from the AIL in this case,
 1051 * as we wait for an empty AIL as part of the unmount process.
1052	 */
1053	if (XFS_FORCED_SHUTDOWN(mp)) {
1054		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1055		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1056
1057		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1058
1059		error = -EIO;
1060		goto out_unlock;
1061	}
1062
1063	/*
1064	 * Get the buffer containing the on-disk dquot
1065	 */
1066	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1067				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1068				   &xfs_dquot_buf_ops);
1069	if (error)
1070		goto out_unlock;
1071
1072	/*
1073	 * Calculate the location of the dquot inside the buffer.
1074	 */
1075	ddqp = bp->b_addr + dqp->q_bufoffset;
1076
1077	/*
 1078	 * A simple sanity check in case we got a corrupted dquot.
1079	 */
1080	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1081			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1082	if (error) {
1083		xfs_buf_relse(bp);
1084		xfs_dqfunlock(dqp);
1085		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1086		return -EIO;
1087	}
1088
1089	/* This is the only portion of data that needs to persist */
1090	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1091
1092	/*
1093	 * Clear the dirty field and remember the flush lsn for later use.
1094	 */
1095	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1096
1097	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1098					&dqp->q_logitem.qli_item.li_lsn);
1099
1100	/*
1101	 * copy the lsn into the on-disk dquot now while we have the in memory
1102	 * dquot here. This can't be done later in the write verifier as we
1103	 * can't get access to the log item at that point in time.
1104	 *
1105	 * We also calculate the CRC here so that the on-disk dquot in the
1106	 * buffer always has a valid CRC. This ensures there is no possibility
1107	 * of a dquot without an up-to-date CRC getting to disk.
1108	 */
1109	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1110		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1111
1112		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1113		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1114				 XFS_DQUOT_CRC_OFF);
1115	}
1116
1117	/*
1118	 * Attach an iodone routine so that we can remove this dquot from the
1119	 * AIL and release the flush lock once the dquot is synced to disk.
1120	 */
1121	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1122				  &dqp->q_logitem.qli_item);
1123
1124	/*
1125	 * If the buffer is pinned then push on the log so we won't
1126	 * get stuck waiting in the write for too long.
1127	 */
1128	if (xfs_buf_ispinned(bp)) {
1129		trace_xfs_dqflush_force(dqp);
1130		xfs_log_force(mp, 0);
1131	}
1132
1133	trace_xfs_dqflush_done(dqp);
1134	*bpp = bp;
1135	return 0;
1136
1137out_unlock:
1138	xfs_dqfunlock(dqp);
1139	return -EIO;
1140}
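
/*
 * Illustrative caller sketch (assumed usage, modelled on the flush
 * paths in xfs_qm.c): take the flush lock, flush, queue the returned
 * buffer for delayed write and release it; I/O completion then runs
 * xfs_qm_dqflush_done() above:
 *
 *	if (xfs_dqflock_nowait(dqp)) {
 *		struct xfs_buf	*bp;
 *
 *		if (!xfs_qm_dqflush(dqp, &bp)) {
 *			xfs_buf_delwri_queue(bp, buffer_list);
 *			xfs_buf_relse(bp);
 *		}
 *	}
 */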
1141
1142/*
1143 * Lock two xfs_dquot structures.
1144 *
1145 * To avoid deadlocks we always lock the quota structure with
 1146 * the lower id first.
1147 */
1148void
1149xfs_dqlock2(
1150	xfs_dquot_t	*d1,
1151	xfs_dquot_t	*d2)
1152{
1153	if (d1 && d2) {
1154		ASSERT(d1 != d2);
1155		if (be32_to_cpu(d1->q_core.d_id) >
1156		    be32_to_cpu(d2->q_core.d_id)) {
1157			mutex_lock(&d2->q_qlock);
1158			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1159		} else {
1160			mutex_lock(&d1->q_qlock);
1161			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1162		}
1163	} else if (d1) {
1164		mutex_lock(&d1->q_qlock);
1165	} else if (d2) {
1166		mutex_lock(&d2->q_qlock);
1167	}
1168}
1169
1170int __init
1171xfs_qm_init(void)
1172{
1173	xfs_qm_dqzone =
1174		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1175	if (!xfs_qm_dqzone)
1176		goto out;
1177
1178	xfs_qm_dqtrxzone =
1179		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1180	if (!xfs_qm_dqtrxzone)
1181		goto out_free_dqzone;
1182
1183	return 0;
1184
1185out_free_dqzone:
1186	kmem_zone_destroy(xfs_qm_dqzone);
1187out:
1188	return -ENOMEM;
1189}
1190
1191void
1192xfs_qm_exit(void)
1193{
1194	kmem_zone_destroy(xfs_qm_dqtrxzone);
1195	kmem_zone_destroy(xfs_qm_dqzone);
1196}
v4.10.11
   1/*
   2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_format.h"
  21#include "xfs_log_format.h"
  22#include "xfs_shared.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_mount.h"
  26#include "xfs_defer.h"
  27#include "xfs_inode.h"
  28#include "xfs_bmap.h"
  29#include "xfs_bmap_util.h"
  30#include "xfs_alloc.h"
  31#include "xfs_quota.h"
  32#include "xfs_error.h"
  33#include "xfs_trans.h"
  34#include "xfs_buf_item.h"
  35#include "xfs_trans_space.h"
  36#include "xfs_trans_priv.h"
  37#include "xfs_qm.h"
  38#include "xfs_cksum.h"
  39#include "xfs_trace.h"
  40#include "xfs_log.h"
  41#include "xfs_bmap_btree.h"
  42
  43/*
  44 * Lock order:
  45 *
  46 * ip->i_lock
  47 *   qi->qi_tree_lock
  48 *     dquot->q_qlock (xfs_dqlock() and friends)
  49 *       dquot->q_flush (xfs_dqflock() and friends)
  50 *       qi->qi_lru_lock
  51 *
   52 * If two dquots need to be locked, the order is user before group/project,
   53 * otherwise by the lowest id first; see xfs_dqlock2.
  54 */
  55
  56#ifdef DEBUG
  57xfs_buftarg_t *xfs_dqerror_target;
  58int xfs_do_dqerror;
  59int xfs_dqreq_num;
  60int xfs_dqerror_mod = 33;
  61#endif
  62
  63struct kmem_zone		*xfs_qm_dqtrxzone;
  64static struct kmem_zone		*xfs_qm_dqzone;
  65
  66static struct lock_class_key xfs_dquot_group_class;
  67static struct lock_class_key xfs_dquot_project_class;
  68
  69/*
  70 * This is called to free all the memory associated with a dquot
  71 */
  72void
  73xfs_qm_dqdestroy(
  74	xfs_dquot_t	*dqp)
  75{
  76	ASSERT(list_empty(&dqp->q_lru));
  77
  78	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
  79	mutex_destroy(&dqp->q_qlock);
  80
  81	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
  82	kmem_zone_free(xfs_qm_dqzone, dqp);
  83}
  84
  85/*
  86 * If default limits are in force, push them into the dquot now.
  87 * We overwrite the dquot limits only if they are zero and this
  88 * is not the root dquot.
  89 */
  90void
  91xfs_qm_adjust_dqlimits(
  92	struct xfs_mount	*mp,
  93	struct xfs_dquot	*dq)
  94{
  95	struct xfs_quotainfo	*q = mp->m_quotainfo;
  96	struct xfs_disk_dquot	*d = &dq->q_core;
  97	struct xfs_def_quota	*defq;
  98	int			prealloc = 0;
  99
 100	ASSERT(d->d_id);
 101	defq = xfs_get_defquota(dq, q);
 102
 103	if (defq->bsoftlimit && !d->d_blk_softlimit) {
 104		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
 105		prealloc = 1;
 106	}
 107	if (defq->bhardlimit && !d->d_blk_hardlimit) {
 108		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
 109		prealloc = 1;
 110	}
 111	if (defq->isoftlimit && !d->d_ino_softlimit)
 112		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
 113	if (defq->ihardlimit && !d->d_ino_hardlimit)
 114		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
 115	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
 116		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
 117	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
 118		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 119
 120	if (prealloc)
 121		xfs_dquot_set_prealloc_limits(dq);
 122}
 123
 124/*
 125 * Check the limits and timers of a dquot and start or reset timers
 126 * if necessary.
 127 * This gets called even when quota enforcement is OFF, which makes our
 128 * life a little less complicated. (We just don't reject any quota
 129 * reservations in that case, when enforcement is off).
 130 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 131 * enforcement's off.
 132 * In contrast, warnings are a little different in that they don't
 133 * 'automatically' get started when limits get exceeded.  They do
 134 * get reset to zero, however, when we find the count to be under
 135 * the soft limit (they are only ever set non-zero via userspace).
 136 */
 137void
 138xfs_qm_adjust_dqtimers(
 139	xfs_mount_t		*mp,
 140	xfs_disk_dquot_t	*d)
 141{
 142	ASSERT(d->d_id);
 143
 144#ifdef DEBUG
 145	if (d->d_blk_hardlimit)
 146		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
 147		       be64_to_cpu(d->d_blk_hardlimit));
 148	if (d->d_ino_hardlimit)
 149		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
 150		       be64_to_cpu(d->d_ino_hardlimit));
 151	if (d->d_rtb_hardlimit)
 152		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
 153		       be64_to_cpu(d->d_rtb_hardlimit));
 154#endif
 155
 156	if (!d->d_btimer) {
 157		if ((d->d_blk_softlimit &&
 158		     (be64_to_cpu(d->d_bcount) >
 159		      be64_to_cpu(d->d_blk_softlimit))) ||
 160		    (d->d_blk_hardlimit &&
 161		     (be64_to_cpu(d->d_bcount) >
 162		      be64_to_cpu(d->d_blk_hardlimit)))) {
 163			d->d_btimer = cpu_to_be32(get_seconds() +
 164					mp->m_quotainfo->qi_btimelimit);
 165		} else {
 166			d->d_bwarns = 0;
 167		}
 168	} else {
 169		if ((!d->d_blk_softlimit ||
 170		     (be64_to_cpu(d->d_bcount) <=
 171		      be64_to_cpu(d->d_blk_softlimit))) &&
 172		    (!d->d_blk_hardlimit ||
 173		    (be64_to_cpu(d->d_bcount) <=
 174		     be64_to_cpu(d->d_blk_hardlimit)))) {
 175			d->d_btimer = 0;
 176		}
 177	}
 178
 179	if (!d->d_itimer) {
 180		if ((d->d_ino_softlimit &&
 181		     (be64_to_cpu(d->d_icount) >
 182		      be64_to_cpu(d->d_ino_softlimit))) ||
 183		    (d->d_ino_hardlimit &&
 184		     (be64_to_cpu(d->d_icount) >
 185		      be64_to_cpu(d->d_ino_hardlimit)))) {
 186			d->d_itimer = cpu_to_be32(get_seconds() +
 187					mp->m_quotainfo->qi_itimelimit);
 188		} else {
 189			d->d_iwarns = 0;
 190		}
 191	} else {
 192		if ((!d->d_ino_softlimit ||
 193		     (be64_to_cpu(d->d_icount) <=
 194		      be64_to_cpu(d->d_ino_softlimit)))  &&
 195		    (!d->d_ino_hardlimit ||
 196		     (be64_to_cpu(d->d_icount) <=
 197		      be64_to_cpu(d->d_ino_hardlimit)))) {
 198			d->d_itimer = 0;
 199		}
 200	}
 201
 202	if (!d->d_rtbtimer) {
 203		if ((d->d_rtb_softlimit &&
 204		     (be64_to_cpu(d->d_rtbcount) >
 205		      be64_to_cpu(d->d_rtb_softlimit))) ||
 206		    (d->d_rtb_hardlimit &&
 207		     (be64_to_cpu(d->d_rtbcount) >
 208		      be64_to_cpu(d->d_rtb_hardlimit)))) {
 209			d->d_rtbtimer = cpu_to_be32(get_seconds() +
 210					mp->m_quotainfo->qi_rtbtimelimit);
 211		} else {
 212			d->d_rtbwarns = 0;
 213		}
 214	} else {
 215		if ((!d->d_rtb_softlimit ||
 216		     (be64_to_cpu(d->d_rtbcount) <=
 217		      be64_to_cpu(d->d_rtb_softlimit))) &&
 218		    (!d->d_rtb_hardlimit ||
 219		     (be64_to_cpu(d->d_rtbcount) <=
 220		      be64_to_cpu(d->d_rtb_hardlimit)))) {
 221			d->d_rtbtimer = 0;
 222		}
 223	}
 224}
 225
 226/*
 227 * initialize a buffer full of dquots and log the whole thing
 228 */
 229STATIC void
 230xfs_qm_init_dquot_blk(
 231	xfs_trans_t	*tp,
 232	xfs_mount_t	*mp,
 233	xfs_dqid_t	id,
 234	uint		type,
 235	xfs_buf_t	*bp)
 236{
 237	struct xfs_quotainfo	*q = mp->m_quotainfo;
 238	xfs_dqblk_t	*d;
 239	xfs_dqid_t	curid;
 240	int		i;
 241
 242	ASSERT(tp);
 243	ASSERT(xfs_buf_islocked(bp));
 244
 245	d = bp->b_addr;
 246
 247	/*
  248	 * ID of the first dquot in the block - ids are zero based.
 249	 */
 250	curid = id - (id % q->qi_dqperchunk);
 251	memset(d, 0, BBTOB(q->qi_dqchunklen));
 252	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 253		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
 254		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 255		d->dd_diskdq.d_id = cpu_to_be32(curid);
 256		d->dd_diskdq.d_flags = type;
 257		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 258			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 259			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 260					 XFS_DQUOT_CRC_OFF);
 261		}
 262	}
 263
 264	xfs_trans_dquot_buf(tp, bp,
 265			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
 266			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
 267			     XFS_BLF_GDQUOT_BUF)));
 268	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 269}
 270
 271/*
 272 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 273 * watermarks correspond to the soft and hard limits by default. If a soft limit
 274 * is not specified, we use 95% of the hard limit.
 275 */
 276void
 277xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 278{
 279	__uint64_t space;
 280
 281	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
 282	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
 283	if (!dqp->q_prealloc_lo_wmark) {
 284		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
 285		do_div(dqp->q_prealloc_lo_wmark, 100);
 286		dqp->q_prealloc_lo_wmark *= 95;
 287	}
 288
 289	space = dqp->q_prealloc_hi_wmark;
 290
 291	do_div(space, 100);
 292	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
 293	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
 294	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 295}
 296
 297/*
 298 * Allocate a block and fill it with dquots.
 299 * This is called when the bmapi finds a hole.
 300 */
 301STATIC int
 302xfs_qm_dqalloc(
 303	xfs_trans_t	**tpp,
 304	xfs_mount_t	*mp,
 305	xfs_dquot_t	*dqp,
 306	xfs_inode_t	*quotip,
 307	xfs_fileoff_t	offset_fsb,
 308	xfs_buf_t	**O_bpp)
 309{
 310	xfs_fsblock_t	firstblock;
 311	struct xfs_defer_ops dfops;
 312	xfs_bmbt_irec_t map;
 313	int		nmaps, error;
 314	xfs_buf_t	*bp;
 315	xfs_trans_t	*tp = *tpp;
 316
 317	ASSERT(tp != NULL);
 318
 319	trace_xfs_dqalloc(dqp);
 320
 321	/*
 322	 * Initialize the bmap freelist prior to calling bmapi code.
 323	 */
 324	xfs_defer_init(&dfops, &firstblock);
 325	xfs_ilock(quotip, XFS_ILOCK_EXCL);
 326	/*
  327	 * Return if this type of quota was turned off while we didn't
  328	 * hold the inode lock.
 329	 */
 330	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 331		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 332		return -ESRCH;
 333	}
 334
 335	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
 336	nmaps = 1;
 337	error = xfs_bmapi_write(tp, quotip, offset_fsb,
 338				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
 339				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
 340				&map, &nmaps, &dfops);
 341	if (error)
 342		goto error0;
 343	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
 344	ASSERT(nmaps == 1);
 345	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
 346	       (map.br_startblock != HOLESTARTBLOCK));
 347
 348	/*
 349	 * Keep track of the blkno to save a lookup later
 350	 */
 351	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 352
 353	/* now we can just get the buffer (there's nothing to read yet) */
 354	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 355			       dqp->q_blkno,
 356			       mp->m_quotainfo->qi_dqchunklen,
 357			       0);
 358	if (!bp) {
 359		error = -ENOMEM;
 360		goto error1;
 361	}
 362	bp->b_ops = &xfs_dquot_buf_ops;
 363
 364	/*
 365	 * Make a chunk of dquots out of this buffer and log
 366	 * the entire thing.
 367	 */
 368	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
 369			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
 370
 371	/*
 372	 * xfs_defer_finish() may commit the current transaction and
 373	 * start a second transaction if the freelist is not empty.
 374	 *
 375	 * Since we still want to modify this buffer, we need to
 376	 * ensure that the buffer is not released on commit of
 377	 * the first transaction and ensure the buffer is added to the
 378	 * second transaction.
 379	 *
 380	 * If there is only one transaction then don't stop the buffer
 381	 * from being released when it commits later on.
 382	 */
 383
 384	xfs_trans_bhold(tp, bp);
 385
 386	error = xfs_defer_finish(tpp, &dfops, NULL);
 387	if (error)
 388		goto error1;
 389
 390	/* Transaction was committed? */
 391	if (*tpp != tp) {
 392		tp = *tpp;
 393		xfs_trans_bjoin(tp, bp);
 394	} else {
 395		xfs_trans_bhold_release(tp, bp);
 396	}
 397
 398	*O_bpp = bp;
 399	return 0;
 400
 401error1:
 402	xfs_defer_cancel(&dfops);
 403error0:
 404	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 405
 406	return error;
 407}
 408
 409STATIC int
 410xfs_qm_dqrepair(
 411	struct xfs_mount	*mp,
 412	struct xfs_trans	*tp,
 413	struct xfs_dquot	*dqp,
 414	xfs_dqid_t		firstid,
 415	struct xfs_buf		**bpp)
 416{
 417	int			error;
 418	struct xfs_disk_dquot	*ddq;
 419	struct xfs_dqblk	*d;
 420	int			i;
 421
 422	/*
 423	 * Read the buffer without verification so we get the corrupted
  424	 * buffer returned to us. Make sure we verify it on write, though.
 425	 */
 426	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
 427				   mp->m_quotainfo->qi_dqchunklen,
 428				   0, bpp, NULL);
 429
 430	if (error) {
 431		ASSERT(*bpp == NULL);
 432		return error;
 433	}
 434	(*bpp)->b_ops = &xfs_dquot_buf_ops;
 435
 436	ASSERT(xfs_buf_islocked(*bpp));
 437	d = (struct xfs_dqblk *)(*bpp)->b_addr;
 438
 439	/* Do the actual repair of dquots in this buffer */
 440	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
 441		ddq = &d[i].dd_diskdq;
 442		error = xfs_dqcheck(mp, ddq, firstid + i,
 443				       dqp->dq_flags & XFS_DQ_ALLTYPES,
 444				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
 445		if (error) {
 446			/* repair failed, we're screwed */
 447			xfs_trans_brelse(tp, *bpp);
 448			return -EIO;
 449		}
 450	}
 451
 452	return 0;
 453}
 454
 455/*
 456 * Maps a dquot to the buffer containing its on-disk version.
 457 * This returns a ptr to the buffer containing the on-disk dquot
 458 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 459 */
 460STATIC int
 461xfs_qm_dqtobp(
 462	xfs_trans_t		**tpp,
 463	xfs_dquot_t		*dqp,
 464	xfs_disk_dquot_t	**O_ddpp,
 465	xfs_buf_t		**O_bpp,
 466	uint			flags)
 467{
 468	struct xfs_bmbt_irec	map;
 469	int			nmaps = 1, error;
 470	struct xfs_buf		*bp;
 471	struct xfs_inode	*quotip;
 472	struct xfs_mount	*mp = dqp->q_mount;
 473	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 474	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
 475	uint			lock_mode;
 476
 477	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
 478	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 479
 480	lock_mode = xfs_ilock_data_map_shared(quotip);
 481	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 482		/*
  483		 * Return if this type of quota was turned off while we
  484		 * didn't hold the quota inode lock.
 485		 */
 486		xfs_iunlock(quotip, lock_mode);
 487		return -ESRCH;
 488	}
 489
 490	/*
 491	 * Find the block map; no allocations yet
 492	 */
 493	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 494			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 495
 496	xfs_iunlock(quotip, lock_mode);
 497	if (error)
 498		return error;
 499
 500	ASSERT(nmaps == 1);
 501	ASSERT(map.br_blockcount == 1);
 502
 503	/*
 504	 * Offset of dquot in the (fixed sized) dquot chunk.
 505	 */
 506	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
 507		sizeof(xfs_dqblk_t);
 508
 509	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
 510	if (map.br_startblock == HOLESTARTBLOCK) {
 511		/*
 512		 * We don't allocate unless we're asked to
 513		 */
 514		if (!(flags & XFS_QMOPT_DQALLOC))
 515			return -ENOENT;
 516
 517		ASSERT(tp);
 518		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
 519					dqp->q_fileoffset, &bp);
 520		if (error)
 521			return error;
 522		tp = *tpp;
 523	} else {
 524		trace_xfs_dqtobp_read(dqp);
 525
 526		/*
 527		 * store the blkno etc so that we don't have to do the
 528		 * mapping all the time
 529		 */
 530		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 531
 532		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 533					   dqp->q_blkno,
 534					   mp->m_quotainfo->qi_dqchunklen,
 535					   0, &bp, &xfs_dquot_buf_ops);
 536
 537		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
 538			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
 539						mp->m_quotainfo->qi_dqperchunk;
 540			ASSERT(bp == NULL);
 541			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
 542		}
 543
 544		if (error) {
 545			ASSERT(bp == NULL);
 546			return error;
 547		}
 548	}
 549
 550	ASSERT(xfs_buf_islocked(bp));
 551	*O_bpp = bp;
 552	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
 553
 554	return 0;
 555}
 556
 557
 558/*
 559 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 560 * and release the buffer immediately.
 561 *
  562 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 563 */
 564int
 565xfs_qm_dqread(
 566	struct xfs_mount	*mp,
 567	xfs_dqid_t		id,
 568	uint			type,
 569	uint			flags,
 570	struct xfs_dquot	**O_dqpp)
 571{
 572	struct xfs_dquot	*dqp;
 573	struct xfs_disk_dquot	*ddqp;
 574	struct xfs_buf		*bp;
 575	struct xfs_trans	*tp = NULL;
 576	int			error;
 577
 578	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
 579
 580	dqp->dq_flags = type;
 581	dqp->q_core.d_id = cpu_to_be32(id);
 582	dqp->q_mount = mp;
 583	INIT_LIST_HEAD(&dqp->q_lru);
 584	mutex_init(&dqp->q_qlock);
 585	init_waitqueue_head(&dqp->q_pinwait);
 586
 587	/*
 588	 * Because we want to use a counting completion, complete
 589	 * the flush completion once to allow a single access to
 590	 * the flush completion without blocking.
 591	 */
 592	init_completion(&dqp->q_flush);
 593	complete(&dqp->q_flush);
 594
 595	/*
 596	 * Make sure group quotas have a different lock class than user
 597	 * quotas.
 598	 */
 599	switch (type) {
 600	case XFS_DQ_USER:
 601		/* uses the default lock class */
 602		break;
 603	case XFS_DQ_GROUP:
 604		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
 605		break;
 606	case XFS_DQ_PROJ:
 607		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
 608		break;
 609	default:
 610		ASSERT(0);
 611		break;
 612	}
 613
 614	XFS_STATS_INC(mp, xs_qm_dquot);
 615
 616	trace_xfs_dqread(dqp);
 617
 618	if (flags & XFS_QMOPT_DQALLOC) {
 619		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
 620				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
 621		if (error)
 622			goto error0;
 623	}
 624
 625	/*
  626	 * get a pointer to the on-disk dquot and the buffer containing it;
 627	 * dqp already knows its own type (GROUP/USER).
 628	 */
 629	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
 630	if (error) {
 631		/*
 632		 * This can happen if quotas got turned off (ESRCH),
 633		 * or if the dquot didn't exist on disk and we ask to
 634		 * allocate (ENOENT).
 635		 */
 636		trace_xfs_dqread_fail(dqp);
 637		goto error1;
 638	}
 639
 640	/* copy everything from disk dquot to the incore dquot */
 641	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
 642	xfs_qm_dquot_logitem_init(dqp);
 643
 644	/*
 645	 * Reservation counters are defined as reservation plus current usage
 646	 * to avoid having to add every time.
 647	 */
 648	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
 649	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
 650	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
 651
 652	/* initialize the dquot speculative prealloc thresholds */
 653	xfs_dquot_set_prealloc_limits(dqp);
 654
 655	/* Mark the buf so that this will stay incore a little longer */
 656	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 657
 658	/*
  659	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
  660	 * so we need to release it with xfs_trans_brelse().
 661	 * The strategy here is identical to that of inodes; we lock
 662	 * the dquot in xfs_qm_dqget() before making it accessible to
 663	 * others. This is because dquots, like inodes, need a good level of
 664	 * concurrency, and we don't want to take locks on the entire buffers
 665	 * for dquot accesses.
 666	 * Note also that the dquot buffer may even be dirty at this point, if
 667	 * this particular dquot was repaired. We still aren't afraid to
 668	 * brelse it because we have the changes incore.
 669	 */
 670	ASSERT(xfs_buf_islocked(bp));
 671	xfs_trans_brelse(tp, bp);
 672
 673	if (tp) {
 674		error = xfs_trans_commit(tp);
 675		if (error)
 676			goto error0;
 677	}
 678
 679	*O_dqpp = dqp;
 680	return error;
 681
 682error1:
 683	if (tp)
 684		xfs_trans_cancel(tp);
 685error0:
 686	xfs_qm_dqdestroy(dqp);
 687	*O_dqpp = NULL;
 688	return error;
 689}
 690
 691/*
 692 * Advance to the next id in the current chunk, or if at the
  693 * end of the chunk, skip ahead to the first id in the next allocated chunk
 694 * using the SEEK_DATA interface.
 695 */
 696static int
 697xfs_dq_get_next_id(
 698	xfs_mount_t		*mp,
 699	uint			type,
 700	xfs_dqid_t		*id,
 701	loff_t			eof)
 702{
 703	struct xfs_inode	*quotip;
 704	xfs_fsblock_t		start;
 705	loff_t			offset;
 706	uint			lock;
 707	xfs_dqid_t		next_id;
 708	int			error = 0;
 709
 710	/* Simple advance */
 711	next_id = *id + 1;
 712
 713	/* If we'd wrap past the max ID, stop */
 714	if (next_id < *id)
 715		return -ENOENT;
 716
 717	/* If new ID is within the current chunk, advancing it sufficed */
 718	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
 719		*id = next_id;
 720		return 0;
 721	}
 722
 723	/* Nope, next_id is now past the current chunk, so find the next one */
 724	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
 725
 726	quotip = xfs_quota_inode(mp, type);
 727	lock = xfs_ilock_data_map_shared(quotip);
 728
 729	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
 730				      eof, SEEK_DATA);
 731	if (offset < 0)
 732		error = offset;
 733
 734	xfs_iunlock(quotip, lock);
 735
 736	/* -ENXIO is essentially "no more data" */
 737	if (error)
  738		return (error == -ENXIO ? -ENOENT : error);
 739
 740	/* Convert next data offset back to a quota id */
 741	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
 742	return 0;
 743}
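/*
 * A worked example of the id/chunk arithmetic above, with an assumed
 * qi_dqperchunk of 30 (the real value depends on block and dquot sizes):
 * ids 0-29 live in chunk 0, ids 30-59 in chunk 1, and so on.  Advancing
 * from id 29 gives next_id == 30; 30 % 30 == 0, so we fall through to
 * the SEEK_DATA lookup with start == 30 / 30 == 1, the second chunk.
 * If SEEK_DATA reports data at the byte offset of fsblock 5, the id
 * handed back is 5 * 30 == 150, the first id in that chunk.
 */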
 744
 745/*
  746 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
  747 * a locked dquot, doing an allocation (if requested) as needed.
 748 * When both an inode and an id are given, the inode's id takes precedence.
 749 * That is, if the id changes while we don't hold the ilock inside this
 750 * function, the new dquot is returned, not necessarily the one requested
 751 * in the id argument.
 752 */
 753int
 754xfs_qm_dqget(
 755	xfs_mount_t	*mp,
 756	xfs_inode_t	*ip,	  /* locked inode (optional) */
 757	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
 758	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
 759	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
 760	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
 761{
 762	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 763	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
 764	struct xfs_dquot	*dqp;
 765	loff_t			eof = 0;
 766	int			error;
 767
 768	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  769	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
  770	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
  771	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
 772		return -ESRCH;
 773	}
 774
 775#ifdef DEBUG
 776	if (xfs_do_dqerror) {
 777		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
 778		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
 779			xfs_debug(mp, "Returning error in dqget");
 780			return -EIO;
 781		}
 782	}
 783
 784	ASSERT(type == XFS_DQ_USER ||
 785	       type == XFS_DQ_PROJ ||
 786	       type == XFS_DQ_GROUP);
 787	if (ip) {
 788		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 789		ASSERT(xfs_inode_dquot(ip, type) == NULL);
 790	}
 791#endif
 792
 793	/* Get the end of the quota file if we need it */
 794	if (flags & XFS_QMOPT_DQNEXT) {
 795		struct xfs_inode	*quotip;
 796		xfs_fileoff_t		last;
 797		uint			lock_mode;
 798
 799		quotip = xfs_quota_inode(mp, type);
 800		lock_mode = xfs_ilock_data_map_shared(quotip);
 801		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
 802		xfs_iunlock(quotip, lock_mode);
 803		if (error)
 804			return error;
 805		eof = XFS_FSB_TO_B(mp, last);
 806	}
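	/*
	 * Illustrative arithmetic for the conversion above (assumed
	 * values, not from this code): with 4096-byte filesystem blocks
	 * (sb_blocklog == 12) and a quota file whose last mapped offset
	 * is block 8, XFS_FSB_TO_B(mp, 8) yields eof == 8 << 12 == 32768.
	 */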
 807
 808restart:
 809	mutex_lock(&qi->qi_tree_lock);
 810	dqp = radix_tree_lookup(tree, id);
 811	if (dqp) {
 812		xfs_dqlock(dqp);
 813		if (dqp->dq_flags & XFS_DQ_FREEING) {
 814			xfs_dqunlock(dqp);
 815			mutex_unlock(&qi->qi_tree_lock);
 816			trace_xfs_dqget_freeing(dqp);
 817			delay(1);
 818			goto restart;
 819		}
 820
  821		/* uninit/unused quota found in radix tree, keep looking */
 822		if (flags & XFS_QMOPT_DQNEXT) {
 823			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 824				xfs_dqunlock(dqp);
 825				mutex_unlock(&qi->qi_tree_lock);
 826				error = xfs_dq_get_next_id(mp, type, &id, eof);
 827				if (error)
 828					return error;
 829				goto restart;
 830			}
 831		}
 832
 833		dqp->q_nrefs++;
 834		mutex_unlock(&qi->qi_tree_lock);
 835
 836		trace_xfs_dqget_hit(dqp);
 837		XFS_STATS_INC(mp, xs_qm_dqcachehits);
 838		*O_dqpp = dqp;
 839		return 0;
 840	}
 841	mutex_unlock(&qi->qi_tree_lock);
 842	XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 843
 844	/*
 845	 * Dquot cache miss. We don't want to keep the inode lock across
 846	 * a (potential) disk read. Also we don't want to deal with the lock
  847	 * ordering between the quota inode and this inode. OTOH, dropping
  848	 * the inode lock here means dealing with a chown that can happen
  849	 * before we re-acquire the lock.
 850	 */
 851	if (ip)
 852		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 853
 854	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
 855
 856	if (ip)
 857		xfs_ilock(ip, XFS_ILOCK_EXCL);
 858
 859	/* If we are asked to find next active id, keep looking */
 860	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
 861		error = xfs_dq_get_next_id(mp, type, &id, eof);
 862		if (!error)
 863			goto restart;
 864	}
 865
 866	if (error)
 867		return error;
 868
 869	if (ip) {
 870		/*
 871		 * A dquot could be attached to this inode by now, since
 872		 * we had dropped the ilock.
 873		 */
 874		if (xfs_this_quota_on(mp, type)) {
 875			struct xfs_dquot	*dqp1;
 876
 877			dqp1 = xfs_inode_dquot(ip, type);
 878			if (dqp1) {
 879				xfs_qm_dqdestroy(dqp);
 880				dqp = dqp1;
 881				xfs_dqlock(dqp);
 882				goto dqret;
 883			}
 884		} else {
 885			/* inode stays locked on return */
 886			xfs_qm_dqdestroy(dqp);
 887			return -ESRCH;
 888		}
 889	}
 890
 891	mutex_lock(&qi->qi_tree_lock);
 892	error = radix_tree_insert(tree, id, dqp);
 893	if (unlikely(error)) {
 894		WARN_ON(error != -EEXIST);
 895
 896		/*
 897		 * Duplicate found. Just throw away the new dquot and start
 898		 * over.
 899		 */
 900		mutex_unlock(&qi->qi_tree_lock);
 901		trace_xfs_dqget_dup(dqp);
 902		xfs_qm_dqdestroy(dqp);
 903		XFS_STATS_INC(mp, xs_qm_dquot_dups);
 904		goto restart;
 905	}
 906
 907	/*
  908	 * We return a locked dquot to the caller, with a reference taken.
 909	 */
 910	xfs_dqlock(dqp);
 911	dqp->q_nrefs = 1;
 912
 913	qi->qi_dquots++;
 914	mutex_unlock(&qi->qi_tree_lock);
 915
 916	/* If we are asked to find next active id, keep looking */
 917	if (flags & XFS_QMOPT_DQNEXT) {
 918		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 919			xfs_qm_dqput(dqp);
 920			error = xfs_dq_get_next_id(mp, type, &id, eof);
 921			if (error)
 922				return error;
 923			goto restart;
 924		}
 925	}
 926
 927 dqret:
 928	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 929	trace_xfs_dqget_miss(dqp);
 930	*O_dqpp = dqp;
 931	return 0;
 932}
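/*
 * A minimal usage sketch for xfs_qm_dqget(); the callers in xfs_qm.c are
 * the authoritative examples, this only illustrates the expected lock and
 * reference discipline:
 *
 *	struct xfs_dquot *dqp;
 *	int error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &dqp);
 *	if (!error) {
 *		xfs_dqunlock(dqp);	(returned locked, one reference held)
 *		...
 *		xfs_qm_dqrele(dqp);	(drop the reference when done)
 *	}
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */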
 933
 934/*
 935 * Release a reference to the dquot (decrement ref-count) and unlock it.
 936 *
 937 * If there is a group quota attached to this dquot, carefully release that
  938 * too without tripping over deadlocks.
 939 */
 940void
 941xfs_qm_dqput(
 942	struct xfs_dquot	*dqp)
 943{
 944	ASSERT(dqp->q_nrefs > 0);
 945	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 946
 947	trace_xfs_dqput(dqp);
 948
 949	if (--dqp->q_nrefs == 0) {
 950		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
 951		trace_xfs_dqput_free(dqp);
 952
 953		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
 954			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
 955	}
 956	xfs_dqunlock(dqp);
 957}
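/*
 * The reference counting above pairs with xfs_qm_dqget(); a sketch of
 * the lifecycle (illustrative, not project API):
 *
 *	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);  (q_nrefs == 1)
 *	... use dqp, which is returned locked ...
 *	xfs_qm_dqput(dqp);	(q_nrefs drops to 0: the dquot is parked
 *				 on qi_lru and unlocked; the quota LRU
 *				 shrinker in xfs_qm.c reclaims it later)
 */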
 958
 959/*
  960 * Release a dquot: just dqput() it; dirty dquots get flushed at
  961 * reclaim time instead. The dquot must not be locked by the caller.
 962 */
 963void
 964xfs_qm_dqrele(
 965	xfs_dquot_t	*dqp)
 966{
 967	if (!dqp)
 968		return;
 969
 970	trace_xfs_dqrele(dqp);
 971
 972	xfs_dqlock(dqp);
 973	/*
  974	 * We don't bother to flush the dquot even if it is dirty here;
  975	 * that would create stutters that we want to avoid.
 976	 * Instead we do a delayed write when we try to reclaim
 977	 * a dirty dquot. Also xfs_sync will take part of the burden...
 978	 */
 979	xfs_qm_dqput(dqp);
 980}
 981
 982/*
 983 * This is the dquot flushing I/O completion routine.  It is called
 984 * from interrupt level when the buffer containing the dquot is
 985 * flushed to disk.  It is responsible for removing the dquot logitem
 986 * from the AIL if it has not been re-logged, and unlocking the dquot's
  987 * flush lock. This behavior is very similar to that of inodes.
 988 */
 989STATIC void
 990xfs_qm_dqflush_done(
 991	struct xfs_buf		*bp,
 992	struct xfs_log_item	*lip)
 993{
 994	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
 995	xfs_dquot_t		*dqp = qip->qli_dquot;
 996	struct xfs_ail		*ailp = lip->li_ailp;
 997
  998	/*
  999	 * We only want to pull the item from the AIL if its
 1000	 * location in the log has not changed since we started the
 1001	 * flush, i.e. if the dquot's lsn is still the flush lsn.
 1002	 * We check the lsn outside the lock first since it's cheaper,
 1003	 * and then recheck while holding the lock before removing
 1004	 * the dquot from the AIL.
 1005	 */
1006	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1007	    lip->li_lsn == qip->qli_flush_lsn) {
1008
1009		/* xfs_trans_ail_delete() drops the AIL lock. */
1010		spin_lock(&ailp->xa_lock);
1011		if (lip->li_lsn == qip->qli_flush_lsn)
1012			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1013		else
1014			spin_unlock(&ailp->xa_lock);
1015	}
1016
1017	/*
1018	 * Release the dq's flush lock since we're done with it.
1019	 */
1020	xfs_dqfunlock(dqp);
1021}
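/*
 * The AIL removal above is an instance of a general check-then-recheck
 * pattern; a sketch in generic form (names are illustrative, not project
 * API):
 *
 *	if (racy_check(x)) {		unlocked test: cheap, may be stale
 *		spin_lock(&lock);
 *		if (stable_check(x))	re-verify under the lock
 *			do_removal(x);	safe: state cannot change now
 *		else
 *			spin_unlock(&lock);
 *	}
 *
 * The unlocked test only filters out the common no-op case; correctness
 * rests entirely on the locked re-check.
 */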
1022
1023/*
1024 * Write a modified dquot to disk.
 1025 * The caller must hold both the dquot lock and the flush lock.
1026 * The flush lock will not be unlocked until the dquot reaches the disk,
1027 * but the dquot is free to be unlocked and modified by the caller
1028 * in the interim. Dquot is still locked on return. This behavior is
1029 * identical to that of inodes.
1030 */
1031int
1032xfs_qm_dqflush(
1033	struct xfs_dquot	*dqp,
1034	struct xfs_buf		**bpp)
1035{
1036	struct xfs_mount	*mp = dqp->q_mount;
1037	struct xfs_buf		*bp;
1038	struct xfs_disk_dquot	*ddqp;
1039	int			error;
1040
1041	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1042	ASSERT(!completion_done(&dqp->q_flush));
1043
1044	trace_xfs_dqflush(dqp);
1045
1046	*bpp = NULL;
1047
1048	xfs_qm_dqunpin_wait(dqp);
1049
1050	/*
1051	 * This may have been unpinned because the filesystem is shutting
1052	 * down forcibly. If that's the case we must not write this dquot
1053	 * to disk, because the log record didn't make it to disk.
1054	 *
1055	 * We also have to remove the log item from the AIL in this case,
 1056	 * as we wait for an empty AIL as part of the unmount process.
1057	 */
1058	if (XFS_FORCED_SHUTDOWN(mp)) {
1059		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1060		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1061
1062		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1063
1064		error = -EIO;
1065		goto out_unlock;
1066	}
1067
1068	/*
1069	 * Get the buffer containing the on-disk dquot
1070	 */
1071	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1072				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1073				   &xfs_dquot_buf_ops);
1074	if (error)
1075		goto out_unlock;
1076
1077	/*
1078	 * Calculate the location of the dquot inside the buffer.
1079	 */
1080	ddqp = bp->b_addr + dqp->q_bufoffset;
1081
1082	/*
 1083	 * A simple sanity check in case we got a corrupted dquot.
1084	 */
1085	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1086			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1087	if (error) {
1088		xfs_buf_relse(bp);
1089		xfs_dqfunlock(dqp);
1090		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1091		return -EIO;
1092	}
1093
1094	/* This is the only portion of data that needs to persist */
1095	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1096
1097	/*
1098	 * Clear the dirty field and remember the flush lsn for later use.
1099	 */
1100	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1101
1102	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1103					&dqp->q_logitem.qli_item.li_lsn);
1104
1105	/*
 1106	 * Copy the lsn into the on-disk dquot now, while we have the in-memory
 1107	 * dquot here. This can't be done later in the write verifier as we
1108	 * can't get access to the log item at that point in time.
1109	 *
1110	 * We also calculate the CRC here so that the on-disk dquot in the
1111	 * buffer always has a valid CRC. This ensures there is no possibility
1112	 * of a dquot without an up-to-date CRC getting to disk.
1113	 */
1114	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1115		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1116
1117		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1118		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1119				 XFS_DQUOT_CRC_OFF);
1120	}
1121
1122	/*
1123	 * Attach an iodone routine so that we can remove this dquot from the
1124	 * AIL and release the flush lock once the dquot is synced to disk.
1125	 */
1126	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1127				  &dqp->q_logitem.qli_item);
1128
1129	/*
1130	 * If the buffer is pinned then push on the log so we won't
1131	 * get stuck waiting in the write for too long.
1132	 */
1133	if (xfs_buf_ispinned(bp)) {
1134		trace_xfs_dqflush_force(dqp);
1135		xfs_log_force(mp, 0);
1136	}
1137
1138	trace_xfs_dqflush_done(dqp);
1139	*bpp = bp;
1140	return 0;
1141
1142out_unlock:
1143	xfs_dqfunlock(dqp);
 1144	return error;
1145}
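/*
 * A sketch of how callers are expected to drive xfs_qm_dqflush(),
 * modelled on the flush paths in xfs_qm.c (local names are illustrative):
 *
 *	struct xfs_buf *bp;
 *	int error;
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);			(take the flush lock)
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	xfs_dqunlock(dqp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *
 * The flush lock is not dropped by the caller on success;
 * xfs_qm_dqflush_done() releases it once the buffer makes it to disk.
 */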
1146
1147/*
1148 * Lock two xfs_dquot structures.
1149 *
1150 * To avoid deadlocks we always lock the quota structure with
 1151 * the lower id first.
1152 */
1153void
1154xfs_dqlock2(
1155	xfs_dquot_t	*d1,
1156	xfs_dquot_t	*d2)
1157{
1158	if (d1 && d2) {
1159		ASSERT(d1 != d2);
1160		if (be32_to_cpu(d1->q_core.d_id) >
1161		    be32_to_cpu(d2->q_core.d_id)) {
1162			mutex_lock(&d2->q_qlock);
1163			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1164		} else {
1165			mutex_lock(&d1->q_qlock);
1166			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1167		}
1168	} else if (d1) {
1169		mutex_lock(&d1->q_qlock);
1170	} else if (d2) {
1171		mutex_lock(&d2->q_qlock);
1172	}
1173}
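/*
 * Example of the ordering above with illustrative ids: given dquots with
 * ids 100 and 500, xfs_dqlock2() locks id 100 first no matter which
 * argument order the caller uses, so
 *
 *	xfs_dqlock2(a, b);	and
 *	xfs_dqlock2(b, a);
 *
 * take the two q_qlocks in the same global order, and two tasks locking
 * the same pair cannot deadlock against each other.
 */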
1174
1175int __init
1176xfs_qm_init(void)
1177{
1178	xfs_qm_dqzone =
1179		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1180	if (!xfs_qm_dqzone)
1181		goto out;
1182
1183	xfs_qm_dqtrxzone =
1184		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1185	if (!xfs_qm_dqtrxzone)
1186		goto out_free_dqzone;
1187
1188	return 0;
1189
1190out_free_dqzone:
1191	kmem_zone_destroy(xfs_qm_dqzone);
1192out:
1193	return -ENOMEM;
1194}
1195
1196void
1197xfs_qm_exit(void)
1198{
1199	kmem_zone_destroy(xfs_qm_dqtrxzone);
1200	kmem_zone_destroy(xfs_qm_dqzone);
1201}