Linux Audio

Check our new training course

Loading...
v3.5.6
 
  1/*
  2 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_log.h"
 21#include "xfs_trans.h"
 22#include "xfs_sb.h"
 23#include "xfs_ag.h"
 24#include "xfs_alloc.h"
 25#include "xfs_quota.h"
 26#include "xfs_mount.h"
 27#include "xfs_bmap_btree.h"
 28#include "xfs_inode.h"
 29#include "xfs_itable.h"
 30#include "xfs_bmap.h"
 31#include "xfs_rtalloc.h"
 32#include "xfs_error.h"
 33#include "xfs_attr.h"
 34#include "xfs_buf_item.h"
 35#include "xfs_trans_priv.h"
 
 36#include "xfs_qm.h"
 
 
 37
 38STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
 39
 40/*
 41 * Add the locked dquot to the transaction.
 42 * The dquot must be locked, and it cannot be associated with any
 43 * transaction.
 44 */
 45void
 46xfs_trans_dqjoin(
 47	xfs_trans_t	*tp,
 48	xfs_dquot_t	*dqp)
 49{
 50	ASSERT(dqp->q_transp != tp);
 51	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 52	ASSERT(dqp->q_logitem.qli_dquot == dqp);
 53
 54	/*
 55	 * Get a log_item_desc to point at the new item.
 56	 */
 57	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
 58
 59	/*
 60	 * Initialize d_transp so we can later determine if this dquot is
 61	 * associated with this transaction.
 62	 */
 63	dqp->q_transp = tp;
 64}
 65
 66
 67/*
 68 * This is called to mark the dquot as needing
 69 * to be logged when the transaction is committed.  The dquot must
 70 * already be associated with the given transaction.
 71 * Note that it marks the entire transaction as dirty. In the ordinary
 72 * case, this gets called via xfs_trans_commit, after the transaction
 73 * is already dirty. However, there's nothing stop this from getting
 74 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 75 * flag.
 76 */
 77void
 78xfs_trans_log_dquot(
 79	xfs_trans_t	*tp,
 80	xfs_dquot_t	*dqp)
 81{
 82	ASSERT(dqp->q_transp == tp);
 83	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 84
 
 
 
 
 
 
 85	tp->t_flags |= XFS_TRANS_DIRTY;
 86	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 87}
 88
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	xfs_trans_t	*otp,
	xfs_trans_t	*ntp)
{
	xfs_dqtrx_t	*oq, *nq;
	int		i,j;
	xfs_dqtrx_t	*oqa, *nqa;

	/* Nothing to carry over if the old transaction touched no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);
	oqa = otp->t_dqinfo->dqa_usrdquots;
	nqa = ntp->t_dqinfo->dqa_usrdquots;

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if(otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	/* First pass covers user dquots, second pass group dquots. */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			/* The dqtrx array is filled sequentially, not sparsely. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			/* The new slot starts with zeroed usage deltas. */
			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations:
			 * the old transaction keeps only what it already
			 * used, the unused remainder moves to the new one.
			 */
			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
			oq->qt_blk_res = oq->qt_blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
		oqa = otp->t_dqinfo->dqa_grpdquots;
		nqa = ntp->t_dqinfo->dqa_grpdquots;
	}
}
145
146/*
147 * Wrap around mod_dquot to account for both user and group quotas.
148 */
149void
150xfs_trans_mod_dquot_byino(
151	xfs_trans_t	*tp,
152	xfs_inode_t	*ip,
153	uint		field,
154	long		delta)
155{
156	xfs_mount_t	*mp = tp->t_mountp;
157
158	if (!XFS_IS_QUOTA_RUNNING(mp) ||
159	    !XFS_IS_QUOTA_ON(mp) ||
160	    ip->i_ino == mp->m_sb.sb_uquotino ||
161	    ip->i_ino == mp->m_sb.sb_gquotino)
162		return;
163
164	if (tp->t_dqinfo == NULL)
165		xfs_trans_alloc_dqinfo(tp);
166
167	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
168		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
169	if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
170		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
 
 
171}
172
173STATIC xfs_dqtrx_t *
174xfs_trans_get_dqtrx(
175	xfs_trans_t	*tp,
176	xfs_dquot_t	*dqp)
177{
178	int		i;
179	xfs_dqtrx_t	*qa;
180
181	qa = XFS_QM_ISUDQ(dqp) ?
182		tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
 
 
 
 
 
 
 
 
 
 
 
183
184	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
185		if (qa[i].qt_dquot == NULL ||
186		    qa[i].qt_dquot == dqp)
187			return &qa[i];
188	}
189
190	return NULL;
191}
192
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	uint		field,
	long		delta)
{
	xfs_dqtrx_t	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	/* First dquot modification in this transaction: set up t_dqinfo. */
	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {

		/*
		 * regular disk blk reservation
		 */
	      case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += (ulong)delta;
		break;

		/*
		 * inode reservation
		 */
	      case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += (ulong)delta;
		break;

		/*
		 * disk blocks used.
		 */
	      case XFS_TRANS_DQ_BCOUNT:
		if (qtrx->qt_blk_res && delta > 0) {
			/* Track how much of the block reservation is consumed. */
			qtrx->qt_blk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
		}
		qtrx->qt_bcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

		/*
		 * Inode Count
		 */
	      case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			/* Track how much of the inode reservation is consumed. */
			qtrx->qt_ino_res_used += (ulong)delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

		/*
		 * rtblk reservation
		 */
	      case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += (ulong)delta;
		break;

		/*
		 * rtblk count
		 */
	      case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			/* Track how much of the rtblk reservation is consumed. */
			qtrx->qt_rtblk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	      default:
		ASSERT(0);
	}
	/* Remember that this transaction now carries dquot modifications. */
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
292
293
294/*
295 * Given an array of dqtrx structures, lock all the dquots associated
296 * and join them to the transaction, provided they have been modified.
297 * We know that the highest number of dquots (of one type - usr OR grp),
298 * involved in a transaction is 2 and that both usr and grp combined - 3.
299 * So, we don't attempt to make this very generic.
300 */
301STATIC void
302xfs_trans_dqlockedjoin(
303	xfs_trans_t	*tp,
304	xfs_dqtrx_t	*q)
305{
306	ASSERT(q[0].qt_dquot != NULL);
307	if (q[1].qt_dquot == NULL) {
308		xfs_dqlock(q[0].qt_dquot);
309		xfs_trans_dqjoin(tp, q[0].qt_dquot);
310	} else {
311		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
312		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
313		xfs_trans_dqjoin(tp, q[0].qt_dquot);
314		xfs_trans_dqjoin(tp, q[1].qt_dquot);
315	}
316}
317
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	xfs_disk_dquot_t	*d;
	long			totalbdelta;
	long			totalrtbdelta;

	/* Nothing to apply if no dquot was modified in this transaction. */
	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	qa = tp->t_dqinfo->dqa_usrdquots;
	/* First pass covers user dquots, second pass group dquots. */
	for (j = 0; j < 2; j++) {
		if (qa[0].qt_dquot == NULL) {
			/* No dquots of this type were modified; try the next. */
			qa = tp->t_dqinfo->dqa_grpdquots;
			continue;
		}

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(dqp->q_transp == tp);

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			/* Usage counters must never be driven negative. */
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			/* Fold the accumulated deltas into the on-disk image. */
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, d);
				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
					if (qtrx->qt_blk_res >
					    qtrx->qt_blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 qtrx->qt_blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
						       (qtrx->qt_rtblk_res -
							qtrx->qt_rtblk_res_used);
					else
					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
						       (qtrx->qt_rtblk_res_used -
							qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
					    (xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			/* Reservations always cover at least the actual usage. */
			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}
		/*
		 * Do the group quotas next
		 */
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
504
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;

	/* Nothing to undo if no dquot modifications were made. */
	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;

	/* First pass covers user dquots, second pass group dquots. */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 * The dquot is locked lazily: only if at least one
			 * reservation actually needs to be backed out.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
571
572STATIC void
573xfs_quota_warn(
574	struct xfs_mount	*mp,
575	struct xfs_dquot	*dqp,
576	int			type)
577{
578	/* no warnings for project quotas - we just return ENOSPC later */
579	if (dqp->dq_flags & XFS_DQ_PROJ)
 
 
 
 
 
 
 
 
 
 
 
580		return;
581	quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
582			   be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
583			   type);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
584}
585
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 *
 * Returns 0 on success, EDQUOT (or ENOSPC when XFS_QMOPT_ENOSPC is set)
 * when the reservation would exceed an enforced limit.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		timer;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	total_count;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;


	xfs_dqlock(dqp);

	/*
	 * Pick the limits, timer, warning counts and reservation counter
	 * for either the regular or the realtime block pool.  A zero
	 * per-dquot limit means "use the filesystem-wide default".
	 */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	/*
	 * Only enforce limits when not forced, for non-root (id != 0)
	 * dquots, and when enforcement is enabled for this quota type.
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
	      (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				/*
				 * Over softlimit is fatal only once the grace
				 * timer has expired or warnings are exhausted.
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			/* Re-use the limit variables for the inode checks. */
			total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if  ((timer != 0 && get_seconds() > timer) ||
				     (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return ENOSPC;
	return EDQUOT;
}
733
734
735/*
736 * Given dquot(s), make disk block and/or inode reservations against them.
737 * The fact that this does the reservation against both the usr and
738 * grp/prj quotas is important, because this follows a both-or-nothing
739 * approach.
740 *
741 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
742 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
743 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
744 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
745 * dquots are unlocked on return, if they were not locked by caller.
746 */
747int
748xfs_trans_reserve_quota_bydquots(
749	xfs_trans_t	*tp,
750	xfs_mount_t	*mp,
751	xfs_dquot_t	*udqp,
752	xfs_dquot_t	*gdqp,
753	long		nblks,
754	long		ninos,
755	uint		flags)
 
756{
757	int		resvd = 0, error;
758
759	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
760		return 0;
761
762	if (tp && tp->t_dqinfo == NULL)
763		xfs_trans_alloc_dqinfo(tp);
764
765	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
766
767	if (udqp) {
768		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
769					(flags & ~XFS_QMOPT_ENOSPC));
770		if (error)
771			return error;
772		resvd = 1;
773	}
774
775	if (gdqp) {
776		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
777		if (error) {
778			/*
779			 * can't do it, so backout previous reservation
780			 */
781			if (resvd) {
782				flags |= XFS_QMOPT_FORCE_RES;
783				xfs_trans_dqresv(tp, mp, udqp,
784						 -nblks, -ninos, flags);
785			}
786			return error;
787		}
788	}
789
790	/*
791	 * Didn't change anything critical, so, no need to log
792	 */
793	return 0;
 
 
 
 
 
 
 
 
 
 
794}
795
796
797/*
798 * Lock the dquot and change the reservation if we can.
799 * This doesn't change the actual usage, just the reservation.
800 * The inode sent in is locked.
801 */
802int
803xfs_trans_reserve_quota_nblks(
804	struct xfs_trans	*tp,
805	struct xfs_inode	*ip,
806	long			nblks,
807	long			ninos,
808	uint			flags)
809{
810	struct xfs_mount	*mp = ip->i_mount;
 
 
811
812	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
813		return 0;
814	if (XFS_IS_PQUOTA_ON(mp))
815		flags |= XFS_QMOPT_ENOSPC;
816
817	ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
818	ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
819
 
820	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
821	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
822				XFS_TRANS_DQ_RES_RTBLKS ||
823	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
824				XFS_TRANS_DQ_RES_BLKS);
825
826	/*
827	 * Reserve nblks against these dquots, with trans as the mediator.
828	 */
829	return xfs_trans_reserve_quota_bydquots(tp, mp,
830						ip->i_udquot, ip->i_gdquot,
831						nblks, ninos, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
832}
833
834/*
835 * This routine is called to allocate a quotaoff log item.
836 */
837xfs_qoff_logitem_t *
838xfs_trans_get_qoff_item(
839	xfs_trans_t		*tp,
840	xfs_qoff_logitem_t	*startqoff,
841	uint			flags)
842{
843	xfs_qoff_logitem_t	*q;
844
845	ASSERT(tp != NULL);
846
847	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
848	ASSERT(q != NULL);
849
850	/*
851	 * Get a log_item_desc to point at the new item.
852	 */
853	xfs_trans_add_item(tp, &q->qql_item);
854	return q;
855}
856
857
858/*
859 * This is called to mark the quotaoff logitem as needing
860 * to be logged when the transaction is committed.  The logitem must
861 * already be associated with the given transaction.
862 */
863void
864xfs_trans_log_quotaoff_item(
865	xfs_trans_t		*tp,
866	xfs_qoff_logitem_t	*qlp)
867{
868	tp->t_flags |= XFS_TRANS_DIRTY;
869	qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
870}
871
/*
 * Allocate a zeroed per-transaction dquot-tracking structure and attach
 * it to the transaction.  Memory comes from the xfs_qm_dqtrxzone slab
 * cache; the allocation may sleep (KM_SLEEP).
 */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
}
878
879void
880xfs_trans_free_dqinfo(
881	xfs_trans_t	*tp)
882{
883	if (!tp->t_dqinfo)
884		return;
885	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
886	tp->t_dqinfo = NULL;
887}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
  4 * All Rights Reserved.
 
 
 
 
 
 
 
 
 
 
 
 
 
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 
 
 12#include "xfs_mount.h"
 
 13#include "xfs_inode.h"
 14#include "xfs_trans.h"
 
 
 
 
 
 15#include "xfs_trans_priv.h"
 16#include "xfs_quota.h"
 17#include "xfs_qm.h"
 18#include "xfs_trace.h"
 19#include "xfs_error.h"
 20
 21STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
 22
 23/*
 24 * Add the locked dquot to the transaction.
 25 * The dquot must be locked, and it cannot be associated with any
 26 * transaction.
 27 */
 28void
 29xfs_trans_dqjoin(
 30	struct xfs_trans	*tp,
 31	struct xfs_dquot	*dqp)
 32{
 
 33	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 34	ASSERT(dqp->q_logitem.qli_dquot == dqp);
 35
 36	/*
 37	 * Get a log_item_desc to point at the new item.
 38	 */
 39	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
 
 
 
 
 
 
 40}
 41
 
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Upgrade the dquot to bigtime format if possible: only for non-root
	 * dquots, on filesystems with the bigtime feature, and only if not
	 * already upgraded.
	 */
	if (dqp->q_id != 0 &&
	    xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	/* Dirty the transaction and the dquot's log item. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}
 68
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	/* Nothing to carry over if the old transaction touched no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/* Walk every quota type (usr/grp/prj) tracked by the transaction. */
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			/* The dqtrx array is filled sequentially, not sparsely. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			/* A positive bcount delta is what was used of the resv. */
			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			/* The new slot starts with zeroed usage deltas. */
			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
	}
}
122
123/*
124 * Wrap around mod_dquot to account for both user and group quotas.
125 */
126void
127xfs_trans_mod_dquot_byino(
128	xfs_trans_t	*tp,
129	xfs_inode_t	*ip,
130	uint		field,
131	int64_t		delta)
132{
133	xfs_mount_t	*mp = tp->t_mountp;
134
135	if (!XFS_IS_QUOTA_RUNNING(mp) ||
136	    !XFS_IS_QUOTA_ON(mp) ||
137	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
 
138		return;
139
 
 
 
140	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
141		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
142	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
143		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
144	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
145		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
146}
147
148STATIC struct xfs_dqtrx *
149xfs_trans_get_dqtrx(
150	struct xfs_trans	*tp,
151	struct xfs_dquot	*dqp)
152{
153	int			i;
154	struct xfs_dqtrx	*qa;
155
156	switch (xfs_dquot_type(dqp)) {
157	case XFS_DQTYPE_USER:
158		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
159		break;
160	case XFS_DQTYPE_GROUP:
161		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
162		break;
163	case XFS_DQTYPE_PROJ:
164		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
165		break;
166	default:
167		return NULL;
168	}
169
170	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
171		if (qa[i].qt_dquot == NULL ||
172		    qa[i].qt_dquot == dqp)
173			return &qa[i];
174	}
175
176	return NULL;
177}
178
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	/* A zero delta changes nothing; don't even allocate t_dqinfo. */
	if (!delta)
		return;

	/* First dquot modification in this transaction: set up t_dqinfo. */
	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	trace_xfs_trans_mod_dquot_before(qtrx);
	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			/* Track how much of the inode reservation is consumed. */
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			/* Track how much of the rtblk reservation is consumed. */
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	trace_xfs_trans_mod_dquot_after(qtrx);
}
268
269
270/*
271 * Given an array of dqtrx structures, lock all the dquots associated and join
272 * them to the transaction, provided they have been modified.  We know that the
273 * highest number of dquots of one type - usr, grp and prj - involved in a
274 * transaction is 3 so we don't need to make this very generic.
 
275 */
276STATIC void
277xfs_trans_dqlockedjoin(
278	struct xfs_trans	*tp,
279	struct xfs_dqtrx	*q)
280{
281	ASSERT(q[0].qt_dquot != NULL);
282	if (q[1].qt_dquot == NULL) {
283		xfs_dqlock(q[0].qt_dquot);
284		xfs_trans_dqjoin(tp, q[0].qt_dquot);
285	} else {
286		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
287		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
288		xfs_trans_dqjoin(tp, q[0].qt_dquot);
289		xfs_trans_dqjoin(tp, q[1].qt_dquot);
290	}
291}
292
293/* Apply dqtrx changes to the quota reservation counters. */
294static inline void
295xfs_apply_quota_reservation_deltas(
296	struct xfs_dquot_res	*res,
297	uint64_t		reserved,
298	int64_t			res_used,
299	int64_t			count_delta)
300{
301	if (reserved != 0) {
302		/*
303		 * Subtle math here: If reserved > res_used (the normal case),
304		 * we're simply subtracting the unused transaction quota
305		 * reservation from the dquot reservation.
306		 *
307		 * If, however, res_used > reserved, then we have allocated
308		 * more quota blocks than were reserved for the transaction.
309		 * We must add that excess to the dquot reservation since it
310		 * tracks (usage + resv) and by definition we didn't reserve
311		 * that excess.
312		 */
313		res->reserved -= abs(reserved - res_used);
314	} else if (count_delta != 0) {
315		/*
316		 * These blks were never reserved, either inside a transaction
317		 * or outside one (in a delayed allocation). Also, this isn't
318		 * always a negative number since we sometimes deliberately
319		 * skip quota reservations.
320		 */
321		res->reserved += count_delta;
322	}
323}
324
325/*
326 * Called by xfs_trans_commit() and similar in spirit to
327 * xfs_trans_apply_sb_deltas().
328 * Go thru all the dquots belonging to this transaction and modify the
329 * INCORE dquot to reflect the actual usages.
330 * Unreserve just the reservations done by this transaction.
331 * dquot is still left locked at exit.
332 */
333void
334xfs_trans_apply_dquot_deltas(
335	struct xfs_trans	*tp)
336{
337	int			i, j;
338	struct xfs_dquot	*dqp;
339	struct xfs_dqtrx	*qtrx, *qa;
340	int64_t			totalbdelta;
341	int64_t			totalrtbdelta;
 
342
343	if (!tp->t_dqinfo)
344		return;
345
346	ASSERT(tp->t_dqinfo);
347	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
348		qa = tp->t_dqinfo->dqs[j];
349		if (qa[0].qt_dquot == NULL)
 
350			continue;
 
351
352		/*
353		 * Lock all of the dquots and join them to the transaction.
354		 */
355		xfs_trans_dqlockedjoin(tp, qa);
356
357		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
358			uint64_t	blk_res_used;
359
360			qtrx = &qa[i];
361			/*
362			 * The array of dquots is filled
363			 * sequentially, not sparsely.
364			 */
365			if ((dqp = qtrx->qt_dquot) == NULL)
366				break;
367
368			ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
369
370			/*
371			 * adjust the actual number of blocks used
372			 */
 
373
374			/*
375			 * The issue here is - sometimes we don't make a blkquota
376			 * reservation intentionally to be fair to users
377			 * (when the amount is small). On the other hand,
378			 * delayed allocs do make reservations, but that's
379			 * outside of a transaction, so we have no
380			 * idea how much was really reserved.
381			 * So, here we've accumulated delayed allocation blks and
382			 * non-delay blks. The assumption is that the
383			 * delayed ones are always reserved (outside of a
384			 * transaction), and the others may or may not have
385			 * quota reservations.
386			 */
387			totalbdelta = qtrx->qt_bcount_delta +
388				qtrx->qt_delbcnt_delta;
389			totalrtbdelta = qtrx->qt_rtbcount_delta +
390				qtrx->qt_delrtb_delta;
391
392			if (totalbdelta != 0 || totalrtbdelta != 0 ||
393			    qtrx->qt_icount_delta != 0) {
394				trace_xfs_trans_apply_dquot_deltas_before(dqp);
395				trace_xfs_trans_apply_dquot_deltas(qtrx);
396			}
397
398#ifdef DEBUG
399			if (totalbdelta < 0)
400				ASSERT(dqp->q_blk.count >= -totalbdelta);
 
401
402			if (totalrtbdelta < 0)
403				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
 
404
405			if (qtrx->qt_icount_delta < 0)
406				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
 
407#endif
408			if (totalbdelta)
409				dqp->q_blk.count += totalbdelta;
410
411			if (qtrx->qt_icount_delta)
412				dqp->q_ino.count += qtrx->qt_icount_delta;
413
414			if (totalrtbdelta)
415				dqp->q_rtb.count += totalrtbdelta;
416
417			if (totalbdelta != 0 || totalrtbdelta != 0 ||
418			    qtrx->qt_icount_delta != 0)
419				trace_xfs_trans_apply_dquot_deltas_after(dqp);
420
421			/*
422			 * Get any default limits in use.
423			 * Start/reset the timer(s) if needed.
424			 */
425			if (dqp->q_id) {
426				xfs_qm_adjust_dqlimits(dqp);
427				xfs_qm_adjust_dqtimers(dqp);
428			}
429
430			dqp->q_flags |= XFS_DQFLAG_DIRTY;
431			/*
432			 * add this to the list of items to get logged
433			 */
434			xfs_trans_log_dquot(tp, dqp);
435			/*
436			 * Take off what's left of the original reservation.
437			 * In case of delayed allocations, there's no
438			 * reservation that a transaction structure knows of.
439			 */
440			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
441			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
442					qtrx->qt_blk_res, blk_res_used,
443					qtrx->qt_bcount_delta);
444
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445			/*
446			 * Adjust the RT reservation.
447			 */
448			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
449					qtrx->qt_rtblk_res,
450					qtrx->qt_rtblk_res_used,
451					qtrx->qt_rtbcount_delta);
 
 
 
 
 
 
 
 
 
 
 
 
 
452
453			/*
454			 * Adjust the inode reservation.
455			 */
456			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
457			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
458					qtrx->qt_ino_res,
459					qtrx->qt_ino_res_used,
460					qtrx->qt_icount_delta);
461
462			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
463			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
464			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
 
 
 
 
 
 
 
 
 
 
465		}
 
 
 
 
466	}
467}
468
469/*
470 * Release the reservations, and adjust the dquots accordingly.
471 * This is called only when the transaction is being aborted. If by
472 * any chance we have done dquot modifications incore (ie. deltas) already,
473 * we simply throw those away, since that's the expected behavior
474 * when a transaction is curtailed without a commit.
475 */
476void
477xfs_trans_unreserve_and_mod_dquots(
478	struct xfs_trans	*tp)
479{
480	int			i, j;
481	struct xfs_dquot	*dqp;
482	struct xfs_dqtrx	*qtrx, *qa;
483	bool			locked;
484
485	if (!tp->t_dqinfo)
486		return;
487
488	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
489		qa = tp->t_dqinfo->dqs[j];
490
 
491		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
492			qtrx = &qa[i];
493			/*
494			 * We assume that the array of dquots is filled
495			 * sequentially, not sparsely.
496			 */
497			if ((dqp = qtrx->qt_dquot) == NULL)
498				break;
499			/*
500			 * Unreserve the original reservation. We don't care
501			 * about the number of blocks used field, or deltas.
502			 * Also we don't bother to zero the fields.
503			 */
504			locked = false;
505			if (qtrx->qt_blk_res) {
506				xfs_dqlock(dqp);
507				locked = true;
508				dqp->q_blk.reserved -=
509					(xfs_qcnt_t)qtrx->qt_blk_res;
510			}
511			if (qtrx->qt_ino_res) {
512				if (!locked) {
513					xfs_dqlock(dqp);
514					locked = true;
515				}
516				dqp->q_ino.reserved -=
517					(xfs_qcnt_t)qtrx->qt_ino_res;
518			}
519
520			if (qtrx->qt_rtblk_res) {
521				if (!locked) {
522					xfs_dqlock(dqp);
523					locked = true;
524				}
525				dqp->q_rtb.reserved -=
526					(xfs_qcnt_t)qtrx->qt_rtblk_res;
527			}
528			if (locked)
529				xfs_dqunlock(dqp);
530
531		}
 
532	}
533}
534
535STATIC void
536xfs_quota_warn(
537	struct xfs_mount	*mp,
538	struct xfs_dquot	*dqp,
539	int			type)
540{
541	enum quota_type		qtype;
542
543	switch (xfs_dquot_type(dqp)) {
544	case XFS_DQTYPE_PROJ:
545		qtype = PRJQUOTA;
546		break;
547	case XFS_DQTYPE_USER:
548		qtype = USRQUOTA;
549		break;
550	case XFS_DQTYPE_GROUP:
551		qtype = GRPQUOTA;
552		break;
553	default:
554		return;
555	}
556
557	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
558			   mp->m_super->s_dev, type);
559}
560
561/*
562 * Decide if we can make an additional reservation against a quota resource.
563 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
564 *
565 * Note that we assume that the numeric difference between the inode and block
566 * warning codes will always be 3 since it's userspace ABI now, and will never
567 * decrease the quota reservation, so the *BELOW messages are irrelevant.
568 */
569static inline int
570xfs_dqresv_check(
571	struct xfs_dquot_res	*res,
572	struct xfs_quota_limits	*qlim,
573	int64_t			delta,
574	bool			*fatal)
575{
576	xfs_qcnt_t		hardlimit = res->hardlimit;
577	xfs_qcnt_t		softlimit = res->softlimit;
578	xfs_qcnt_t		total_count = res->reserved + delta;
579
580	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
581	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
582	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);
583
584	*fatal = false;
585	if (delta <= 0)
586		return QUOTA_NL_NOWARN;
587
588	if (!hardlimit)
589		hardlimit = qlim->hard;
590	if (!softlimit)
591		softlimit = qlim->soft;
592
593	if (hardlimit && total_count > hardlimit) {
594		*fatal = true;
595		return QUOTA_NL_IHARDWARN;
596	}
597
598	if (softlimit && total_count > softlimit) {
599		time64_t	now = ktime_get_real_seconds();
600
601		if ((res->timer != 0 && now > res->timer) ||
602		    (res->warnings != 0 && res->warnings >= qlim->warn)) {
603			*fatal = true;
604			return QUOTA_NL_ISOFTLONGWARN;
605		}
606
607		res->warnings++;
608		return QUOTA_NL_ISOFTWARN;
609	}
610
611	return QUOTA_NL_NOWARN;
612}
613
614/*
615 * This reserves disk blocks and inodes against a dquot.
616 * Flags indicate if the dquot is to be locked here and also
617 * if the blk reservation is for RT or regular blocks.
618 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
619 */
620STATIC int
621xfs_trans_dqresv(
622	struct xfs_trans	*tp,
623	struct xfs_mount	*mp,
624	struct xfs_dquot	*dqp,
625	int64_t			nblks,
626	long			ninos,
627	uint			flags)
628{
629	struct xfs_quotainfo	*q = mp->m_quotainfo;
630	struct xfs_def_quota	*defq;
631	struct xfs_dquot_res	*blkres;
632	struct xfs_quota_limits	*qlim;
 
 
 
 
 
633
634	xfs_dqlock(dqp);
635
636	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
637
638	if (flags & XFS_TRANS_DQ_RES_BLKS) {
639		blkres = &dqp->q_blk;
640		qlim = &defq->blk;
 
 
 
 
 
 
 
 
641	} else {
642		blkres = &dqp->q_rtb;
643		qlim = &defq->rtb;
644	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
645
646	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
647	    xfs_dquot_is_enforced(dqp)) {
648		int		quota_nl;
649		bool		fatal;
650
651		/*
652		 * dquot is locked already. See if we'd go over the hardlimit
653		 * or exceed the timelimit if we'd reserve resources.
654		 */
655		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
656		if (quota_nl != QUOTA_NL_NOWARN) {
657			/*
658			 * Quota block warning codes are 3 more than the inode
659			 * codes, which we check above.
660			 */
661			xfs_quota_warn(mp, dqp, quota_nl + 3);
662			if (fatal)
663				goto error_return;
664		}
 
 
 
 
 
 
 
 
 
 
 
665
666		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
667				&fatal);
668		if (quota_nl != QUOTA_NL_NOWARN) {
669			xfs_quota_warn(mp, dqp, quota_nl);
670			if (fatal)
671				goto error_return;
 
 
 
 
 
 
 
 
 
 
672		}
673	}
674
675	/*
676	 * Change the reservation, but not the actual usage.
677	 * Note that q_blk.reserved = q_blk.count + resv
678	 */
679	blkres->reserved += (xfs_qcnt_t)nblks;
680	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;
 
681
682	/*
683	 * note the reservation amt in the trans struct too,
684	 * so that the transaction knows how much was reserved by
685	 * it against this particular dquot.
686	 * We don't do this when we are reserving for a delayed allocation,
687	 * because we don't have the luxury of a transaction envelope then.
688	 */
689	if (tp) {
 
690		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
691		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
692				    nblks);
693		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
694	}
695
696	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
697	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
698	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
699		goto error_corrupt;
 
 
 
700
701	xfs_dqunlock(dqp);
702	return 0;
703
704error_return:
705	xfs_dqunlock(dqp);
706	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
707		return -ENOSPC;
708	return -EDQUOT;
709error_corrupt:
710	xfs_dqunlock(dqp);
711	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
712	return -EFSCORRUPTED;
713}
714
715
716/*
717 * Given dquot(s), make disk block and/or inode reservations against them.
718 * The fact that this does the reservation against user, group and
719 * project quotas is important, because this follows a all-or-nothing
720 * approach.
721 *
722 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
723 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
724 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
725 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
726 * dquots are unlocked on return, if they were not locked by caller.
727 */
728int
729xfs_trans_reserve_quota_bydquots(
730	struct xfs_trans	*tp,
731	struct xfs_mount	*mp,
732	struct xfs_dquot	*udqp,
733	struct xfs_dquot	*gdqp,
734	struct xfs_dquot	*pdqp,
735	int64_t			nblks,
736	long			ninos,
737	uint			flags)
738{
739	int		error;
740
741	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
742		return 0;
743
 
 
 
744	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
745
746	if (udqp) {
747		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
 
748		if (error)
749			return error;
 
750	}
751
752	if (gdqp) {
753		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
754		if (error)
755			goto unwind_usr;
756	}
757
758	if (pdqp) {
759		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
760		if (error)
761			goto unwind_grp;
 
 
 
762	}
763
764	/*
765	 * Didn't change anything critical, so, no need to log
766	 */
767	return 0;
768
769unwind_grp:
770	flags |= XFS_QMOPT_FORCE_RES;
771	if (gdqp)
772		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
773unwind_usr:
774	flags |= XFS_QMOPT_FORCE_RES;
775	if (udqp)
776		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
777	return error;
778}
779
780
781/*
782 * Lock the dquot and change the reservation if we can.
783 * This doesn't change the actual usage, just the reservation.
784 * The inode sent in is locked.
785 */
786int
787xfs_trans_reserve_quota_nblks(
788	struct xfs_trans	*tp,
789	struct xfs_inode	*ip,
790	int64_t			dblocks,
791	int64_t			rblocks,
792	bool			force)
793{
794	struct xfs_mount	*mp = ip->i_mount;
795	unsigned int		qflags = 0;
796	int			error;
797
798	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
799		return 0;
 
 
 
 
 
800
801	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
802	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 
 
 
803
804	if (force)
805		qflags |= XFS_QMOPT_FORCE_RES;
806
807	/* Reserve data device quota against the inode's dquots. */
808	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
809			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
810			XFS_QMOPT_RES_REGBLKS | qflags);
811	if (error)
812		return error;
813
814	/* Do the same but for realtime blocks. */
815	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
816			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
817			XFS_QMOPT_RES_RTBLKS | qflags);
818	if (error) {
819		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
820				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
821				XFS_QMOPT_RES_REGBLKS);
822		return error;
823	}
824
825	return 0;
826}
827
828/* Change the quota reservations for an inode creation activity. */
829int
830xfs_trans_reserve_quota_icreate(
831	struct xfs_trans	*tp,
832	struct xfs_dquot	*udqp,
833	struct xfs_dquot	*gdqp,
834	struct xfs_dquot	*pdqp,
835	int64_t			dblocks)
836{
837	struct xfs_mount	*mp = tp->t_mountp;
838
839	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
840		return 0;
841
842	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
843			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
844}
845
846/*
847 * This routine is called to allocate a quotaoff log item.
848 */
849struct xfs_qoff_logitem *
850xfs_trans_get_qoff_item(
851	struct xfs_trans	*tp,
852	struct xfs_qoff_logitem	*startqoff,
853	uint			flags)
854{
855	struct xfs_qoff_logitem	*q;
856
857	ASSERT(tp != NULL);
858
859	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
860	ASSERT(q != NULL);
861
862	/*
863	 * Get a log_item_desc to point at the new item.
864	 */
865	xfs_trans_add_item(tp, &q->qql_item);
866	return q;
867}
868
869
870/*
871 * This is called to mark the quotaoff logitem as needing
872 * to be logged when the transaction is committed.  The logitem must
873 * already be associated with the given transaction.
874 */
875void
876xfs_trans_log_quotaoff_item(
877	struct xfs_trans	*tp,
878	struct xfs_qoff_logitem	*qlp)
879{
880	tp->t_flags |= XFS_TRANS_DIRTY;
881	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
882}
883
/*
 * Allocate the per-transaction dquot bookkeeping structure.  The
 * __GFP_NOFAIL allocation cannot fail, so no error is returned.
 */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
					 GFP_KERNEL | __GFP_NOFAIL);
}
891
892void
893xfs_trans_free_dqinfo(
894	xfs_trans_t	*tp)
895{
896	if (!tp->t_dqinfo)
897		return;
898	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
899	tp->t_dqinfo = NULL;
900}