v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6
  7
  8#include "xfs.h"
  9#include "xfs_fs.h"
 10#include "xfs_shared.h"
 11#include "xfs_format.h"
 12#include "xfs_log_format.h"
 13#include "xfs_trans_resv.h"
 14#include "xfs_sb.h"
 15#include "xfs_mount.h"
 16#include "xfs_inode.h"
 17#include "xfs_trans.h"
 18#include "xfs_quota.h"
 19#include "xfs_qm.h"
 20#include "xfs_icache.h"
 21
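/*
 * Turn off quota enforcement for the quota types named in @flags.
 * Disabling quota accounting at runtime is no longer supported, so any
 * accounting flags only trigger a log message; the enforcement bits are
 * cleared incore and the change is written back to the superblock.
 */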
 22int
 23xfs_qm_scall_quotaoff(
 24	xfs_mount_t		*mp,
 25	uint			flags)
 26{
 27	/*
 28	 * No file system can have quotas enabled on disk but not in core.
 29	 * Note that quota utilities (like quotaoff) _expect_
 30	 * errno == -EEXIST here.
 31	 */
 32	if ((mp->m_qflags & flags) == 0)
 33		return -EEXIST;
 34
 35	/*
 36	 * We do not support actually turning off quota accounting any more.
 37	 * Just log a warning and ignore the accounting related flags.
 38	 */
 39	if (flags & XFS_ALL_QUOTA_ACCT)
 40		xfs_info(mp, "disabling of quota accounting not supported.");
 41
 42	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
 43	mp->m_qflags &= ~(flags & XFS_ALL_QUOTA_ENFD);
 44	spin_lock(&mp->m_sb_lock);
 45	mp->m_sb.sb_qflags = mp->m_qflags;
 46	spin_unlock(&mp->m_sb_lock);
 47	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
 48
 49	/* XXX what to do on error? Revert to the old values incore? */
 50	return xfs_sync_sb(mp, false);
 51}
 52
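/*
 * Truncate one quota inode back to zero length, dropping every extent in
 * its data fork.
 */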
 53STATIC int
 54xfs_qm_scall_trunc_qfile(
 55	struct xfs_mount	*mp,
 56	xfs_ino_t		ino)
 57{
 58	struct xfs_inode	*ip;
 59	struct xfs_trans	*tp;
 60	int			error;
 61
 62	if (ino == NULLFSINO)
 63		return 0;
 64
 65	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
 66	if (error)
 67		return error;
 68
 69	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 70
 71	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 72	if (error) {
 73		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 74		goto out_put;
 75	}
 76
 77	xfs_ilock(ip, XFS_ILOCK_EXCL);
 78	xfs_trans_ijoin(tp, ip, 0);
 79
 80	ip->i_disk_size = 0;
 81	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 82
 83	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 84	if (error) {
 85		xfs_trans_cancel(tp);
 86		goto out_unlock;
 87	}
 88
 89	ASSERT(ip->i_df.if_nextents == 0);
 90
 91	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 92	error = xfs_trans_commit(tp);
 93
 94out_unlock:
 95	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 96out_put:
 97	xfs_irele(ip);
 98	return error;
 99}
100
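/*
 * Truncate the user, group and/or project quota files selected by the
 * XFS_QMOPT_*QUOTA bits in @flags.
 */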
101int
102xfs_qm_scall_trunc_qfiles(
103	xfs_mount_t	*mp,
104	uint		flags)
105{
106	int		error = -EINVAL;
107
108	if (!xfs_has_quota(mp) || flags == 0 ||
109	    (flags & ~XFS_QMOPT_QUOTALL)) {
110		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
111			__func__, flags, mp->m_qflags);
112		return -EINVAL;
113	}
114
115	if (flags & XFS_QMOPT_UQUOTA) {
116		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
117		if (error)
118			return error;
119	}
120	if (flags & XFS_QMOPT_GQUOTA) {
121		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
122		if (error)
123			return error;
124	}
125	if (flags & XFS_QMOPT_PQUOTA)
126		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
127
128	return error;
129}
130
131/*
132 * Switch on (a given) quota enforcement for a filesystem.  This takes
133 * effect immediately.
134 * (Switching on quota accounting must be done at mount time.)
135 */
136int
137xfs_qm_scall_quotaon(
138	xfs_mount_t	*mp,
139	uint		flags)
140{
141	int		error;
142	uint		qf;
143
144	/*
 145	 * Switching on quota accounting must be done at mount time;
 146	 * only consider the quota enforcement flags here.
147	 */
148	flags &= XFS_ALL_QUOTA_ENFD;
149
150	if (flags == 0) {
151		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
152			__func__, mp->m_qflags);
153		return -EINVAL;
154	}
155
156	/*
157	 * Can't enforce without accounting. We check the superblock
158	 * qflags here instead of m_qflags because rootfs can have
159	 * quota acct on ondisk without m_qflags' knowing.
160	 */
161	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
162	     (flags & XFS_UQUOTA_ENFD)) ||
163	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
164	     (flags & XFS_GQUOTA_ENFD)) ||
165	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
166	     (flags & XFS_PQUOTA_ENFD))) {
167		xfs_debug(mp,
168			"%s: Can't enforce without acct, flags=%x sbflags=%x",
169			__func__, flags, mp->m_sb.sb_qflags);
170		return -EINVAL;
171	}
172	/*
 173	 * If everything's up to date incore, then don't waste time.
174	 */
175	if ((mp->m_qflags & flags) == flags)
176		return -EEXIST;
177
178	/*
179	 * Change sb_qflags on disk but not incore mp->qflags
180	 * if this is the root filesystem.
181	 */
182	spin_lock(&mp->m_sb_lock);
183	qf = mp->m_sb.sb_qflags;
184	mp->m_sb.sb_qflags = qf | flags;
185	spin_unlock(&mp->m_sb_lock);
186
187	/*
188	 * There's nothing to change if it's the same.
189	 */
190	if ((qf & flags) == flags)
191		return -EEXIST;
192
193	error = xfs_sync_sb(mp, false);
194	if (error)
195		return error;
196	/*
197	 * If we aren't trying to switch on quota enforcement, we are done.
198	 */
199	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
200	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
201	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
202	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
203	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
204	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
205		return 0;
206
207	if (!XFS_IS_QUOTA_ON(mp))
208		return -ESRCH;
209
210	/*
211	 * Switch on quota enforcement in core.
212	 */
213	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
214	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
215	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
216
217	return 0;
218}
219
220#define XFS_QC_MASK (QC_LIMIT_MASK | QC_TIMER_MASK)
221
222/*
223 * Adjust limits of this quota, and the defaults if passed in.  Returns true
224 * if the new limits made sense and were applied, false otherwise.
225 */
226static inline bool
227xfs_setqlim_limits(
228	struct xfs_mount	*mp,
229	struct xfs_dquot_res	*res,
230	struct xfs_quota_limits	*qlim,
231	xfs_qcnt_t		hard,
232	xfs_qcnt_t		soft,
233	const char		*tag)
234{
235	/* The hard limit can't be less than the soft limit. */
236	if (hard != 0 && hard < soft) {
237		xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag,
238				soft);
239		return false;
240	}
241
242	res->hardlimit = hard;
243	res->softlimit = soft;
244	if (qlim) {
245		qlim->hard = hard;
246		qlim->soft = soft;
247	}
248
249	return true;
250}
251
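/*
 * Apply a new timer value: for the defaults (qlim != NULL) this sets the
 * length of the default grace period, for an individual dquot it sets
 * the grace period expiry directly.
 */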
252static inline void
253xfs_setqlim_timer(
254	struct xfs_mount	*mp,
255	struct xfs_dquot_res	*res,
256	struct xfs_quota_limits	*qlim,
257	s64			timer)
258{
259	if (qlim) {
260		/* Set the length of the default grace period. */
261		res->timer = xfs_dquot_set_grace_period(timer);
262		qlim->time = res->timer;
263	} else {
264		/* Set the grace period expiration on a quota. */
265		res->timer = xfs_dquot_set_timeout(mp, timer);
266	}
267}
268
269/*
270 * Adjust quota limits, and start/stop timers accordingly.
271 */
272int
273xfs_qm_scall_setqlim(
274	struct xfs_mount	*mp,
275	xfs_dqid_t		id,
276	xfs_dqtype_t		type,
277	struct qc_dqblk		*newlim)
278{
279	struct xfs_quotainfo	*q = mp->m_quotainfo;
280	struct xfs_dquot	*dqp;
281	struct xfs_trans	*tp;
282	struct xfs_def_quota	*defq;
283	struct xfs_dquot_res	*res;
284	struct xfs_quota_limits	*qlim;
285	int			error;
286	xfs_qcnt_t		hard, soft;
287
288	if (newlim->d_fieldmask & ~XFS_QC_MASK)
289		return -EINVAL;
290	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
291		return 0;
292
293	/*
294	 * Get the dquot (locked) before we start, as we need to do a
295	 * transaction to allocate it if it doesn't exist. Once we have the
296	 * dquot, unlock it so we can start the next transaction safely. We hold
297	 * a reference to the dquot, so it's safe to do this unlock/lock without
298	 * it being reclaimed in the mean time.
299	 */
300	error = xfs_qm_dqget(mp, id, type, true, &dqp);
301	if (error) {
302		ASSERT(error != -ENOENT);
303		return error;
304	}
305
306	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
307	xfs_dqunlock(dqp);
308
309	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
310	if (error)
311		goto out_rele;
312
313	xfs_dqlock(dqp);
314	xfs_trans_dqjoin(tp, dqp);
315
316	/*
317	 * Update quota limits, warnings, and timers, and the defaults
318	 * if we're touching id == 0.
319	 *
320	 * Make sure that hardlimits are >= soft limits before changing.
321	 *
322	 * Update warnings counter(s) if requested.
323	 *
324	 * Timelimits for the super user set the relative time the other users
325	 * can be over quota for this file system. If it is zero a default is
326	 * used.  Ditto for the default soft and hard limit values (already
327	 * done, above), and for warnings.
328	 *
329	 * For other IDs, userspace can bump out the grace period if over
330	 * the soft limit.
331	 */
332
333	/* Blocks on the data device. */
334	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
335		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
336			dqp->q_blk.hardlimit;
337	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
338		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
339			dqp->q_blk.softlimit;
340	res = &dqp->q_blk;
341	qlim = id == 0 ? &defq->blk : NULL;
342
343	if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk"))
344		xfs_dquot_set_prealloc_limits(dqp);
345	if (newlim->d_fieldmask & QC_SPC_TIMER)
346		xfs_setqlim_timer(mp, res, qlim, newlim->d_spc_timer);
347
348	/* Blocks on the realtime device. */
349	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
350		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
351			dqp->q_rtb.hardlimit;
352	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
353		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
354			dqp->q_rtb.softlimit;
355	res = &dqp->q_rtb;
356	qlim = id == 0 ? &defq->rtb : NULL;
357
358	xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb");
359	if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
360		xfs_setqlim_timer(mp, res, qlim, newlim->d_rt_spc_timer);
361
362	/* Inodes */
363	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
364		(xfs_qcnt_t) newlim->d_ino_hardlimit :
365			dqp->q_ino.hardlimit;
366	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
367		(xfs_qcnt_t) newlim->d_ino_softlimit :
368			dqp->q_ino.softlimit;
369	res = &dqp->q_ino;
370	qlim = id == 0 ? &defq->ino : NULL;
371
372	xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino");
373	if (newlim->d_fieldmask & QC_INO_TIMER)
374		xfs_setqlim_timer(mp, res, qlim, newlim->d_ino_timer);
375
376	if (id != 0) {
377		/*
378		 * If the user is now over quota, start the timelimit.
379		 * The user will not be 'warned'.
380		 * Note that we keep the timers ticking, whether enforcement
381		 * is on or off. We don't really want to bother with iterating
382		 * over all ondisk dquots and turning the timers on/off.
383		 */
384		xfs_qm_adjust_dqtimers(dqp);
385	}
386	dqp->q_flags |= XFS_DQFLAG_DIRTY;
387	xfs_trans_log_dquot(tp, dqp);
388
389	error = xfs_trans_commit(tp);
390
391out_rele:
392	xfs_qm_dqrele(dqp);
393	return error;
394}
395
396/* Fill out the quota context. */
397static void
398xfs_qm_scall_getquota_fill_qc(
399	struct xfs_mount	*mp,
400	xfs_dqtype_t		type,
401	const struct xfs_dquot	*dqp,
402	struct qc_dqblk		*dst)
403{
404	memset(dst, 0, sizeof(*dst));
405	dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit);
406	dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit);
407	dst->d_ino_hardlimit = dqp->q_ino.hardlimit;
408	dst->d_ino_softlimit = dqp->q_ino.softlimit;
409	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved);
410	dst->d_ino_count = dqp->q_ino.reserved;
411	dst->d_spc_timer = dqp->q_blk.timer;
412	dst->d_ino_timer = dqp->q_ino.timer;
413	dst->d_ino_warns = 0;
414	dst->d_spc_warns = 0;
415	dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit);
416	dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit);
417	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved);
418	dst->d_rt_spc_timer = dqp->q_rtb.timer;
419	dst->d_rt_spc_warns = 0;
420
421	/*
422	 * Internally, we don't reset all the timers when quota enforcement
423	 * gets turned off. No need to confuse the user level code,
424	 * so return zeroes in that case.
425	 */
426	if (!xfs_dquot_is_enforced(dqp)) {
427		dst->d_spc_timer = 0;
428		dst->d_ino_timer = 0;
429		dst->d_rt_spc_timer = 0;
430	}
431
432#ifdef DEBUG
433	if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) {
434		if ((dst->d_space > dst->d_spc_softlimit) &&
435		    (dst->d_spc_softlimit > 0)) {
436			ASSERT(dst->d_spc_timer != 0);
437		}
438		if ((dst->d_ino_count > dqp->q_ino.softlimit) &&
439		    (dqp->q_ino.softlimit > 0)) {
440			ASSERT(dst->d_ino_timer != 0);
441		}
442	}
443#endif
444}
445
446/* Return the quota information for the dquot matching id. */
447int
448xfs_qm_scall_getquota(
449	struct xfs_mount	*mp,
450	xfs_dqid_t		id,
451	xfs_dqtype_t		type,
452	struct qc_dqblk		*dst)
453{
454	struct xfs_dquot	*dqp;
455	int			error;
456
457	/*
458	 * Expedite pending inodegc work at the start of a quota reporting
459	 * scan but don't block waiting for it to complete.
460	 */
461	if (id == 0)
462		xfs_inodegc_push(mp);
463
464	/*
465	 * Try to get the dquot. We don't want it allocated on disk, so don't
466	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
467	 */
468	error = xfs_qm_dqget(mp, id, type, false, &dqp);
469	if (error)
470		return error;
471
472	/*
473	 * If everything's NULL, this dquot doesn't quite exist as far as
474	 * our utility programs are concerned.
475	 */
476	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
477		error = -ENOENT;
478		goto out_put;
479	}
480
481	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
482
483out_put:
484	xfs_qm_dqput(dqp);
485	return error;
486}
487
488/*
489 * Return the quota information for the first initialized dquot whose id
490 * is at least as high as id.
491 */
492int
493xfs_qm_scall_getquota_next(
494	struct xfs_mount	*mp,
495	xfs_dqid_t		*id,
496	xfs_dqtype_t		type,
497	struct qc_dqblk		*dst)
498{
499	struct xfs_dquot	*dqp;
500	int			error;
501
502	/* Flush inodegc work at the start of a quota reporting scan. */
503	if (*id == 0)
504		xfs_inodegc_push(mp);
505
506	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
507	if (error)
508		return error;
509
510	/* Fill in the ID we actually read from disk */
511	*id = dqp->q_id;
512
513	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
514
515	xfs_qm_dqput(dqp);
516	return error;
517}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6
  7
  8#include "xfs.h"
  9#include "xfs_fs.h"
 10#include "xfs_shared.h"
 11#include "xfs_format.h"
 12#include "xfs_log_format.h"
 13#include "xfs_trans_resv.h"
 14#include "xfs_sb.h"
 15#include "xfs_mount.h"
 16#include "xfs_inode.h"
 17#include "xfs_trans.h"
 18#include "xfs_quota.h"
 19#include "xfs_qm.h"
 20#include "xfs_icache.h"
 21
 22STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 23STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
 24					uint);
 25
 26/*
 27 * Turn off quota accounting and/or enforcement for all udquots and/or
 28 * gdquots. Called only at unmount time.
 29 *
 30 * This assumes that there are no dquots of this file system cached
 31 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 32 * it is an error to call this twice, without purging the cache.
 33 */
 34int
 35xfs_qm_scall_quotaoff(
 36	xfs_mount_t		*mp,
 37	uint			flags)
 38{
 39	struct xfs_quotainfo	*q = mp->m_quotainfo;
 40	uint			dqtype;
 41	int			error;
 42	uint			inactivate_flags;
 43	xfs_qoff_logitem_t	*qoffstart;
 44
 45	/*
 46	 * No file system can have quotas enabled on disk but not in core.
 47	 * Note that quota utilities (like quotaoff) _expect_
 48	 * errno == -EEXIST here.
 49	 */
 50	if ((mp->m_qflags & flags) == 0)
 51		return -EEXIST;
 52	error = 0;
 53
 54	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
 55
 56	/*
 57	 * We don't want to deal with two quotaoffs messing up each other,
 58	 * so we're going to serialize it. quotaoff isn't exactly a performance
 59	 * critical thing.
 60	 * If quotaoff, then we must be dealing with the root filesystem.
 61	 */
 62	ASSERT(q);
 63	mutex_lock(&q->qi_quotaofflock);
 64
 65	/*
 66	 * If we're just turning off quota enforcement, change mp and go.
 67	 */
 68	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
 69		mp->m_qflags &= ~(flags);
 70
 71		spin_lock(&mp->m_sb_lock);
 72		mp->m_sb.sb_qflags = mp->m_qflags;
 73		spin_unlock(&mp->m_sb_lock);
 74		mutex_unlock(&q->qi_quotaofflock);
 75
 76		/* XXX what to do on error? Revert to the old values incore? */
 77		return xfs_sync_sb(mp, false);
 78	}
 79
 80	dqtype = 0;
 81	inactivate_flags = 0;
 82	/*
 83	 * If accounting is off, we must turn enforcement off, clear the
 84	 * quota 'CHKD' certificate to make it known that we have to
 85	 * do a quotacheck the next time this quota is turned on.
 86	 */
 87	if (flags & XFS_UQUOTA_ACCT) {
 88		dqtype |= XFS_QMOPT_UQUOTA;
 89		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
 90		inactivate_flags |= XFS_UQUOTA_ACTIVE;
 91	}
 92	if (flags & XFS_GQUOTA_ACCT) {
 93		dqtype |= XFS_QMOPT_GQUOTA;
 94		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
 95		inactivate_flags |= XFS_GQUOTA_ACTIVE;
 96	}
 97	if (flags & XFS_PQUOTA_ACCT) {
 98		dqtype |= XFS_QMOPT_PQUOTA;
 99		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
100		inactivate_flags |= XFS_PQUOTA_ACTIVE;
101	}
102
103	/*
104	 * Nothing to do?  Don't complain. This happens when we're just
105	 * turning off quota enforcement.
106	 */
107	if ((mp->m_qflags & flags) == 0)
108		goto out_unlock;
109
110	/*
111	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
112	 * and synchronously. If we fail to write, we should abort the
113	 * operation as it cannot be recovered safely if we crash.
114	 */
115	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
116	if (error)
117		goto out_unlock;
118
119	/*
120	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
121	 * to take care of the race between dqget and quotaoff. We don't take
122	 * any special locks to reset these bits. All processes need to check
123	 * these bits *after* taking inode lock(s) to see if the particular
124	 * quota type is in the process of being turned off. If *ACTIVE, it is
125	 * guaranteed that all dquot structures and all quotainode ptrs will all
126	 * stay valid as long as that inode is kept locked.
127	 *
128	 * There is no turning back after this.
129	 */
130	mp->m_qflags &= ~inactivate_flags;
131
132	/*
133	 * Give back all the dquot reference(s) held by inodes.
134	 * Here we go thru every single incore inode in this file system, and
135	 * do a dqrele on the i_udquot/i_gdquot that it may have.
136	 * Essentially, as long as somebody has an inode locked, this guarantees
137	 * that quotas will not be turned off. This is handy because in a
138	 * transaction once we lock the inode(s) and check for quotaon, we can
139	 * depend on the quota inodes (and other things) being valid as long as
140	 * we keep the lock(s).
141	 */
142	xfs_qm_dqrele_all_inodes(mp, flags);
143
144	/*
145	 * Next we make the changes in the quota flag in the mount struct.
146	 * This isn't protected by a particular lock directly, because we
147	 * don't want to take a mrlock every time we depend on quotas being on.
148	 */
149	mp->m_qflags &= ~flags;
150
151	/*
152	 * Go through all the dquots of this file system and purge them,
153	 * according to what was turned off.
154	 */
155	xfs_qm_dqpurge_all(mp, dqtype);
156
157	/*
158	 * Transactions that had started before ACTIVE state bit was cleared
159	 * could have logged many dquots, so they'd have higher LSNs than
160	 * the first QUOTAOFF log record does. If we happen to crash when
161	 * the tail of the log has gone past the QUOTAOFF record, but
162	 * before the last dquot modification, those dquots __will__
163	 * recover, and that's not good.
164	 *
165	 * So, we have QUOTAOFF start and end logitems; the start
166	 * logitem won't get overwritten until the end logitem appears...
167	 */
168	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
169	if (error) {
170		/* We're screwed now. Shutdown is the only option. */
171		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
172		goto out_unlock;
173	}
174
175	/*
176	 * If all quotas are completely turned off, close shop.
177	 */
178	if (mp->m_qflags == 0) {
179		mutex_unlock(&q->qi_quotaofflock);
180		xfs_qm_destroy_quotainfo(mp);
181		return 0;
182	}
183
184	/*
185	 * Release our quotainode references if we don't need them anymore.
186	 */
187	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
188		xfs_irele(q->qi_uquotaip);
189		q->qi_uquotaip = NULL;
190	}
191	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
192		xfs_irele(q->qi_gquotaip);
193		q->qi_gquotaip = NULL;
194	}
195	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
196		xfs_irele(q->qi_pquotaip);
197		q->qi_pquotaip = NULL;
198	}
199
200out_unlock:
201	mutex_unlock(&q->qi_quotaofflock);
202	return error;
203}
204
205STATIC int
206xfs_qm_scall_trunc_qfile(
207	struct xfs_mount	*mp,
208	xfs_ino_t		ino)
209{
210	struct xfs_inode	*ip;
211	struct xfs_trans	*tp;
212	int			error;
213
214	if (ino == NULLFSINO)
215		return 0;
216
217	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
218	if (error)
219		return error;
220
221	xfs_ilock(ip, XFS_IOLOCK_EXCL);
222
223	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
224	if (error) {
225		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
226		goto out_put;
227	}
228
229	xfs_ilock(ip, XFS_ILOCK_EXCL);
230	xfs_trans_ijoin(tp, ip, 0);
231
232	ip->i_d.di_size = 0;
233	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
234
235	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
236	if (error) {
237		xfs_trans_cancel(tp);
238		goto out_unlock;
239	}
240
241	ASSERT(ip->i_d.di_nextents == 0);
242
243	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
244	error = xfs_trans_commit(tp);
245
246out_unlock:
247	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
248out_put:
249	xfs_irele(ip);
250	return error;
251}
252
253int
254xfs_qm_scall_trunc_qfiles(
255	xfs_mount_t	*mp,
256	uint		flags)
257{
258	int		error = -EINVAL;
259
260	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
261	    (flags & ~XFS_DQ_ALLTYPES)) {
262		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
263			__func__, flags, mp->m_qflags);
264		return -EINVAL;
265	}
266
267	if (flags & XFS_DQ_USER) {
268		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
269		if (error)
270			return error;
271	}
272	if (flags & XFS_DQ_GROUP) {
273		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
274		if (error)
275			return error;
276	}
277	if (flags & XFS_DQ_PROJ)
278		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
279
280	return error;
281}
282
283/*
284 * Switch on (a given) quota enforcement for a filesystem.  This takes
285 * effect immediately.
286 * (Switching on quota accounting must be done at mount time.)
287 */
288int
289xfs_qm_scall_quotaon(
290	xfs_mount_t	*mp,
291	uint		flags)
292{
293	int		error;
294	uint		qf;
295
296	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
297	/*
298	 * Switching on quota accounting must be done at mount time.
299	 */
300	flags &= ~(XFS_ALL_QUOTA_ACCT);
301
302	if (flags == 0) {
303		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
304			__func__, mp->m_qflags);
305		return -EINVAL;
306	}
307
308	/*
309	 * Can't enforce without accounting. We check the superblock
310	 * qflags here instead of m_qflags because rootfs can have
311	 * quota acct on ondisk without m_qflags' knowing.
312	 */
313	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
314	     (flags & XFS_UQUOTA_ENFD)) ||
315	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
316	     (flags & XFS_GQUOTA_ENFD)) ||
317	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
318	     (flags & XFS_PQUOTA_ENFD))) {
319		xfs_debug(mp,
320			"%s: Can't enforce without acct, flags=%x sbflags=%x",
321			__func__, flags, mp->m_sb.sb_qflags);
322		return -EINVAL;
323	}
324	/*
 325	 * If everything's up to date incore, then don't waste time.
326	 */
327	if ((mp->m_qflags & flags) == flags)
328		return -EEXIST;
329
330	/*
331	 * Change sb_qflags on disk but not incore mp->qflags
332	 * if this is the root filesystem.
333	 */
334	spin_lock(&mp->m_sb_lock);
335	qf = mp->m_sb.sb_qflags;
336	mp->m_sb.sb_qflags = qf | flags;
337	spin_unlock(&mp->m_sb_lock);
338
339	/*
340	 * There's nothing to change if it's the same.
341	 */
342	if ((qf & flags) == flags)
343		return -EEXIST;
344
345	error = xfs_sync_sb(mp, false);
346	if (error)
347		return error;
348	/*
349	 * If we aren't trying to switch on quota enforcement, we are done.
350	 */
351	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
352	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
353	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
354	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
355	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
356	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
357		return 0;
358
359	if (! XFS_IS_QUOTA_RUNNING(mp))
360		return -ESRCH;
361
362	/*
363	 * Switch on quota enforcement in core.
364	 */
365	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
366	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
367	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
368
369	return 0;
370}
371
372#define XFS_QC_MASK \
373	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
374
375/*
376 * Adjust quota limits, and start/stop timers accordingly.
377 */
378int
379xfs_qm_scall_setqlim(
380	struct xfs_mount	*mp,
381	xfs_dqid_t		id,
382	uint			type,
383	struct qc_dqblk		*newlim)
384{
385	struct xfs_quotainfo	*q = mp->m_quotainfo;
386	struct xfs_disk_dquot	*ddq;
387	struct xfs_dquot	*dqp;
388	struct xfs_trans	*tp;
389	struct xfs_def_quota	*defq;
390	int			error;
391	xfs_qcnt_t		hard, soft;
392
393	if (newlim->d_fieldmask & ~XFS_QC_MASK)
394		return -EINVAL;
395	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
396		return 0;
397
398	/*
399	 * We don't want to race with a quotaoff so take the quotaoff lock.
400	 * We don't hold an inode lock, so there's nothing else to stop
401	 * a quotaoff from happening.
402	 */
403	mutex_lock(&q->qi_quotaofflock);
404
405	/*
406	 * Get the dquot (locked) before we start, as we need to do a
407	 * transaction to allocate it if it doesn't exist. Once we have the
408	 * dquot, unlock it so we can start the next transaction safely. We hold
409	 * a reference to the dquot, so it's safe to do this unlock/lock without
410	 * it being reclaimed in the mean time.
411	 */
412	error = xfs_qm_dqget(mp, id, type, true, &dqp);
413	if (error) {
414		ASSERT(error != -ENOENT);
415		goto out_unlock;
416	}
417
418	defq = xfs_get_defquota(dqp, q);
419	xfs_dqunlock(dqp);
420
421	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
422	if (error)
423		goto out_rele;
424
425	xfs_dqlock(dqp);
426	xfs_trans_dqjoin(tp, dqp);
427	ddq = &dqp->q_core;
428
429	/*
430	 * Make sure that hardlimits are >= soft limits before changing.
431	 */
432	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
433		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
434			be64_to_cpu(ddq->d_blk_hardlimit);
435	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
436		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
437			be64_to_cpu(ddq->d_blk_softlimit);
438	if (hard == 0 || hard >= soft) {
439		ddq->d_blk_hardlimit = cpu_to_be64(hard);
440		ddq->d_blk_softlimit = cpu_to_be64(soft);
441		xfs_dquot_set_prealloc_limits(dqp);
442		if (id == 0) {
443			defq->bhardlimit = hard;
444			defq->bsoftlimit = soft;
445		}
446	} else {
447		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
448	}
449	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
450		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
451			be64_to_cpu(ddq->d_rtb_hardlimit);
452	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
453		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
454			be64_to_cpu(ddq->d_rtb_softlimit);
455	if (hard == 0 || hard >= soft) {
456		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
457		ddq->d_rtb_softlimit = cpu_to_be64(soft);
458		if (id == 0) {
459			defq->rtbhardlimit = hard;
460			defq->rtbsoftlimit = soft;
461		}
462	} else {
463		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
464	}
465
466	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
467		(xfs_qcnt_t) newlim->d_ino_hardlimit :
468			be64_to_cpu(ddq->d_ino_hardlimit);
469	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
470		(xfs_qcnt_t) newlim->d_ino_softlimit :
471			be64_to_cpu(ddq->d_ino_softlimit);
472	if (hard == 0 || hard >= soft) {
473		ddq->d_ino_hardlimit = cpu_to_be64(hard);
474		ddq->d_ino_softlimit = cpu_to_be64(soft);
475		if (id == 0) {
476			defq->ihardlimit = hard;
477			defq->isoftlimit = soft;
478		}
479	} else {
480		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
481	}
482
483	/*
484	 * Update warnings counter(s) if requested
485	 */
486	if (newlim->d_fieldmask & QC_SPC_WARNS)
487		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
488	if (newlim->d_fieldmask & QC_INO_WARNS)
489		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
490	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
491		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
492
493	if (id == 0) {
494		/*
495		 * Timelimits for the super user set the relative time
496		 * the other users can be over quota for this file system.
497		 * If it is zero a default is used.  Ditto for the default
498		 * soft and hard limit values (already done, above), and
499		 * for warnings.
500		 */
501		if (newlim->d_fieldmask & QC_SPC_TIMER) {
502			q->qi_btimelimit = newlim->d_spc_timer;
503			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
504		}
505		if (newlim->d_fieldmask & QC_INO_TIMER) {
506			q->qi_itimelimit = newlim->d_ino_timer;
507			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
508		}
509		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
510			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
511			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
512		}
513		if (newlim->d_fieldmask & QC_SPC_WARNS)
514			q->qi_bwarnlimit = newlim->d_spc_warns;
515		if (newlim->d_fieldmask & QC_INO_WARNS)
516			q->qi_iwarnlimit = newlim->d_ino_warns;
517		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
518			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
519	} else {
520		/*
521		 * If the user is now over quota, start the timelimit.
522		 * The user will not be 'warned'.
523		 * Note that we keep the timers ticking, whether enforcement
524		 * is on or off. We don't really want to bother with iterating
525		 * over all ondisk dquots and turning the timers on/off.
526		 */
527		xfs_qm_adjust_dqtimers(mp, ddq);
528	}
529	dqp->dq_flags |= XFS_DQ_DIRTY;
530	xfs_trans_log_dquot(tp, dqp);
531
532	error = xfs_trans_commit(tp);
533
534out_rele:
535	xfs_qm_dqrele(dqp);
536out_unlock:
537	mutex_unlock(&q->qi_quotaofflock);
538	return error;
539}
540
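/*
 * Log the quotaoff end record, chained to the start record, and commit it
 * synchronously so the QUOTAOFF pair is stable on disk before quota
 * accounting actually stops.
 */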
541STATIC int
542xfs_qm_log_quotaoff_end(
543	xfs_mount_t		*mp,
544	xfs_qoff_logitem_t	*startqoff,
545	uint			flags)
546{
547	xfs_trans_t		*tp;
548	int			error;
549	xfs_qoff_logitem_t	*qoffi;
550
551	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
552	if (error)
553		return error;
554
555	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
556					flags & XFS_ALL_QUOTA_ACCT);
557	xfs_trans_log_quotaoff_item(tp, qoffi);
558
559	/*
560	 * We have to make sure that the transaction is secure on disk before we
561	 * return and actually stop quota accounting. So, make it synchronous.
562	 * We don't care about quotoff's performance.
563	 */
564	xfs_trans_set_sync(tp);
565	return xfs_trans_commit(tp);
566}
567
568
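/*
 * Log the quotaoff start record and the matching superblock flag change,
 * committing synchronously. The start log item is returned so the end
 * record can be written once the dquots have been purged.
 */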
569STATIC int
570xfs_qm_log_quotaoff(
571	xfs_mount_t	       *mp,
572	xfs_qoff_logitem_t     **qoffstartp,
573	uint		       flags)
574{
575	xfs_trans_t	       *tp;
576	int			error;
577	xfs_qoff_logitem_t     *qoffi;
578
579	*qoffstartp = NULL;
580
581	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
582	if (error)
583		goto out;
584
585	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
586	xfs_trans_log_quotaoff_item(tp, qoffi);
587
588	spin_lock(&mp->m_sb_lock);
589	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
590	spin_unlock(&mp->m_sb_lock);
591
592	xfs_log_sb(tp);
593
594	/*
595	 * We have to make sure that the transaction is secure on disk before we
596	 * return and actually stop quota accounting. So, make it synchronous.
597	 * We don't care about quotoff's performance.
598	 */
599	xfs_trans_set_sync(tp);
600	error = xfs_trans_commit(tp);
601	if (error)
602		goto out;
603
604	*qoffstartp = qoffi;
605out:
606	return error;
607}
608
609/* Fill out the quota context. */
610static void
611xfs_qm_scall_getquota_fill_qc(
612	struct xfs_mount	*mp,
613	uint			type,
614	const struct xfs_dquot	*dqp,
615	struct qc_dqblk		*dst)
616{
617	memset(dst, 0, sizeof(*dst));
618	dst->d_spc_hardlimit =
619		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
620	dst->d_spc_softlimit =
621		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
622	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
623	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
624	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
625	dst->d_ino_count = dqp->q_res_icount;
626	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
627	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
628	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
629	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
630	dst->d_rt_spc_hardlimit =
631		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
632	dst->d_rt_spc_softlimit =
633		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
634	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
635	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
636	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
637
638	/*
639	 * Internally, we don't reset all the timers when quota enforcement
640	 * gets turned off. No need to confuse the user level code,
641	 * so return zeroes in that case.
642	 */
643	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
644	     dqp->q_core.d_flags == XFS_DQ_USER) ||
645	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
646	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
647	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
648	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
649		dst->d_spc_timer = 0;
650		dst->d_ino_timer = 0;
651		dst->d_rt_spc_timer = 0;
652	}
653
654#ifdef DEBUG
655	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
656	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
657	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
658	    dqp->q_core.d_id != 0) {
659		if ((dst->d_space > dst->d_spc_softlimit) &&
660		    (dst->d_spc_softlimit > 0)) {
661			ASSERT(dst->d_spc_timer != 0);
662		}
663		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
664		    (dst->d_ino_softlimit > 0)) {
665			ASSERT(dst->d_ino_timer != 0);
666		}
667	}
668#endif
669}
670
671/* Return the quota information for the dquot matching id. */
672int
673xfs_qm_scall_getquota(
674	struct xfs_mount	*mp,
675	xfs_dqid_t		id,
676	uint			type,
677	struct qc_dqblk		*dst)
678{
679	struct xfs_dquot	*dqp;
680	int			error;
681
682	/*
683	 * Try to get the dquot. We don't want it allocated on disk, so don't
684	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
685	 */
686	error = xfs_qm_dqget(mp, id, type, false, &dqp);
687	if (error)
688		return error;
689
690	/*
691	 * If everything's NULL, this dquot doesn't quite exist as far as
692	 * our utility programs are concerned.
693	 */
694	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
695		error = -ENOENT;
696		goto out_put;
697	}
698
699	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
700
701out_put:
702	xfs_qm_dqput(dqp);
703	return error;
704}
705
706/*
707 * Return the quota information for the first initialized dquot whose id
708 * is at least as high as id.
709 */
710int
711xfs_qm_scall_getquota_next(
712	struct xfs_mount	*mp,
713	xfs_dqid_t		*id,
714	uint			type,
715	struct qc_dqblk		*dst)
716{
717	struct xfs_dquot	*dqp;
718	int			error;
719
720	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
721	if (error)
722		return error;
723
724	/* Fill in the ID we actually read from disk */
725	*id = be32_to_cpu(dqp->q_core.d_id);
726
727	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
728
729	xfs_qm_dqput(dqp);
730	return error;
731}
732
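/*
 * Release the user/group/project dquot references held by a single inode
 * for the quota types being turned off; the quota inodes themselves are
 * skipped.
 */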
733STATIC int
734xfs_dqrele_inode(
735	struct xfs_inode	*ip,
736	int			flags,
737	void			*args)
738{
739	/* skip quota inodes */
740	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
741	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
742	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
743		ASSERT(ip->i_udquot == NULL);
744		ASSERT(ip->i_gdquot == NULL);
745		ASSERT(ip->i_pdquot == NULL);
746		return 0;
747	}
748
749	xfs_ilock(ip, XFS_ILOCK_EXCL);
750	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
751		xfs_qm_dqrele(ip->i_udquot);
752		ip->i_udquot = NULL;
753	}
754	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
755		xfs_qm_dqrele(ip->i_gdquot);
756		ip->i_gdquot = NULL;
757	}
758	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
759		xfs_qm_dqrele(ip->i_pdquot);
760		ip->i_pdquot = NULL;
761	}
762	xfs_iunlock(ip, XFS_ILOCK_EXCL);
763	return 0;
764}
765
766
767/*
768 * Go thru all the inodes in the file system, releasing their dquots.
769 *
770 * Note that the mount structure gets modified to indicate that quotas are off
771 * AFTER this, in the case of quotaoff.
772 */
773void
774xfs_qm_dqrele_all_inodes(
775	struct xfs_mount *mp,
776	uint		 flags)
777{
778	ASSERT(mp->m_quotainfo);
779	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
780				    XFS_AGITER_INEW_WAIT);
781}