fs/xfs/xfs_qm_syscalls.c (v3.5.6)
  1/*
  2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18
 19#include <linux/capability.h>
 20
 21#include "xfs.h"
 22#include "xfs_fs.h"
 23#include "xfs_bit.h"
 24#include "xfs_log.h"
  25#include "xfs_trans.h"
 26#include "xfs_sb.h"
 27#include "xfs_ag.h"
 28#include "xfs_alloc.h"
 29#include "xfs_quota.h"
 30#include "xfs_mount.h"
 31#include "xfs_bmap_btree.h"
 32#include "xfs_inode.h"
 33#include "xfs_inode_item.h"
 34#include "xfs_itable.h"
 35#include "xfs_bmap.h"
 36#include "xfs_rtalloc.h"
 37#include "xfs_error.h"
 38#include "xfs_attr.h"
 39#include "xfs_buf_item.h"
 40#include "xfs_utils.h"
 41#include "xfs_qm.h"
 42#include "xfs_trace.h"
 43
 44STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 45STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
 46					uint);
 47STATIC uint	xfs_qm_export_flags(uint);
 48STATIC uint	xfs_qm_export_qtype_flags(uint);
 49
 50/*
 51 * Turn off quota accounting and/or enforcement for all udquots and/or
 52 * gdquots. Called only at unmount time.
 53 *
 54 * This assumes that there are no dquots of this file system cached
 55 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 56 * it is an error to call this twice, without purging the cache.
 57 */
 58int
 59xfs_qm_scall_quotaoff(
 60	xfs_mount_t		*mp,
 61	uint			flags)
 62{
 63	struct xfs_quotainfo	*q = mp->m_quotainfo;
 64	uint			dqtype;
 65	int			error;
 66	uint			inactivate_flags;
 67	xfs_qoff_logitem_t	*qoffstart;
 68
 69	/*
 70	 * No file system can have quotas enabled on disk but not in core.
 71	 * Note that quota utilities (like quotaoff) _expect_
 72	 * errno == EEXIST here.
 73	 */
 74	if ((mp->m_qflags & flags) == 0)
 75		return XFS_ERROR(EEXIST);
 76	error = 0;
 77
 78	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
 79
 80	/*
 81	 * We don't want to deal with two quotaoffs messing up each other,
 82	 * so we're going to serialize it. quotaoff isn't exactly a performance
 83	 * critical thing.
 84	 * If quotaoff, then we must be dealing with the root filesystem.
 85	 */
 86	ASSERT(q);
 87	mutex_lock(&q->qi_quotaofflock);
 88
 89	/*
 90	 * If we're just turning off quota enforcement, change mp and go.
 91	 */
 92	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
 93		mp->m_qflags &= ~(flags);
 94
 95		spin_lock(&mp->m_sb_lock);
 96		mp->m_sb.sb_qflags = mp->m_qflags;
 97		spin_unlock(&mp->m_sb_lock);
 98		mutex_unlock(&q->qi_quotaofflock);
 99
100		/* XXX what to do if error ? Revert back to old vals incore ? */
101		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
102		return (error);
103	}
104
105	dqtype = 0;
106	inactivate_flags = 0;
107	/*
108	 * If accounting is off, we must turn enforcement off, clear the
109	 * quota 'CHKD' certificate to make it known that we have to
110	 * do a quotacheck the next time this quota is turned on.
111	 */
112	if (flags & XFS_UQUOTA_ACCT) {
113		dqtype |= XFS_QMOPT_UQUOTA;
114		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
115		inactivate_flags |= XFS_UQUOTA_ACTIVE;
116	}
117	if (flags & XFS_GQUOTA_ACCT) {
118		dqtype |= XFS_QMOPT_GQUOTA;
119		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
120		inactivate_flags |= XFS_GQUOTA_ACTIVE;
121	} else if (flags & XFS_PQUOTA_ACCT) {
122		dqtype |= XFS_QMOPT_PQUOTA;
123		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
124		inactivate_flags |= XFS_PQUOTA_ACTIVE;
125	}
126
127	/*
128	 * Nothing to do?  Don't complain. This happens when we're just
129	 * turning off quota enforcement.
130	 */
131	if ((mp->m_qflags & flags) == 0)
132		goto out_unlock;
133
134	/*
135	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
136	 * and synchronously. If we fail to write, we should abort the
137	 * operation as it cannot be recovered safely if we crash.
138	 */
139	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
140	if (error)
141		goto out_unlock;
142
143	/*
144	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
145	 * to take care of the race between dqget and quotaoff. We don't take
146	 * any special locks to reset these bits. All processes need to check
147	 * these bits *after* taking inode lock(s) to see if the particular
148	 * quota type is in the process of being turned off. If *ACTIVE, it is
149	 * guaranteed that all dquot structures and all quotainode ptrs will all
150	 * stay valid as long as that inode is kept locked.
151	 *
152	 * There is no turning back after this.
153	 */
154	mp->m_qflags &= ~inactivate_flags;
155
156	/*
157	 * Give back all the dquot reference(s) held by inodes.
158	 * Here we go thru every single incore inode in this file system, and
159	 * do a dqrele on the i_udquot/i_gdquot that it may have.
160	 * Essentially, as long as somebody has an inode locked, this guarantees
161	 * that quotas will not be turned off. This is handy because in a
162	 * transaction once we lock the inode(s) and check for quotaon, we can
163	 * depend on the quota inodes (and other things) being valid as long as
164	 * we keep the lock(s).
165	 */
166	xfs_qm_dqrele_all_inodes(mp, flags);
167
168	/*
169	 * Next we make the changes in the quota flag in the mount struct.
170	 * This isn't protected by a particular lock directly, because we
171	 * don't want to take a mrlock every time we depend on quotas being on.
172	 */
173	mp->m_qflags &= ~flags;
174
175	/*
176	 * Go through all the dquots of this file system and purge them,
177	 * according to what was turned off.
178	 */
179	xfs_qm_dqpurge_all(mp, dqtype);
180
181	/*
182	 * Transactions that had started before ACTIVE state bit was cleared
183	 * could have logged many dquots, so they'd have higher LSNs than
184	 * the first QUOTAOFF log record does. If we happen to crash when
185	 * the tail of the log has gone past the QUOTAOFF record, but
186	 * before the last dquot modification, those dquots __will__
187	 * recover, and that's not good.
188	 *
189	 * So, we have QUOTAOFF start and end logitems; the start
190	 * logitem won't get overwritten until the end logitem appears...
191	 */
192	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
193	if (error) {
194		/* We're screwed now. Shutdown is the only option. */
195		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
196		goto out_unlock;
197	}
198
199	/*
200	 * If quotas is completely disabled, close shop.
201	 */
202	if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
203	    ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
204		mutex_unlock(&q->qi_quotaofflock);
205		xfs_qm_destroy_quotainfo(mp);
206		return (0);
207	}
208
209	/*
210	 * Release our quotainode references if we don't need them anymore.
211	 */
212	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
213		IRELE(q->qi_uquotaip);
214		q->qi_uquotaip = NULL;
215	}
216	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
217		IRELE(q->qi_gquotaip);
218		q->qi_gquotaip = NULL;
219	}
220
221out_unlock:
222	mutex_unlock(&q->qi_quotaofflock);
223	return error;
224}
225
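For context, and not part of this source file: xfs_qm_scall_quotaoff() is reached from userspace through the quotactl(2) system call with the XFS-specific Q_XQUOTAOFF command. Below is a minimal, illustrative sketch of such a caller that disables enforcement of user quotas only; the device path "/dev/sdb1" is a placeholder. The flags word uses the same FS_QUOTA_*_ENFD / FS_QUOTA_*_ACCT bits that xfs_qm_export_flags() produces further down in this file.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XQUOTAOFF, FS_QUOTA_UDQ_ENFD */

int main(void)
{
	/* Turn off enforcement of user quotas; accounting stays on. */
	unsigned int flags = FS_QUOTA_UDQ_ENFD;

	if (quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), "/dev/sdb1", 0,
		     (caddr_t)&flags) < 0) {
		perror("Q_XQUOTAOFF");
		return 1;
	}
	return 0;
}
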
226STATIC int
227xfs_qm_scall_trunc_qfile(
228	struct xfs_mount	*mp,
229	xfs_ino_t		ino)
230{
231	struct xfs_inode	*ip;
232	struct xfs_trans	*tp;
233	int			error;
234
235	if (ino == NULLFSINO)
236		return 0;
237
238	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
239	if (error)
240		return error;
241
242	xfs_ilock(ip, XFS_IOLOCK_EXCL);
243
244	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
245	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
246				  XFS_TRANS_PERM_LOG_RES,
247				  XFS_ITRUNCATE_LOG_COUNT);
248	if (error) {
249		xfs_trans_cancel(tp, 0);
250		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
251		goto out_put;
252	}
253
254	xfs_ilock(ip, XFS_ILOCK_EXCL);
255	xfs_trans_ijoin(tp, ip, 0);
256
257	ip->i_d.di_size = 0;
258	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
259
260	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
261	if (error) {
262		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
263				     XFS_TRANS_ABORT);
264		goto out_unlock;
265	}
266
267	ASSERT(ip->i_d.di_nextents == 0);
268
269	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
270	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
271
272out_unlock:
273	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
274out_put:
275	IRELE(ip);
276	return error;
277}
278
279int
280xfs_qm_scall_trunc_qfiles(
281	xfs_mount_t	*mp,
282	uint		flags)
283{
284	int		error = 0, error2 = 0;
285
286	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
 287		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
288			__func__, flags, mp->m_qflags);
289		return XFS_ERROR(EINVAL);
290	}
291
292	if (flags & XFS_DQ_USER)
293		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
294	if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
 295		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
296
297	return error ? error : error2;
298}
299
300/*
301 * Switch on (a given) quota enforcement for a filesystem.  This takes
302 * effect immediately.
303 * (Switching on quota accounting must be done at mount time.)
304 */
305int
306xfs_qm_scall_quotaon(
307	xfs_mount_t	*mp,
308	uint		flags)
309{
310	int		error;
311	uint		qf;
312	__int64_t	sbflags;
313
314	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
315	/*
 316	 * Switching on quota accounting must be done at mount time.
317	 */
318	flags &= ~(XFS_ALL_QUOTA_ACCT);
319
320	sbflags = 0;
321
322	if (flags == 0) {
323		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
324			__func__, mp->m_qflags);
325		return XFS_ERROR(EINVAL);
326	}
327
328	/* No fs can turn on quotas with a delayed effect */
329	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
330
331	/*
332	 * Can't enforce without accounting. We check the superblock
333	 * qflags here instead of m_qflags because rootfs can have
334	 * quota acct on ondisk without m_qflags' knowing.
335	 */
336	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
337	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
338	    (flags & XFS_UQUOTA_ENFD))
339	    ||
340	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
341	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
342	    (flags & XFS_GQUOTA_ACCT) == 0 &&
343	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
344	    (flags & XFS_OQUOTA_ENFD))) {
345		xfs_debug(mp,
346			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
347			__func__, flags, mp->m_sb.sb_qflags);
348		return XFS_ERROR(EINVAL);
349	}
350	/*
 351	 * If everything's up to date incore, then don't waste time.
352	 */
353	if ((mp->m_qflags & flags) == flags)
354		return XFS_ERROR(EEXIST);
355
356	/*
357	 * Change sb_qflags on disk but not incore mp->qflags
358	 * if this is the root filesystem.
359	 */
360	spin_lock(&mp->m_sb_lock);
361	qf = mp->m_sb.sb_qflags;
362	mp->m_sb.sb_qflags = qf | flags;
363	spin_unlock(&mp->m_sb_lock);
364
365	/*
366	 * There's nothing to change if it's the same.
367	 */
368	if ((qf & flags) == flags && sbflags == 0)
369		return XFS_ERROR(EEXIST);
370	sbflags |= XFS_SB_QFLAGS;
371
372	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
 373		return (error);
374	/*
375	 * If we aren't trying to switch on quota enforcement, we are done.
376	 */
377	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
378	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
379	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
380	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
381	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
382	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
383	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
384		return (0);
385
386	if (! XFS_IS_QUOTA_RUNNING(mp))
387		return XFS_ERROR(ESRCH);
388
389	/*
390	 * Switch on quota enforcement in core.
391	 */
392	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
393	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
394	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
395
396	return (0);
397}
 398
399
400/*
 401 * Return quota status information, such as uquota-off, enforcements, etc.
402 */
403int
404xfs_qm_scall_getqstat(
405	struct xfs_mount	*mp,
 406	struct fs_quota_stat	*out)
407{
408	struct xfs_quotainfo	*q = mp->m_quotainfo;
409	struct xfs_inode	*uip, *gip;
 410	boolean_t		tempuqip, tempgqip;
411
412	uip = gip = NULL;
413	tempuqip = tempgqip = B_FALSE;
414	memset(out, 0, sizeof(fs_quota_stat_t));
415
416	out->qs_version = FS_QSTAT_VERSION;
417	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
418		out->qs_uquota.qfs_ino = NULLFSINO;
419		out->qs_gquota.qfs_ino = NULLFSINO;
420		return (0);
421	}
422	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
423							(XFS_ALL_QUOTA_ACCT|
424							 XFS_ALL_QUOTA_ENFD));
425	out->qs_pad = 0;
426	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
427	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
428
429	if (q) {
430		uip = q->qi_uquotaip;
431		gip = q->qi_gquotaip;
432	}
433	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
434		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
435					0, 0, &uip) == 0)
436			tempuqip = B_TRUE;
437	}
438	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
439		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
440					0, 0, &gip) == 0)
441			tempgqip = B_TRUE;
442	}
443	if (uip) {
444		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
445		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
446		if (tempuqip)
447			IRELE(uip);
448	}
449	if (gip) {
450		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
451		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
452		if (tempgqip)
453			IRELE(gip);
454	}
455	if (q) {
456		out->qs_incoredqs = q->qi_dquots;
457		out->qs_btimelimit = q->qi_btimelimit;
458		out->qs_itimelimit = q->qi_itimelimit;
459		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
460		out->qs_bwarnlimit = q->qi_bwarnlimit;
461		out->qs_iwarnlimit = q->qi_iwarnlimit;
462	}
 463	return 0;
464}
465
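The fs_quota_stat structure filled in above is what userspace receives from quotactl(2) with the Q_XGETQSTAT command. The illustrative reader below is a sketch, not part of this file; the device path argument is whatever block device the XFS filesystem is mounted from, and the helper name is made up for the example.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XGETQSTAT, struct fs_quota_stat */

static int print_qstat(const char *dev)
{
	struct fs_quota_stat qs;

	if (quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), dev, 0, (caddr_t)&qs) < 0)
		return -1;

	printf("version %d, flags 0x%x, incore dquots %u, btimelimit %d\n",
	       qs.qs_version, qs.qs_flags,
	       (unsigned int)qs.qs_incoredqs, qs.qs_btimelimit);
	return 0;
}
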
466#define XFS_DQ_MASK \
 467	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
468
469/*
470 * Adjust quota limits, and start/stop timers accordingly.
471 */
472int
473xfs_qm_scall_setqlim(
474	xfs_mount_t		*mp,
475	xfs_dqid_t		id,
476	uint			type,
477	fs_disk_quota_t		*newlim)
478{
479	struct xfs_quotainfo	*q = mp->m_quotainfo;
480	xfs_disk_dquot_t	*ddq;
481	xfs_dquot_t		*dqp;
 482	xfs_trans_t		*tp;
483	int			error;
484	xfs_qcnt_t		hard, soft;
485
486	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
487		return EINVAL;
488	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
489		return 0;
490
491	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
492	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
493				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
494		xfs_trans_cancel(tp, 0);
495		return (error);
496	}
497
498	/*
499	 * We don't want to race with a quotaoff so take the quotaoff lock.
500	 * (We don't hold an inode lock, so there's nothing else to stop
501	 * a quotaoff from happening). (XXXThis doesn't currently happen
 502	 * because we take the vfslock before calling xfs_qm_sysent).
503	 */
504	mutex_lock(&q->qi_quotaofflock);
505
506	/*
507	 * Get the dquot (locked), and join it to the transaction.
508	 * Allocate the dquot if this doesn't exist.
509	 */
510	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
511		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
512		ASSERT(error != ENOENT);
513		goto out_unlock;
 514	}
515	xfs_trans_dqjoin(tp, dqp);
516	ddq = &dqp->q_core;
517
 518	/*
 519	 * Make sure that hardlimits are >= soft limits before changing.
520	 */
521	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
522		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
523			be64_to_cpu(ddq->d_blk_hardlimit);
524	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
525		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
526			be64_to_cpu(ddq->d_blk_softlimit);
527	if (hard == 0 || hard >= soft) {
528		ddq->d_blk_hardlimit = cpu_to_be64(hard);
529		ddq->d_blk_softlimit = cpu_to_be64(soft);
530		if (id == 0) {
531			q->qi_bhardlimit = hard;
532			q->qi_bsoftlimit = soft;
533		}
534	} else {
535		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
536	}
537	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
538		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
539			be64_to_cpu(ddq->d_rtb_hardlimit);
540	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
541		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
542			be64_to_cpu(ddq->d_rtb_softlimit);
543	if (hard == 0 || hard >= soft) {
544		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
545		ddq->d_rtb_softlimit = cpu_to_be64(soft);
546		if (id == 0) {
547			q->qi_rtbhardlimit = hard;
548			q->qi_rtbsoftlimit = soft;
549		}
550	} else {
551		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
552	}
553
 554	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
555		(xfs_qcnt_t) newlim->d_ino_hardlimit :
556			be64_to_cpu(ddq->d_ino_hardlimit);
557	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
558		(xfs_qcnt_t) newlim->d_ino_softlimit :
559			be64_to_cpu(ddq->d_ino_softlimit);
560	if (hard == 0 || hard >= soft) {
561		ddq->d_ino_hardlimit = cpu_to_be64(hard);
562		ddq->d_ino_softlimit = cpu_to_be64(soft);
563		if (id == 0) {
564			q->qi_ihardlimit = hard;
565			q->qi_isoftlimit = soft;
566		}
567	} else {
568		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
569	}
570
571	/*
572	 * Update warnings counter(s) if requested
573	 */
574	if (newlim->d_fieldmask & FS_DQ_BWARNS)
575		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
576	if (newlim->d_fieldmask & FS_DQ_IWARNS)
577		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
578	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
579		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
580
581	if (id == 0) {
582		/*
583		 * Timelimits for the super user set the relative time
584		 * the other users can be over quota for this file system.
585		 * If it is zero a default is used.  Ditto for the default
586		 * soft and hard limit values (already done, above), and
587		 * for warnings.
588		 */
589		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
590			q->qi_btimelimit = newlim->d_btimer;
591			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
592		}
593		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
594			q->qi_itimelimit = newlim->d_itimer;
595			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
596		}
597		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
598			q->qi_rtbtimelimit = newlim->d_rtbtimer;
599			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
600		}
601		if (newlim->d_fieldmask & FS_DQ_BWARNS)
602			q->qi_bwarnlimit = newlim->d_bwarns;
603		if (newlim->d_fieldmask & FS_DQ_IWARNS)
604			q->qi_iwarnlimit = newlim->d_iwarns;
605		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
606			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
607	} else {
608		/*
609		 * If the user is now over quota, start the timelimit.
610		 * The user will not be 'warned'.
611		 * Note that we keep the timers ticking, whether enforcement
612		 * is on or off. We don't really want to bother with iterating
613		 * over all ondisk dquots and turning the timers on/off.
614		 */
615		xfs_qm_adjust_dqtimers(mp, ddq);
616	}
617	dqp->dq_flags |= XFS_DQ_DIRTY;
618	xfs_trans_log_dquot(tp, dqp);
619
620	error = xfs_trans_commit(tp, 0);
621	xfs_qm_dqrele(dqp);
622
623 out_unlock:
624	mutex_unlock(&q->qi_quotaofflock);
625	return error;
626}
627
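Note that the limits arrive here from quotactl(2) as a struct fs_disk_quota with block limits expressed in 512-byte basic blocks, which is why the code above converts them with XFS_BB_TO_FSB(). The sketch below is illustrative only and not part of this file; it sets just a block hard limit of about 1 GiB for one user, and the helper name, uid and device path are placeholders.

#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XSETQLIM, struct fs_disk_quota */

static int set_blk_hardlimit(const char *dev, uid_t uid)
{
	struct fs_disk_quota d;

	memset(&d, 0, sizeof(d));
	d.d_version = FS_DQUOT_VERSION;
	d.d_flags = FS_USER_QUOTA;
	d.d_id = uid;
	d.d_fieldmask = FS_DQ_BHARD;		/* change only the block hard limit */
	d.d_blk_hardlimit = (1ULL << 30) / 512;	/* ~1 GiB, in 512-byte blocks */

	return quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid, (caddr_t)&d);
}
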
628STATIC int
629xfs_qm_log_quotaoff_end(
630	xfs_mount_t		*mp,
631	xfs_qoff_logitem_t	*startqoff,
 632	uint			flags)
633{
634	xfs_trans_t		*tp;
635	int			error;
636	xfs_qoff_logitem_t	*qoffi;
637
638	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
639
640	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
641				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
642		xfs_trans_cancel(tp, 0);
643		return (error);
644	}
645
646	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
647					flags & XFS_ALL_QUOTA_ACCT);
 648	xfs_trans_log_quotaoff_item(tp, qoffi);
649
650	/*
651	 * We have to make sure that the transaction is secure on disk before we
652	 * return and actually stop quota accounting. So, make it synchronous.
 653	 * We don't care about quotaoff's performance.
654	 */
655	xfs_trans_set_sync(tp);
656	error = xfs_trans_commit(tp, 0);
657	return (error);
658}
659
660
661STATIC int
662xfs_qm_log_quotaoff(
663	xfs_mount_t	       *mp,
664	xfs_qoff_logitem_t     **qoffstartp,
665	uint		       flags)
666{
667	xfs_trans_t	       *tp;
668	int			error;
669	xfs_qoff_logitem_t     *qoffi=NULL;
670	uint			oldsbqflag=0;
671
672	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
673	if ((error = xfs_trans_reserve(tp, 0,
674				      sizeof(xfs_qoff_logitem_t) * 2 +
675				      mp->m_sb.sb_sectsize + 128,
676				      0,
677				      0,
678				      XFS_DEFAULT_LOG_COUNT))) {
679		goto error0;
680	}
681
682	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
683	xfs_trans_log_quotaoff_item(tp, qoffi);
684
685	spin_lock(&mp->m_sb_lock);
686	oldsbqflag = mp->m_sb.sb_qflags;
687	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
688	spin_unlock(&mp->m_sb_lock);
689
690	xfs_mod_sb(tp, XFS_SB_QFLAGS);
691
692	/*
693	 * We have to make sure that the transaction is secure on disk before we
694	 * return and actually stop quota accounting. So, make it synchronous.
 695	 * We don't care about quotaoff's performance.
696	 */
697	xfs_trans_set_sync(tp);
698	error = xfs_trans_commit(tp, 0);
699
700error0:
701	if (error) {
702		xfs_trans_cancel(tp, 0);
703		/*
704		 * No one else is modifying sb_qflags, so this is OK.
705		 * We still hold the quotaofflock.
706		 */
707		spin_lock(&mp->m_sb_lock);
708		mp->m_sb.sb_qflags = oldsbqflag;
709		spin_unlock(&mp->m_sb_lock);
710	}
711	*qoffstartp = qoffi;
712	return (error);
713}
714
715
716int
717xfs_qm_scall_getquota(
718	struct xfs_mount	*mp,
719	xfs_dqid_t		id,
720	uint			type,
721	struct fs_disk_quota	*dst)
722{
723	struct xfs_dquot	*dqp;
724	int			error;
725
726	/*
727	 * Try to get the dquot. We don't want it allocated on disk, so
728	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
729	 * exist, we'll get ENOENT back.
730	 */
731	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
 
 
 
 
 
 
 
732	if (error)
733		return error;
734
735	/*
736	 * If everything's NULL, this dquot doesn't quite exist as far as
737	 * our utility programs are concerned.
738	 */
739	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
740		error = XFS_ERROR(ENOENT);
741		goto out_put;
742	}
743
744	memset(dst, 0, sizeof(*dst));
745	dst->d_version = FS_DQUOT_VERSION;
746	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
747	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
748	dst->d_blk_hardlimit =
749		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
750	dst->d_blk_softlimit =
751		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
752	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
753	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
754	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
755	dst->d_icount = dqp->q_res_icount;
756	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
757	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
758	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
759	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
760	dst->d_rtb_hardlimit =
761		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
762	dst->d_rtb_softlimit =
763		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
764	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
765	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
766	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
767
768	/*
769	 * Internally, we don't reset all the timers when quota enforcement
770	 * gets turned off. No need to confuse the user level code,
771	 * so return zeroes in that case.
772	 */
773	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
774	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
775			(dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
776		dst->d_btimer = 0;
777		dst->d_itimer = 0;
778		dst->d_rtbtimer = 0;
779	}
780
781#ifdef DEBUG
782	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
783	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
784			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
785	    dst->d_id != 0) {
786		if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) &&
787		    (dst->d_blk_softlimit > 0)) {
788			ASSERT(dst->d_btimer != 0);
789		}
790		if (((int) dst->d_icount > (int) dst->d_ino_softlimit) &&
791		    (dst->d_ino_softlimit > 0)) {
792			ASSERT(dst->d_itimer != 0);
793		}
794	}
795#endif
796out_put:
797	xfs_qm_dqput(dqp);
798	return error;
799}
800
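The query side of the same interface is Q_XGETQUOTA, which hands the fs_disk_quota filled in above back to userspace. An illustrative reader follows, not part of this file; the device path, uid and helper name are placeholders.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XGETQUOTA, struct fs_disk_quota */

static int print_user_quota(const char *dev, uid_t uid)
{
	struct fs_disk_quota d;

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, uid, (caddr_t)&d) < 0)
		return -1;

	printf("uid %u: used %llu BBs, soft %llu, hard %llu, btimer %d\n",
	       (unsigned int)d.d_id,
	       (unsigned long long)d.d_bcount,
	       (unsigned long long)d.d_blk_softlimit,
	       (unsigned long long)d.d_blk_hardlimit,
	       (int)d.d_btimer);
	return 0;
}
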
801STATIC uint
802xfs_qm_export_qtype_flags(
803	uint flags)
804{
805	/*
806	 * Can't be more than one, or none.
807	 */
808	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
809		(FS_PROJ_QUOTA | FS_USER_QUOTA));
810	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
811		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
812	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
813		(FS_USER_QUOTA | FS_GROUP_QUOTA));
814	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
815
816	return (flags & XFS_DQ_USER) ?
817		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
818			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
819}
820
821STATIC uint
822xfs_qm_export_flags(
823	uint flags)
824{
825	uint uflags;
826
827	uflags = 0;
828	if (flags & XFS_UQUOTA_ACCT)
829		uflags |= FS_QUOTA_UDQ_ACCT;
830	if (flags & XFS_PQUOTA_ACCT)
831		uflags |= FS_QUOTA_PDQ_ACCT;
832	if (flags & XFS_GQUOTA_ACCT)
833		uflags |= FS_QUOTA_GDQ_ACCT;
834	if (flags & XFS_UQUOTA_ENFD)
835		uflags |= FS_QUOTA_UDQ_ENFD;
836	if (flags & (XFS_OQUOTA_ENFD)) {
837		uflags |= (flags & XFS_GQUOTA_ACCT) ?
838			FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
839	}
840	return (uflags);
841}
 842
843
844STATIC int
845xfs_dqrele_inode(
846	struct xfs_inode	*ip,
847	struct xfs_perag	*pag,
848	int			flags)
849{
850	/* skip quota inodes */
851	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
852	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
853		ASSERT(ip->i_udquot == NULL);
854		ASSERT(ip->i_gdquot == NULL);
855		return 0;
856	}
857
858	xfs_ilock(ip, XFS_ILOCK_EXCL);
859	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
860		xfs_qm_dqrele(ip->i_udquot);
861		ip->i_udquot = NULL;
862	}
863	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
864		xfs_qm_dqrele(ip->i_gdquot);
865		ip->i_gdquot = NULL;
866	}
867	xfs_iunlock(ip, XFS_ILOCK_EXCL);
868	return 0;
869}
 870
871
872/*
873 * Go thru all the inodes in the file system, releasing their dquots.
874 *
875 * Note that the mount structure gets modified to indicate that quotas are off
876 * AFTER this, in the case of quotaoff.
877 */
878void
879xfs_qm_dqrele_all_inodes(
880	struct xfs_mount *mp,
881	uint		 flags)
882{
883	ASSERT(mp->m_quotainfo);
884	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
885}
fs/xfs/xfs_qm_syscalls.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6
  7
  8#include "xfs.h"
  9#include "xfs_fs.h"
 10#include "xfs_shared.h"
 11#include "xfs_format.h"
 12#include "xfs_log_format.h"
 13#include "xfs_trans_resv.h"
 14#include "xfs_sb.h"
 15#include "xfs_mount.h"
 16#include "xfs_inode.h"
 17#include "xfs_trans.h"
 18#include "xfs_quota.h"
 19#include "xfs_qm.h"
 20#include "xfs_icache.h"
 21
 22int
 23xfs_qm_scall_quotaoff(
 24	xfs_mount_t		*mp,
 25	uint			flags)
 26{
 27	/*
 28	 * No file system can have quotas enabled on disk but not in core.
 29	 * Note that quota utilities (like quotaoff) _expect_
 30	 * errno == -EEXIST here.
 31	 */
 32	if ((mp->m_qflags & flags) == 0)
 33		return -EEXIST;
 34
 35	/*
 36	 * We do not support actually turning off quota accounting any more.
 37	 * Just log a warning and ignore the accounting related flags.
 38	 */
 39	if (flags & XFS_ALL_QUOTA_ACCT)
 40		xfs_info(mp, "disabling of quota accounting not supported.");
 41
 42	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
 43	mp->m_qflags &= ~(flags & XFS_ALL_QUOTA_ENFD);
 44	spin_lock(&mp->m_sb_lock);
 45	mp->m_sb.sb_qflags = mp->m_qflags;
 46	spin_unlock(&mp->m_sb_lock);
 47	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
 48
 49	/* XXX what to do if error ? Revert back to old vals incore ? */
 50	return xfs_sync_sb(mp, false);
 51}
 52
 53STATIC int
 54xfs_qm_scall_trunc_qfile(
 55	struct xfs_mount	*mp,
 56	xfs_ino_t		ino)
 57{
 58	struct xfs_inode	*ip;
 59	struct xfs_trans	*tp;
 60	int			error;
 61
 62	if (ino == NULLFSINO)
 63		return 0;
 64
 65	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
 66	if (error)
 67		return error;
 68
 69	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 70
 71	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 72	if (error) {
 73		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 74		goto out_put;
 75	}
 76
 77	xfs_ilock(ip, XFS_ILOCK_EXCL);
 78	xfs_trans_ijoin(tp, ip, 0);
 79
 80	ip->i_disk_size = 0;
 81	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 82
 83	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 84	if (error) {
 85		xfs_trans_cancel(tp);
 86		goto out_unlock;
 87	}
 88
 89	ASSERT(ip->i_df.if_nextents == 0);
 90
 91	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 92	error = xfs_trans_commit(tp);
 93
 94out_unlock:
 95	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 96out_put:
 97	xfs_irele(ip);
 98	return error;
 99}
100
101int
102xfs_qm_scall_trunc_qfiles(
103	xfs_mount_t	*mp,
104	uint		flags)
105{
106	int		error = -EINVAL;
107
108	if (!xfs_has_quota(mp) || flags == 0 ||
109	    (flags & ~XFS_QMOPT_QUOTALL)) {
110		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
111			__func__, flags, mp->m_qflags);
112		return -EINVAL;
113	}
114
115	if (flags & XFS_QMOPT_UQUOTA) {
116		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
117		if (error)
118			return error;
119	}
120	if (flags & XFS_QMOPT_GQUOTA) {
121		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
122		if (error)
123			return error;
124	}
125	if (flags & XFS_QMOPT_PQUOTA)
126		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
127
128	return error;
129}
130
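xfs_qm_scall_trunc_qfiles() is the backend of the Q_XQUOTARM quotactl command, which truncates the on-disk quota inodes once the corresponding quota type is no longer active; the quotactl layer maps the FS_*_QUOTA bits from userspace onto the XFS_QMOPT_* flags seen here. A hedged userspace sketch, not part of this file, with the device path and helper name as placeholders:

#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XQUOTARM, FS_USER_QUOTA */

static int remove_user_quota_file(const char *dev)
{
	/* Ask the kernel to truncate the user quota inode. */
	unsigned int flags = FS_USER_QUOTA;

	return quotactl(QCMD(Q_XQUOTARM, USRQUOTA), dev, 0, (caddr_t)&flags);
}
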
131/*
132 * Switch on (a given) quota enforcement for a filesystem.  This takes
133 * effect immediately.
134 * (Switching on quota accounting must be done at mount time.)
135 */
136int
137xfs_qm_scall_quotaon(
138	xfs_mount_t	*mp,
139	uint		flags)
140{
141	int		error;
 142	uint		qf;
 143
144	/*
145	 * Switching on quota accounting must be done at mount time,
146	 * only consider quota enforcement stuff here.
147	 */
 148	flags &= XFS_ALL_QUOTA_ENFD;
149
150	if (flags == 0) {
151		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
152			__func__, mp->m_qflags);
153		return -EINVAL;
154	}
 155
156	/*
157	 * Can't enforce without accounting. We check the superblock
158	 * qflags here instead of m_qflags because rootfs can have
159	 * quota acct on ondisk without m_qflags' knowing.
160	 */
161	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
162	     (flags & XFS_UQUOTA_ENFD)) ||
163	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
164	     (flags & XFS_GQUOTA_ENFD)) ||
165	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
 166	     (flags & XFS_PQUOTA_ENFD))) {
167		xfs_debug(mp,
168			"%s: Can't enforce without acct, flags=%x sbflags=%x",
169			__func__, flags, mp->m_sb.sb_qflags);
170		return -EINVAL;
171	}
172	/*
 173	 * If everything's up to date incore, then don't waste time.
174	 */
175	if ((mp->m_qflags & flags) == flags)
176		return -EEXIST;
177
178	/*
179	 * Change sb_qflags on disk but not incore mp->qflags
180	 * if this is the root filesystem.
181	 */
182	spin_lock(&mp->m_sb_lock);
183	qf = mp->m_sb.sb_qflags;
184	mp->m_sb.sb_qflags = qf | flags;
185	spin_unlock(&mp->m_sb_lock);
186
187	/*
188	 * There's nothing to change if it's the same.
189	 */
190	if ((qf & flags) == flags)
 191		return -EEXIST;
192
193	error = xfs_sync_sb(mp, false);
194	if (error)
195		return error;
196	/*
197	 * If we aren't trying to switch on quota enforcement, we are done.
198	 */
199	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
200	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
201	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
202	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
203	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
204	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
 205		return 0;
206
207	if (!XFS_IS_QUOTA_ON(mp))
208		return -ESRCH;
209
210	/*
211	 * Switch on quota enforcement in core.
212	 */
213	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
214	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
215	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
216
217	return 0;
218}
219
220#define XFS_QC_MASK (QC_LIMIT_MASK | QC_TIMER_MASK)
221
222/*
223 * Adjust limits of this quota, and the defaults if passed in.  Returns true
224 * if the new limits made sense and were applied, false otherwise.
225 */
226static inline bool
227xfs_setqlim_limits(
228	struct xfs_mount	*mp,
229	struct xfs_dquot_res	*res,
230	struct xfs_quota_limits	*qlim,
231	xfs_qcnt_t		hard,
232	xfs_qcnt_t		soft,
233	const char		*tag)
234{
235	/* The hard limit can't be less than the soft limit. */
236	if (hard != 0 && hard < soft) {
237		xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag,
238				soft);
239		return false;
240	}
241
242	res->hardlimit = hard;
243	res->softlimit = soft;
244	if (qlim) {
245		qlim->hard = hard;
 246		qlim->soft = soft;
247	}
248
249	return true;
250}
251
252static inline void
253xfs_setqlim_timer(
254	struct xfs_mount	*mp,
255	struct xfs_dquot_res	*res,
256	struct xfs_quota_limits	*qlim,
257	s64			timer)
258{
259	if (qlim) {
260		/* Set the length of the default grace period. */
261		res->timer = xfs_dquot_set_grace_period(timer);
262		qlim->time = res->timer;
263	} else {
264		/* Set the grace period expiration on a quota. */
265		res->timer = xfs_dquot_set_timeout(mp, timer);
266	}
267}
268
269/*
270 * Adjust quota limits, and start/stop timers accordingly.
271 */
272int
273xfs_qm_scall_setqlim(
274	struct xfs_mount	*mp,
275	xfs_dqid_t		id,
276	xfs_dqtype_t		type,
277	struct qc_dqblk		*newlim)
278{
279	struct xfs_quotainfo	*q = mp->m_quotainfo;
280	struct xfs_dquot	*dqp;
281	struct xfs_trans	*tp;
282	struct xfs_def_quota	*defq;
283	struct xfs_dquot_res	*res;
284	struct xfs_quota_limits	*qlim;
285	int			error;
286	xfs_qcnt_t		hard, soft;
287
288	if (newlim->d_fieldmask & ~XFS_QC_MASK)
289		return -EINVAL;
290	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
291		return 0;
 292
293	/*
294	 * Get the dquot (locked) before we start, as we need to do a
295	 * transaction to allocate it if it doesn't exist. Once we have the
296	 * dquot, unlock it so we can start the next transaction safely. We hold
297	 * a reference to the dquot, so it's safe to do this unlock/lock without
298	 * it being reclaimed in the mean time.
299	 */
300	error = xfs_qm_dqget(mp, id, type, true, &dqp);
301	if (error) {
302		ASSERT(error != -ENOENT);
 303		return error;
304	}
305
306	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
307	xfs_dqunlock(dqp);
308
309	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
310	if (error)
311		goto out_rele;
312
313	xfs_dqlock(dqp);
 314	xfs_trans_dqjoin(tp, dqp);
315
316	/*
317	 * Update quota limits, warnings, and timers, and the defaults
318	 * if we're touching id == 0.
319	 *
320	 * Make sure that hardlimits are >= soft limits before changing.
321	 *
322	 * Update warnings counter(s) if requested.
323	 *
324	 * Timelimits for the super user set the relative time the other users
325	 * can be over quota for this file system. If it is zero a default is
326	 * used.  Ditto for the default soft and hard limit values (already
327	 * done, above), and for warnings.
328	 *
329	 * For other IDs, userspace can bump out the grace period if over
330	 * the soft limit.
 331	 */
332
333	/* Blocks on the data device. */
334	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
335		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
336			dqp->q_blk.hardlimit;
337	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
338		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
339			dqp->q_blk.softlimit;
340	res = &dqp->q_blk;
341	qlim = id == 0 ? &defq->blk : NULL;
342
343	if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk"))
344		xfs_dquot_set_prealloc_limits(dqp);
345	if (newlim->d_fieldmask & QC_SPC_TIMER)
346		xfs_setqlim_timer(mp, res, qlim, newlim->d_spc_timer);
347
348	/* Blocks on the realtime device. */
349	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
350		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
351			dqp->q_rtb.hardlimit;
352	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
353		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
354			dqp->q_rtb.softlimit;
355	res = &dqp->q_rtb;
356	qlim = id == 0 ? &defq->rtb : NULL;
357
358	xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb");
359	if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
360		xfs_setqlim_timer(mp, res, qlim, newlim->d_rt_spc_timer);
361
362	/* Inodes */
363	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
364		(xfs_qcnt_t) newlim->d_ino_hardlimit :
365			dqp->q_ino.hardlimit;
366	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
367		(xfs_qcnt_t) newlim->d_ino_softlimit :
368			dqp->q_ino.softlimit;
369	res = &dqp->q_ino;
370	qlim = id == 0 ? &defq->ino : NULL;
371
372	xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino");
373	if (newlim->d_fieldmask & QC_INO_TIMER)
 374		xfs_setqlim_timer(mp, res, qlim, newlim->d_ino_timer);
375
 376	if (id != 0) {
377		/*
378		 * If the user is now over quota, start the timelimit.
379		 * The user will not be 'warned'.
380		 * Note that we keep the timers ticking, whether enforcement
381		 * is on or off. We don't really want to bother with iterating
382		 * over all ondisk dquots and turning the timers on/off.
383		 */
384		xfs_qm_adjust_dqtimers(dqp);
385	}
386	dqp->q_flags |= XFS_DQFLAG_DIRTY;
387	xfs_trans_log_dquot(tp, dqp);
388
 389	error = xfs_trans_commit(tp);
390
391out_rele:
392	xfs_qm_dqrele(dqp);
393	return error;
394}
395
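Unlike the v3.5.6 version, this function is no longer driven by an XFS-private quotactl switch: it sits behind the generic ->set_dqblk hook of the VFS quota layer, which passes limits already converted to bytes in a struct qc_dqblk. The fragment below is a rough sketch of that glue under the assumption that it mirrors fs/xfs/xfs_quotaops.c; the handler and ops names are invented for illustration, and the read-only and quota-off checks performed by the real code are omitted.

/* Kernel-side sketch only, not part of this file. */
static int example_fs_set_dqblk(
	struct super_block	*sb,
	struct kqid		qid,
	struct qc_dqblk		*qdq)
{
	/*
	 * Map the VFS id/type onto an XFS dquot id/type and hand off;
	 * xfs_quota_type() is the static type-mapping helper kept in
	 * fs/xfs/xfs_quotaops.c.
	 */
	return xfs_qm_scall_setqlim(XFS_M(sb),
			from_kqid(&init_user_ns, qid),
			xfs_quota_type(qid.type), qdq);
}

static const struct quotactl_ops example_quotactl_operations = {
	.set_dqblk	= example_fs_set_dqblk,
};
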
396/* Fill out the quota context. */
397static void
398xfs_qm_scall_getquota_fill_qc(
399	struct xfs_mount	*mp,
400	xfs_dqtype_t		type,
401	const struct xfs_dquot	*dqp,
402	struct qc_dqblk		*dst)
403{
404	memset(dst, 0, sizeof(*dst));
405	dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit);
406	dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit);
407	dst->d_ino_hardlimit = dqp->q_ino.hardlimit;
408	dst->d_ino_softlimit = dqp->q_ino.softlimit;
409	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved);
410	dst->d_ino_count = dqp->q_ino.reserved;
411	dst->d_spc_timer = dqp->q_blk.timer;
412	dst->d_ino_timer = dqp->q_ino.timer;
413	dst->d_ino_warns = 0;
414	dst->d_spc_warns = 0;
415	dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit);
416	dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit);
417	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved);
418	dst->d_rt_spc_timer = dqp->q_rtb.timer;
419	dst->d_rt_spc_warns = 0;
420
421	/*
422	 * Internally, we don't reset all the timers when quota enforcement
423	 * gets turned off. No need to confuse the user level code,
424	 * so return zeroes in that case.
425	 */
426	if (!xfs_dquot_is_enforced(dqp)) {
427		dst->d_spc_timer = 0;
428		dst->d_ino_timer = 0;
 429		dst->d_rt_spc_timer = 0;
430	}
431
432#ifdef DEBUG
433	if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) {
434		if ((dst->d_space > dst->d_spc_softlimit) &&
435		    (dst->d_spc_softlimit > 0)) {
436			ASSERT(dst->d_spc_timer != 0);
437		}
438		if ((dst->d_ino_count > dqp->q_ino.softlimit) &&
439		    (dqp->q_ino.softlimit > 0)) {
440			ASSERT(dst->d_ino_timer != 0);
 441		}
442	}
 443#endif
444}
445
446/* Return the quota information for the dquot matching id. */
447int
448xfs_qm_scall_getquota(
449	struct xfs_mount	*mp,
450	xfs_dqid_t		id,
451	xfs_dqtype_t		type,
452	struct qc_dqblk		*dst)
453{
454	struct xfs_dquot	*dqp;
455	int			error;
456
457	/*
458	 * Expedite pending inodegc work at the start of a quota reporting
 459	 * scan but don't block waiting for it to complete.
460	 */
461	if (id == 0)
462		xfs_inodegc_push(mp);
463
464	/*
465	 * Try to get the dquot. We don't want it allocated on disk, so don't
466	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
467	 */
468	error = xfs_qm_dqget(mp, id, type, false, &dqp);
469	if (error)
470		return error;
471
472	/*
473	 * If everything's NULL, this dquot doesn't quite exist as far as
474	 * our utility programs are concerned.
475	 */
476	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
477		error = -ENOENT;
478		goto out_put;
479	}
480
 481	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
 482
483out_put:
484	xfs_qm_dqput(dqp);
485	return error;
486}
487
488/*
489 * Return the quota information for the first initialized dquot whose id
490 * is at least as high as id.
491 */
492int
493xfs_qm_scall_getquota_next(
494	struct xfs_mount	*mp,
495	xfs_dqid_t		*id,
496	xfs_dqtype_t		type,
 497	struct qc_dqblk		*dst)
498{
499	struct xfs_dquot	*dqp;
 500	int			error;
501
502	/* Flush inodegc work at the start of a quota reporting scan. */
503	if (*id == 0)
504		xfs_inodegc_push(mp);
505
506	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
507	if (error)
 508		return error;
509
510	/* Fill in the ID we actually read from disk */
 511	*id = dqp->q_id;
512
513	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
514
515	xfs_qm_dqput(dqp);
 516	return error;
517}
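xfs_qm_scall_getquota_next() backs the Q_XGETNEXTQUOTA command, which lets userspace walk all allocated dquots without probing every possible id. An illustrative iteration loop follows, not part of this file; the device path and function name are placeholders, and only the user quota type is scanned.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* Q_XGETNEXTQUOTA, struct fs_disk_quota */

static void dump_all_user_quotas(const char *dev)
{
	struct fs_disk_quota d;
	unsigned int id = 0;

	/* Each call returns the first dquot whose id is >= the id passed in. */
	while (quotactl(QCMD(Q_XGETNEXTQUOTA, USRQUOTA), dev, id,
			(caddr_t)&d) == 0) {
		printf("uid %u: %llu BBs used\n",
		       (unsigned int)d.d_id,
		       (unsigned long long)d.d_bcount);
		id = d.d_id + 1;
	}
	/*
	 * The loop ends on the first failure; once past the last dquot the
	 * kernel reports that no further entry exists.
	 */
}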