v3.1
  1/*
  2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18
 19#include <linux/capability.h>
 20
 21#include "xfs.h"
 22#include "xfs_fs.h"
 23#include "xfs_bit.h"
 24#include "xfs_log.h"
 25#include "xfs_inum.h"
 26#include "xfs_trans.h"
 27#include "xfs_sb.h"
 28#include "xfs_ag.h"
 29#include "xfs_alloc.h"
 30#include "xfs_quota.h"
 31#include "xfs_mount.h"
 32#include "xfs_bmap_btree.h"
 33#include "xfs_inode.h"
 34#include "xfs_itable.h"
 35#include "xfs_bmap.h"
 36#include "xfs_rtalloc.h"
 37#include "xfs_error.h"
 38#include "xfs_attr.h"
 39#include "xfs_buf_item.h"
 40#include "xfs_utils.h"
 41#include "xfs_qm.h"
 42#include "xfs_trace.h"
 43
 44STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 45STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
 46					uint);
 47STATIC uint	xfs_qm_export_flags(uint);
 48STATIC uint	xfs_qm_export_qtype_flags(uint);
 49STATIC void	xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
 50					fs_disk_quota_t *);
 51
 52
 53/*
 54 * Turn off quota accounting and/or enforcement for all udquots and/or
 55 * gdquots. Called only at unmount time.
 56 *
 57 * This assumes that there are no dquots of this file system cached
 58 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 59 * it is an error to call this twice, without purging the cache.
 60 */
 61int
 62xfs_qm_scall_quotaoff(
 63	xfs_mount_t		*mp,
 64	uint			flags)
 65{
 66	struct xfs_quotainfo	*q = mp->m_quotainfo;
 67	uint			dqtype;
 68	int			error;
 69	uint			inactivate_flags;
 70	xfs_qoff_logitem_t	*qoffstart;
 71	int			nculprits;
 72
 73	/*
 74	 * No file system can have quotas enabled on disk but not in core.
 75	 * Note that quota utilities (like quotaoff) _expect_
 76	 * errno == EEXIST here.
 77	 */
 78	if ((mp->m_qflags & flags) == 0)
 79		return XFS_ERROR(EEXIST);
 80	error = 0;
 81
 82	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
 83
 84	/*
 85	 * We don't want to deal with two quotaoffs messing up each other,
 86	 * so we're going to serialize it. quotaoff isn't exactly a performance
 87	 * critical thing.
 88	 * If quotaoff, then we must be dealing with the root filesystem.
 89	 */
 90	ASSERT(q);
 91	mutex_lock(&q->qi_quotaofflock);
 92
 93	/*
 94	 * If we're just turning off quota enforcement, change mp and go.
 95	 */
 96	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
 97		mp->m_qflags &= ~(flags);
 98
 99		spin_lock(&mp->m_sb_lock);
100		mp->m_sb.sb_qflags = mp->m_qflags;
101		spin_unlock(&mp->m_sb_lock);
102		mutex_unlock(&q->qi_quotaofflock);
103
104		/* XXX what to do if error ? Revert back to old vals incore ? */
105		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
106		return (error);
107	}
108
109	dqtype = 0;
110	inactivate_flags = 0;
111	/*
112	 * If accounting is off, we must turn enforcement off, clear the
113	 * quota 'CHKD' certificate to make it known that we have to
114	 * do a quotacheck the next time this quota is turned on.
115	 */
116	if (flags & XFS_UQUOTA_ACCT) {
117		dqtype |= XFS_QMOPT_UQUOTA;
118		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
119		inactivate_flags |= XFS_UQUOTA_ACTIVE;
120	}
121	if (flags & XFS_GQUOTA_ACCT) {
122		dqtype |= XFS_QMOPT_GQUOTA;
123		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
124		inactivate_flags |= XFS_GQUOTA_ACTIVE;
125	} else if (flags & XFS_PQUOTA_ACCT) {
126		dqtype |= XFS_QMOPT_PQUOTA;
127		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
128		inactivate_flags |= XFS_PQUOTA_ACTIVE;
129	}
130
131	/*
132	 * Nothing to do?  Don't complain. This happens when we're just
133	 * turning off quota enforcement.
134	 */
135	if ((mp->m_qflags & flags) == 0)
136		goto out_unlock;
137
138	/*
139	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
140	 * and synchronously. If we fail to write, we should abort the
141	 * operation as it cannot be recovered safely if we crash.
142	 */
143	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
144	if (error)
145		goto out_unlock;
146
147	/*
148	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
149	 * to take care of the race between dqget and quotaoff. We don't take
150	 * any special locks to reset these bits. All processes need to check
151	 * these bits *after* taking inode lock(s) to see if the particular
152	 * quota type is in the process of being turned off. If *ACTIVE, it is
153	 * guaranteed that all dquot structures and all quotainode ptrs will all
154	 * stay valid as long as that inode is kept locked.
155	 *
156	 * There is no turning back after this.
157	 */
158	mp->m_qflags &= ~inactivate_flags;
159
160	/*
161	 * Give back all the dquot reference(s) held by inodes.
162	 * Here we go thru every single incore inode in this file system, and
163	 * do a dqrele on the i_udquot/i_gdquot that it may have.
164	 * Essentially, as long as somebody has an inode locked, this guarantees
165	 * that quotas will not be turned off. This is handy because in a
166	 * transaction once we lock the inode(s) and check for quotaon, we can
167	 * depend on the quota inodes (and other things) being valid as long as
168	 * we keep the lock(s).
169	 */
170	xfs_qm_dqrele_all_inodes(mp, flags);
171
172	/*
173	 * Next we make the changes in the quota flag in the mount struct.
174	 * This isn't protected by a particular lock directly, because we
175	 * don't want to take a mrlock every time we depend on quotas being on.
176	 */
177	mp->m_qflags &= ~(flags);
178
179	/*
180	 * Go through all the dquots of this file system and purge them,
181	 * according to what was turned off. We may not be able to get rid
182	 * of all dquots, because dquots can have temporary references that
183	 * are not attached to inodes. eg. xfs_setattr, xfs_create.
184	 * So, if we couldn't purge all the dquots from the filesystem,
185	 * we can't get rid of the incore data structures.
186	 */
187	while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
188		delay(10 * nculprits);
189
190	/*
191	 * Transactions that had started before ACTIVE state bit was cleared
192	 * could have logged many dquots, so they'd have higher LSNs than
193	 * the first QUOTAOFF log record does. If we happen to crash when
194	 * the tail of the log has gone past the QUOTAOFF record, but
195	 * before the last dquot modification, those dquots __will__
196	 * recover, and that's not good.
197	 *
198	 * So, we have QUOTAOFF start and end logitems; the start
199	 * logitem won't get overwritten until the end logitem appears...
200	 */
201	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
202	if (error) {
203		/* We're screwed now. Shutdown is the only option. */
204		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
205		goto out_unlock;
206	}
207
208	/*
209	 * If quotas are completely disabled, close shop.
210	 */
211	if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
212	    ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
213		mutex_unlock(&q->qi_quotaofflock);
214		xfs_qm_destroy_quotainfo(mp);
215		return (0);
216	}
217
218	/*
219	 * Release our quotainode references if we don't need them anymore.
220	 */
221	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
222		IRELE(q->qi_uquotaip);
223		q->qi_uquotaip = NULL;
224	}
225	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
226		IRELE(q->qi_gquotaip);
227		q->qi_gquotaip = NULL;
228	}
229
230out_unlock:
231	mutex_unlock(&q->qi_quotaofflock);
232	return error;
233}
234
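For context, xfs_qm_scall_quotaoff() is normally reached from user space through quotactl(2) with the Q_XQUOTAOFF subcommand. A minimal illustrative caller follows; it is not part of this file, the device path is a placeholder, and it clears only user-quota enforcement, which corresponds to the early "just turning off quota enforcement" branch above.

#include <stdio.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XQUOTAOFF, FS_QUOTA_UDQ_ENFD */

int main(void)
{
	/* Stop enforcing user quotas; accounting keeps running. */
	unsigned int qflags = FS_QUOTA_UDQ_ENFD;

	if (quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), "/dev/sdXN" /* placeholder */, 0,
		     (void *)&qflags) < 0) {
		perror("quotactl(Q_XQUOTAOFF)");
		return 1;
	}
	return 0;
}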
235STATIC int
236xfs_qm_scall_trunc_qfile(
237	struct xfs_mount	*mp,
238	xfs_ino_t		ino)
239{
240	struct xfs_inode	*ip;
241	struct xfs_trans	*tp;
242	int			error;
243
244	if (ino == NULLFSINO)
245		return 0;
246
247	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
248	if (error)
249		return error;
250
251	xfs_ilock(ip, XFS_IOLOCK_EXCL);
252
253	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
254	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
255				  XFS_TRANS_PERM_LOG_RES,
256				  XFS_ITRUNCATE_LOG_COUNT);
257	if (error) {
258		xfs_trans_cancel(tp, 0);
259		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
260		goto out_put;
261	}
262
263	xfs_ilock(ip, XFS_ILOCK_EXCL);
264	xfs_trans_ijoin(tp, ip);
265
266	error = xfs_itruncate_data(&tp, ip, 0);
267	if (error) {
268		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
269				     XFS_TRANS_ABORT);
270		goto out_unlock;
271	}
272
273	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
274	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
275
276out_unlock:
277	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
278out_put:
279	IRELE(ip);
280	return error;
281}
282
283int
284xfs_qm_scall_trunc_qfiles(
285	xfs_mount_t	*mp,
286	uint		flags)
287{
288	int		error = 0, error2 = 0;
289
290	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
291		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
292			__func__, flags, mp->m_qflags);
293		return XFS_ERROR(EINVAL);
294	}
295
296	if (flags & XFS_DQ_USER)
297		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
298	if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
299		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
300
301	return error ? error : error2;
302}
303
304/*
305 * Switch on (a given) quota enforcement for a filesystem.  This takes
306 * effect immediately.
307 * (Switching on quota accounting must be done at mount time.)
308 */
309int
310xfs_qm_scall_quotaon(
311	xfs_mount_t	*mp,
312	uint		flags)
313{
314	int		error;
315	uint		qf;
316	__int64_t	sbflags;
317
318	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
319	/*
320	 * Switching on quota accounting must be done at mount time.
321	 */
322	flags &= ~(XFS_ALL_QUOTA_ACCT);
323
324	sbflags = 0;
325
326	if (flags == 0) {
327		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
328			__func__, mp->m_qflags);
329		return XFS_ERROR(EINVAL);
330	}
331
332	/* No fs can turn on quotas with a delayed effect */
333	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
334
335	/*
336	 * Can't enforce without accounting. We check the superblock
337	 * qflags here instead of m_qflags because rootfs can have
338	 * quota acct on ondisk without m_qflags' knowing.
339	 */
340	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
341	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
342	    (flags & XFS_UQUOTA_ENFD))
343	    ||
344	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
345	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
346	    (flags & XFS_GQUOTA_ACCT) == 0 &&
347	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
348	    (flags & XFS_OQUOTA_ENFD))) {
349		xfs_debug(mp,
350			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
351			__func__, flags, mp->m_sb.sb_qflags);
352		return XFS_ERROR(EINVAL);
353	}
354	/*
355	 * If everything's up to date incore, then don't waste time.
356	 */
357	if ((mp->m_qflags & flags) == flags)
358		return XFS_ERROR(EEXIST);
359
360	/*
361	 * Change sb_qflags on disk but not incore mp->qflags
362	 * if this is the root filesystem.
363	 */
364	spin_lock(&mp->m_sb_lock);
365	qf = mp->m_sb.sb_qflags;
366	mp->m_sb.sb_qflags = qf | flags;
367	spin_unlock(&mp->m_sb_lock);
368
369	/*
370	 * There's nothing to change if it's the same.
371	 */
372	if ((qf & flags) == flags && sbflags == 0)
373		return XFS_ERROR(EEXIST);
374	sbflags |= XFS_SB_QFLAGS;
375
376	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
377		return (error);
378	/*
379	 * If we aren't trying to switch on quota enforcement, we are done.
380	 */
381	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
382	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
383	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
384	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
385	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
386	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
387	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
388		return (0);
389
390	if (! XFS_IS_QUOTA_RUNNING(mp))
391		return XFS_ERROR(ESRCH);
392
393	/*
394	 * Switch on quota enforcement in core.
395	 */
396	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
397	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
398	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
399
400	return (0);
401}
402
403
404/*
405 * Return quota status information, such as uquota-off, enforcements, etc.
406 */
407int
408xfs_qm_scall_getqstat(
409	struct xfs_mount	*mp,
410	struct fs_quota_stat	*out)
411{
412	struct xfs_quotainfo	*q = mp->m_quotainfo;
413	struct xfs_inode	*uip, *gip;
414	boolean_t		tempuqip, tempgqip;
415
416	uip = gip = NULL;
417	tempuqip = tempgqip = B_FALSE;
418	memset(out, 0, sizeof(fs_quota_stat_t));
419
420	out->qs_version = FS_QSTAT_VERSION;
421	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
422		out->qs_uquota.qfs_ino = NULLFSINO;
423		out->qs_gquota.qfs_ino = NULLFSINO;
424		return (0);
425	}
426	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
427							(XFS_ALL_QUOTA_ACCT|
428							 XFS_ALL_QUOTA_ENFD));
429	out->qs_pad = 0;
430	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
431	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
432
433	if (q) {
434		uip = q->qi_uquotaip;
435		gip = q->qi_gquotaip;
436	}
437	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
438		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
439					0, 0, &uip) == 0)
440			tempuqip = B_TRUE;
441	}
442	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
443		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
444					0, 0, &gip) == 0)
445			tempgqip = B_TRUE;
446	}
447	if (uip) {
448		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
449		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
450		if (tempuqip)
451			IRELE(uip);
452	}
453	if (gip) {
454		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
455		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
456		if (tempgqip)
457			IRELE(gip);
458	}
459	if (q) {
460		out->qs_incoredqs = q->qi_dquots;
461		out->qs_btimelimit = q->qi_btimelimit;
462		out->qs_itimelimit = q->qi_itimelimit;
463		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
464		out->qs_bwarnlimit = q->qi_bwarnlimit;
465		out->qs_iwarnlimit = q->qi_iwarnlimit;
466	}
467	return 0;
468}
469
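xfs_qm_scall_getqstat() fills in struct fs_quota_stat for the Q_XGETQSTAT quotactl subcommand. A short illustrative reader, not part of this file (the device path is a placeholder):

#include <stdio.h>
#include <string.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQSTAT, struct fs_quota_stat */

int main(void)
{
	struct fs_quota_stat qstat;

	memset(&qstat, 0, sizeof(qstat));
	if (quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), "/dev/sdXN" /* placeholder */, 0,
		     (void *)&qstat) < 0) {
		perror("quotactl(Q_XGETQSTAT)");
		return 1;
	}

	printf("version=%d flags=0x%x incore dquots=%u\n",
	       qstat.qs_version, qstat.qs_flags, qstat.qs_incoredqs);
	printf("user quota inode=%llu, %llu blocks\n",
	       (unsigned long long)qstat.qs_uquota.qfs_ino,
	       (unsigned long long)qstat.qs_uquota.qfs_nblks);
	return 0;
}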
470#define XFS_DQ_MASK \
471	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
472
473/*
474 * Adjust quota limits, and start/stop timers accordingly.
475 */
476int
477xfs_qm_scall_setqlim(
478	xfs_mount_t		*mp,
479	xfs_dqid_t		id,
480	uint			type,
481	fs_disk_quota_t		*newlim)
482{
483	struct xfs_quotainfo	*q = mp->m_quotainfo;
484	xfs_disk_dquot_t	*ddq;
485	xfs_dquot_t		*dqp;
486	xfs_trans_t		*tp;
487	int			error;
488	xfs_qcnt_t		hard, soft;
489
490	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
491		return EINVAL;
492	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
493		return 0;
494
495	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
496	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
497				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
498		xfs_trans_cancel(tp, 0);
499		return (error);
500	}
501
502	/*
503	 * We don't want to race with a quotaoff so take the quotaoff lock.
504	 * (We don't hold an inode lock, so there's nothing else to stop
505	 * a quotaoff from happening). (XXXThis doesn't currently happen
506	 * because we take the vfslock before calling xfs_qm_sysent).
507	 */
508	mutex_lock(&q->qi_quotaofflock);
509
510	/*
511	 * Get the dquot (locked), and join it to the transaction.
512	 * Allocate the dquot if this doesn't exist.
513	 */
514	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
515		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
516		ASSERT(error != ENOENT);
517		goto out_unlock;
518	}
519	xfs_trans_dqjoin(tp, dqp);
520	ddq = &dqp->q_core;
521
522	/*
523	 * Make sure that hardlimits are >= soft limits before changing.
524	 */
525	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
526		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
527			be64_to_cpu(ddq->d_blk_hardlimit);
528	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
529		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
530			be64_to_cpu(ddq->d_blk_softlimit);
531	if (hard == 0 || hard >= soft) {
532		ddq->d_blk_hardlimit = cpu_to_be64(hard);
533		ddq->d_blk_softlimit = cpu_to_be64(soft);
534		if (id == 0) {
535			q->qi_bhardlimit = hard;
536			q->qi_bsoftlimit = soft;
537		}
538	} else {
539		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
540	}
541	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
542		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
543			be64_to_cpu(ddq->d_rtb_hardlimit);
544	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
545		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
546			be64_to_cpu(ddq->d_rtb_softlimit);
547	if (hard == 0 || hard >= soft) {
548		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
549		ddq->d_rtb_softlimit = cpu_to_be64(soft);
550		if (id == 0) {
551			q->qi_rtbhardlimit = hard;
552			q->qi_rtbsoftlimit = soft;
553		}
554	} else {
555		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
556	}
557
558	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
559		(xfs_qcnt_t) newlim->d_ino_hardlimit :
560			be64_to_cpu(ddq->d_ino_hardlimit);
561	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
562		(xfs_qcnt_t) newlim->d_ino_softlimit :
563			be64_to_cpu(ddq->d_ino_softlimit);
564	if (hard == 0 || hard >= soft) {
565		ddq->d_ino_hardlimit = cpu_to_be64(hard);
566		ddq->d_ino_softlimit = cpu_to_be64(soft);
567		if (id == 0) {
568			q->qi_ihardlimit = hard;
569			q->qi_isoftlimit = soft;
570		}
571	} else {
572		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
573	}
574
575	/*
576	 * Update warnings counter(s) if requested
577	 */
578	if (newlim->d_fieldmask & FS_DQ_BWARNS)
579		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
580	if (newlim->d_fieldmask & FS_DQ_IWARNS)
581		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
582	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
583		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
584
585	if (id == 0) {
586		/*
587		 * Timelimits for the super user set the relative time
588		 * the other users can be over quota for this file system.
589		 * If it is zero a default is used.  Ditto for the default
590		 * soft and hard limit values (already done, above), and
591		 * for warnings.
592		 */
593		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
594			q->qi_btimelimit = newlim->d_btimer;
595			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
596		}
597		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
598			q->qi_itimelimit = newlim->d_itimer;
599			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
600		}
601		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
602			q->qi_rtbtimelimit = newlim->d_rtbtimer;
603			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
604		}
605		if (newlim->d_fieldmask & FS_DQ_BWARNS)
606			q->qi_bwarnlimit = newlim->d_bwarns;
607		if (newlim->d_fieldmask & FS_DQ_IWARNS)
608			q->qi_iwarnlimit = newlim->d_iwarns;
609		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
610			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
611	} else {
612		/*
613		 * If the user is now over quota, start the timelimit.
614		 * The user will not be 'warned'.
615		 * Note that we keep the timers ticking, whether enforcement
616		 * is on or off. We don't really want to bother with iterating
617		 * over all ondisk dquots and turning the timers on/off.
618		 */
619		xfs_qm_adjust_dqtimers(mp, ddq);
620	}
621	dqp->dq_flags |= XFS_DQ_DIRTY;
622	xfs_trans_log_dquot(tp, dqp);
623
624	error = xfs_trans_commit(tp, 0);
625	xfs_qm_dqrele(dqp);
626
627 out_unlock:
628	mutex_unlock(&q->qi_quotaofflock);
629	return error;
630}
631
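The fieldmask-driven updates above are what a Q_XSETQLIM caller asks for. A hedged user-space sketch that sets block limits for one user follows; the uid, the limits and the device path are placeholders. Note that d_blk_softlimit/d_blk_hardlimit are expressed in 512-byte basic blocks, which the function converts with XFS_BB_TO_FSB().

#include <stdio.h>
#include <string.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XSETQLIM, struct fs_disk_quota */

int main(void)
{
	struct fs_disk_quota dq;
	int uid = 1000;				/* example user */

	memset(&dq, 0, sizeof(dq));
	dq.d_version = FS_DQUOT_VERSION;
	dq.d_flags = FS_USER_QUOTA;
	dq.d_id = uid;
	dq.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
	dq.d_blk_softlimit = 1024 * 1024;	/* 512 MiB in 512-byte blocks */
	dq.d_blk_hardlimit = 2 * 1024 * 1024;	/* 1 GiB */

	if (quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/sdXN" /* placeholder */, uid,
		     (void *)&dq) < 0) {
		perror("quotactl(Q_XSETQLIM)");
		return 1;
	}
	return 0;
}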
632int
633xfs_qm_scall_getquota(
634	xfs_mount_t	*mp,
635	xfs_dqid_t	id,
636	uint		type,
637	fs_disk_quota_t *out)
638{
639	xfs_dquot_t	*dqp;
640	int		error;
641
642	/*
643	 * Try to get the dquot. We don't want it allocated on disk, so
644	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
645	 * exist, we'll get ENOENT back.
646	 */
647	if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
648		return (error);
649	}
650
651	/*
652	 * If everything's NULL, this dquot doesn't quite exist as far as
653	 * our utility programs are concerned.
654	 */
655	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
656		xfs_qm_dqput(dqp);
657		return XFS_ERROR(ENOENT);
658	}
659	/*
660	 * Convert the disk dquot to the exportable format
661	 */
662	xfs_qm_export_dquot(mp, &dqp->q_core, out);
663	xfs_qm_dqput(dqp);
664	return (error ? XFS_ERROR(EFAULT) : 0);
665}
666
667
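xfs_qm_scall_getquota() backs the Q_XGETQUOTA subcommand and, as the comment notes, never allocates the dquot, so querying an id with no quota record comes back as ENOENT. An illustrative reader with a placeholder uid and device path:

#include <stdio.h>
#include <string.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, struct fs_disk_quota */

int main(void)
{
	struct fs_disk_quota dq;
	int uid = 1000;				/* example user */

	memset(&dq, 0, sizeof(dq));
	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), "/dev/sdXN" /* placeholder */, uid,
		     (void *)&dq) < 0) {
		perror("quotactl(Q_XGETQUOTA)");	/* ENOENT if no dquot exists */
		return 1;
	}

	/* Usage and limits are reported in 512-byte basic blocks. */
	printf("uid %u: used %llu BBs, soft %llu, hard %llu\n",
	       dq.d_id,
	       (unsigned long long)dq.d_bcount,
	       (unsigned long long)dq.d_blk_softlimit,
	       (unsigned long long)dq.d_blk_hardlimit);
	return 0;
}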
668STATIC int
669xfs_qm_log_quotaoff_end(
670	xfs_mount_t		*mp,
671	xfs_qoff_logitem_t	*startqoff,
672	uint			flags)
673{
674	xfs_trans_t		*tp;
675	int			error;
676	xfs_qoff_logitem_t	*qoffi;
677
678	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
679
680	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
681				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
682		xfs_trans_cancel(tp, 0);
683		return (error);
684	}
685
686	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
687					flags & XFS_ALL_QUOTA_ACCT);
688	xfs_trans_log_quotaoff_item(tp, qoffi);
689
690	/*
691	 * We have to make sure that the transaction is secure on disk before we
692	 * return and actually stop quota accounting. So, make it synchronous.
693	 * We don't care about quotoff's performance.
694	 */
695	xfs_trans_set_sync(tp);
696	error = xfs_trans_commit(tp, 0);
697	return (error);
698}
699
700
701STATIC int
702xfs_qm_log_quotaoff(
703	xfs_mount_t	       *mp,
704	xfs_qoff_logitem_t     **qoffstartp,
705	uint		       flags)
706{
707	xfs_trans_t	       *tp;
708	int			error;
709	xfs_qoff_logitem_t     *qoffi=NULL;
710	uint			oldsbqflag=0;
711
712	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
713	if ((error = xfs_trans_reserve(tp, 0,
714				      sizeof(xfs_qoff_logitem_t) * 2 +
715				      mp->m_sb.sb_sectsize + 128,
716				      0,
717				      0,
718				      XFS_DEFAULT_LOG_COUNT))) {
719		goto error0;
720	}
721
722	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
723	xfs_trans_log_quotaoff_item(tp, qoffi);
724
725	spin_lock(&mp->m_sb_lock);
726	oldsbqflag = mp->m_sb.sb_qflags;
727	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
728	spin_unlock(&mp->m_sb_lock);
729
730	xfs_mod_sb(tp, XFS_SB_QFLAGS);
731
732	/*
733	 * We have to make sure that the transaction is secure on disk before we
734	 * return and actually stop quota accounting. So, make it synchronous.
735	 * We don't care about quotaoff's performance.
736	 */
737	xfs_trans_set_sync(tp);
738	error = xfs_trans_commit(tp, 0);
739
740error0:
741	if (error) {
742		xfs_trans_cancel(tp, 0);
743		/*
744		 * No one else is modifying sb_qflags, so this is OK.
745		 * We still hold the quotaofflock.
746		 */
747		spin_lock(&mp->m_sb_lock);
748		mp->m_sb.sb_qflags = oldsbqflag;
749		spin_unlock(&mp->m_sb_lock);
750	}
751	*qoffstartp = qoffi;
752	return (error);
753}
754
755
756/*
757 * Translate an internal style on-disk-dquot to the exportable format.
758 * The main differences are that the counters/limits are all in Basic
759 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
760 * to be converted to the native endianness.
761 */
762STATIC void
763xfs_qm_export_dquot(
764	xfs_mount_t		*mp,
765	xfs_disk_dquot_t	*src,
766	struct fs_disk_quota	*dst)
767{
768	memset(dst, 0, sizeof(*dst));
769	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
770	dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
771	dst->d_id = be32_to_cpu(src->d_id);
772	dst->d_blk_hardlimit =
773		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
774	dst->d_blk_softlimit =
775		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
776	dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
777	dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
778	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
779	dst->d_icount = be64_to_cpu(src->d_icount);
780	dst->d_btimer = be32_to_cpu(src->d_btimer);
781	dst->d_itimer = be32_to_cpu(src->d_itimer);
782	dst->d_iwarns = be16_to_cpu(src->d_iwarns);
783	dst->d_bwarns = be16_to_cpu(src->d_bwarns);
784	dst->d_rtb_hardlimit =
785		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
786	dst->d_rtb_softlimit =
787		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
788	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
789	dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
790	dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);
791
792	/*
793	 * Internally, we don't reset all the timers when quota enforcement
794	 * gets turned off. No need to confuse the user level code,
795	 * so return zeroes in that case.
796	 */
797	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
798	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
799			(src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
800		dst->d_btimer = 0;
801		dst->d_itimer = 0;
802		dst->d_rtbtimer = 0;
803	}
804
805#ifdef DEBUG
806	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
807	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
808			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
809	    dst->d_id != 0) {
810		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
811		    (dst->d_blk_softlimit > 0)) {
812			ASSERT(dst->d_btimer != 0);
813		}
814		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
815		    (dst->d_ino_softlimit > 0)) {
816			ASSERT(dst->d_itimer != 0);
817		}
818	}
819#endif
820}
821
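The XFS_FSB_TO_BB()/XFS_BB_TO_FSB() macros used here and in xfs_qm_scall_setqlim() only shift between filesystem blocks and 512-byte basic blocks. A small stand-alone sketch of the equivalent arithmetic, assuming a 4096-byte filesystem block size (blocklog 12, BBSHIFT 9, so a shift of 3):

#include <stdio.h>

static unsigned long long fsb_to_bb(unsigned long long fsb)
{
	return fsb << 3;		/* 1 FSB == 8 basic blocks of 512 bytes */
}

static unsigned long long bb_to_fsb(unsigned long long bb)
{
	return (bb + 7) >> 3;		/* partial filesystem blocks round up */
}

int main(void)
{
	printf("100 FSBs = %llu BBs\n", fsb_to_bb(100));	/* 800 */
	printf("801 BBs  = %llu FSBs\n", bb_to_fsb(801));	/* 101 */
	return 0;
}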
822STATIC uint
823xfs_qm_export_qtype_flags(
824	uint flags)
825{
826	/*
827	 * Can't be more than one, or none.
828	 */
829	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
830		(FS_PROJ_QUOTA | FS_USER_QUOTA));
831	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
832		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
833	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
834		(FS_USER_QUOTA | FS_GROUP_QUOTA));
835	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
836
837	return (flags & XFS_DQ_USER) ?
838		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
839			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
840}
841
842STATIC uint
843xfs_qm_export_flags(
844	uint flags)
845{
846	uint uflags;
847
848	uflags = 0;
849	if (flags & XFS_UQUOTA_ACCT)
850		uflags |= FS_QUOTA_UDQ_ACCT;
851	if (flags & XFS_PQUOTA_ACCT)
852		uflags |= FS_QUOTA_PDQ_ACCT;
853	if (flags & XFS_GQUOTA_ACCT)
854		uflags |= FS_QUOTA_GDQ_ACCT;
855	if (flags & XFS_UQUOTA_ENFD)
856		uflags |= FS_QUOTA_UDQ_ENFD;
857	if (flags & (XFS_OQUOTA_ENFD)) {
858		uflags |= (flags & XFS_GQUOTA_ACCT) ?
859			FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
860	}
861	return (uflags);
862}
863
864
865STATIC int
866xfs_dqrele_inode(
867	struct xfs_inode	*ip,
868	struct xfs_perag	*pag,
869	int			flags)
870{
871	/* skip quota inodes */
872	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
873	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
874		ASSERT(ip->i_udquot == NULL);
875		ASSERT(ip->i_gdquot == NULL);
876		return 0;
877	}
878
879	xfs_ilock(ip, XFS_ILOCK_EXCL);
880	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
881		xfs_qm_dqrele(ip->i_udquot);
882		ip->i_udquot = NULL;
883	}
884	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
885		xfs_qm_dqrele(ip->i_gdquot);
886		ip->i_gdquot = NULL;
887	}
888	xfs_iunlock(ip, XFS_ILOCK_EXCL);
889	return 0;
890}
891
892
893/*
894 * Go thru all the inodes in the file system, releasing their dquots.
895 *
896 * Note that the mount structure gets modified to indicate that quotas are off
897 * AFTER this, in the case of quotaoff.
898 */
899void
900xfs_qm_dqrele_all_inodes(
901	struct xfs_mount *mp,
902	uint		 flags)
903{
904	ASSERT(mp->m_quotainfo);
905	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
906}
v3.15
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18
  19#include <linux/capability.h>
  20
  21#include "xfs.h"
  22#include "xfs_fs.h"
  23#include "xfs_shared.h"
  24#include "xfs_format.h"
  25#include "xfs_log_format.h"
  26#include "xfs_trans_resv.h"
  27#include "xfs_bit.h"
  28#include "xfs_sb.h"
  29#include "xfs_ag.h"
  30#include "xfs_mount.h"
  31#include "xfs_inode.h"
  32#include "xfs_trans.h"
  33#include "xfs_error.h"
  34#include "xfs_quota.h"
  35#include "xfs_qm.h"
  36#include "xfs_trace.h"
  37#include "xfs_icache.h"
  38
  39STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
  40STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
  41					uint);
  42STATIC uint	xfs_qm_export_flags(uint);
  43STATIC uint	xfs_qm_export_qtype_flags(uint);
  44
  45/*
  46 * Turn off quota accounting and/or enforcement for all udquots and/or
  47 * gdquots. Called only at unmount time.
  48 *
  49 * This assumes that there are no dquots of this file system cached
  50 * incore, and modifies the ondisk dquot directly. Therefore, for example,
  51 * it is an error to call this twice, without purging the cache.
  52 */
  53int
  54xfs_qm_scall_quotaoff(
  55	xfs_mount_t		*mp,
  56	uint			flags)
  57{
  58	struct xfs_quotainfo	*q = mp->m_quotainfo;
  59	uint			dqtype;
  60	int			error;
  61	uint			inactivate_flags;
  62	xfs_qoff_logitem_t	*qoffstart;
  63
  64	/*
  65	 * No file system can have quotas enabled on disk but not in core.
  66	 * Note that quota utilities (like quotaoff) _expect_
  67	 * errno == EEXIST here.
  68	 */
  69	if ((mp->m_qflags & flags) == 0)
  70		return XFS_ERROR(EEXIST);
  71	error = 0;
  72
  73	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
  74
  75	/*
  76	 * We don't want to deal with two quotaoffs messing up each other,
  77	 * so we're going to serialize it. quotaoff isn't exactly a performance
  78	 * critical thing.
  79	 * If quotaoff, then we must be dealing with the root filesystem.
  80	 */
  81	ASSERT(q);
  82	mutex_lock(&q->qi_quotaofflock);
  83
  84	/*
  85	 * If we're just turning off quota enforcement, change mp and go.
  86	 */
  87	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
  88		mp->m_qflags &= ~(flags);
  89
  90		spin_lock(&mp->m_sb_lock);
  91		mp->m_sb.sb_qflags = mp->m_qflags;
  92		spin_unlock(&mp->m_sb_lock);
  93		mutex_unlock(&q->qi_quotaofflock);
  94
  95		/* XXX what to do if error ? Revert back to old vals incore ? */
  96		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
  97		return (error);
  98	}
  99
 100	dqtype = 0;
 101	inactivate_flags = 0;
 102	/*
 103	 * If accounting is off, we must turn enforcement off, clear the
 104	 * quota 'CHKD' certificate to make it known that we have to
 105	 * do a quotacheck the next time this quota is turned on.
 106	 */
 107	if (flags & XFS_UQUOTA_ACCT) {
 108		dqtype |= XFS_QMOPT_UQUOTA;
 109		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
 110		inactivate_flags |= XFS_UQUOTA_ACTIVE;
 111	}
 112	if (flags & XFS_GQUOTA_ACCT) {
 113		dqtype |= XFS_QMOPT_GQUOTA;
 114		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
 115		inactivate_flags |= XFS_GQUOTA_ACTIVE;
 116	}
 117	if (flags & XFS_PQUOTA_ACCT) {
 118		dqtype |= XFS_QMOPT_PQUOTA;
 119		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
 120		inactivate_flags |= XFS_PQUOTA_ACTIVE;
 121	}
 122
 123	/*
 124	 * Nothing to do?  Don't complain. This happens when we're just
 125	 * turning off quota enforcement.
 126	 */
 127	if ((mp->m_qflags & flags) == 0)
 128		goto out_unlock;
 129
 130	/*
 131	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
 132	 * and synchronously. If we fail to write, we should abort the
 133	 * operation as it cannot be recovered safely if we crash.
 134	 */
 135	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
 136	if (error)
 137		goto out_unlock;
 138
 139	/*
 140	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
 141	 * to take care of the race between dqget and quotaoff. We don't take
 142	 * any special locks to reset these bits. All processes need to check
 143	 * these bits *after* taking inode lock(s) to see if the particular
 144	 * quota type is in the process of being turned off. If *ACTIVE, it is
 145	 * guaranteed that all dquot structures and all quotainode ptrs will all
 146	 * stay valid as long as that inode is kept locked.
 147	 *
 148	 * There is no turning back after this.
 149	 */
 150	mp->m_qflags &= ~inactivate_flags;
 151
 152	/*
 153	 * Give back all the dquot reference(s) held by inodes.
 154	 * Here we go thru every single incore inode in this file system, and
 155	 * do a dqrele on the i_udquot/i_gdquot that it may have.
 156	 * Essentially, as long as somebody has an inode locked, this guarantees
 157	 * that quotas will not be turned off. This is handy because in a
 158	 * transaction once we lock the inode(s) and check for quotaon, we can
 159	 * depend on the quota inodes (and other things) being valid as long as
 160	 * we keep the lock(s).
 161	 */
 162	xfs_qm_dqrele_all_inodes(mp, flags);
 163
 164	/*
 165	 * Next we make the changes in the quota flag in the mount struct.
 166	 * This isn't protected by a particular lock directly, because we
 167	 * don't want to take a mrlock every time we depend on quotas being on.
 168	 */
 169	mp->m_qflags &= ~flags;
 170
 171	/*
 172	 * Go through all the dquots of this file system and purge them,
 173	 * according to what was turned off.
 174	 */
 175	xfs_qm_dqpurge_all(mp, dqtype);
 176
 177	/*
 178	 * Transactions that had started before ACTIVE state bit was cleared
 179	 * could have logged many dquots, so they'd have higher LSNs than
 180	 * the first QUOTAOFF log record does. If we happen to crash when
 181	 * the tail of the log has gone past the QUOTAOFF record, but
 182	 * before the last dquot modification, those dquots __will__
 183	 * recover, and that's not good.
 184	 *
 185	 * So, we have QUOTAOFF start and end logitems; the start
 186	 * logitem won't get overwritten until the end logitem appears...
 187	 */
 188	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
 189	if (error) {
 190		/* We're screwed now. Shutdown is the only option. */
 191		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 192		goto out_unlock;
 193	}
 194
 195	/*
 196	 * If all quotas are completely turned off, close shop.
 197	 */
 198	if (mp->m_qflags == 0) {
 199		mutex_unlock(&q->qi_quotaofflock);
 200		xfs_qm_destroy_quotainfo(mp);
 201		return (0);
 202	}
 203
 204	/*
 205	 * Release our quotainode references if we don't need them anymore.
 206	 */
 207	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
 208		IRELE(q->qi_uquotaip);
 209		q->qi_uquotaip = NULL;
 210	}
 211	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
 212		IRELE(q->qi_gquotaip);
 213		q->qi_gquotaip = NULL;
 214	}
 215	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
 216		IRELE(q->qi_pquotaip);
 217		q->qi_pquotaip = NULL;
 218	}
 219
 220out_unlock:
 221	mutex_unlock(&q->qi_quotaofflock);
 222	return error;
 223}
 224
 225STATIC int
 226xfs_qm_scall_trunc_qfile(
 227	struct xfs_mount	*mp,
 228	xfs_ino_t		ino)
 229{
 230	struct xfs_inode	*ip;
 231	struct xfs_trans	*tp;
 232	int			error;
 233
 234	if (ino == NULLFSINO)
 235		return 0;
 236
 237	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
 238	if (error)
 239		return error;
 240
 241	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 242
 243	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
 244	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 245	if (error) {
 246		xfs_trans_cancel(tp, 0);
 247		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 248		goto out_put;
 249	}
 250
 251	xfs_ilock(ip, XFS_ILOCK_EXCL);
 252	xfs_trans_ijoin(tp, ip, 0);
 253
 254	ip->i_d.di_size = 0;
 255	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 256
 257	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 258	if (error) {
 259		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
 260				     XFS_TRANS_ABORT);
 261		goto out_unlock;
 262	}
 263
 264	ASSERT(ip->i_d.di_nextents == 0);
 265
 266	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 267	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 268
 269out_unlock:
 270	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 271out_put:
 272	IRELE(ip);
 273	return error;
 274}
 275
 276int
 277xfs_qm_scall_trunc_qfiles(
 278	xfs_mount_t	*mp,
 279	uint		flags)
 280{
 281	int		error;
 282
 283	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
 284		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
 285			__func__, flags, mp->m_qflags);
 286		return XFS_ERROR(EINVAL);
 287	}
 288
 289	if (flags & XFS_DQ_USER) {
 290		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
 291		if (error)
 292			return error;
 293	}
 294	if (flags & XFS_DQ_GROUP) {
 295		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
 296		if (error)
 297			return error;
 298	}
 299	if (flags & XFS_DQ_PROJ)
 300		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
 301
 302	return error;
 303}
 304
 305/*
 306 * Switch on (a given) quota enforcement for a filesystem.  This takes
 307 * effect immediately.
 308 * (Switching on quota accounting must be done at mount time.)
 309 */
 310int
 311xfs_qm_scall_quotaon(
 312	xfs_mount_t	*mp,
 313	uint		flags)
 314{
 315	int		error;
 316	uint		qf;
 317	__int64_t	sbflags;
 318
 319	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
 320	/*
 321	 * Switching on quota accounting must be done at mount time.
 322	 */
 323	flags &= ~(XFS_ALL_QUOTA_ACCT);
 324
 325	sbflags = 0;
 326
 327	if (flags == 0) {
 328		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
 329			__func__, mp->m_qflags);
 330		return XFS_ERROR(EINVAL);
 331	}
 332
 333	/* No fs can turn on quotas with a delayed effect */
 334	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
 335
 336	/*
 337	 * Can't enforce without accounting. We check the superblock
 338	 * qflags here instead of m_qflags because rootfs can have
 339	 * quota acct on ondisk without m_qflags' knowing.
 340	 */
 341	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
 342	     (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
 343	     (flags & XFS_UQUOTA_ENFD)) ||
 344	    ((flags & XFS_GQUOTA_ACCT) == 0 &&
 345	     (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
 346	     (flags & XFS_GQUOTA_ENFD)) ||
 347	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
 348	     (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
 349	     (flags & XFS_PQUOTA_ENFD))) {
 350		xfs_debug(mp,
 351			"%s: Can't enforce without acct, flags=%x sbflags=%x",
 352			__func__, flags, mp->m_sb.sb_qflags);
 353		return XFS_ERROR(EINVAL);
 354	}
 355	/*
 356	 * If everything's up to date incore, then don't waste time.
 357	 */
 358	if ((mp->m_qflags & flags) == flags)
 359		return XFS_ERROR(EEXIST);
 360
 361	/*
 362	 * Change sb_qflags on disk but not incore mp->qflags
 363	 * if this is the root filesystem.
 364	 */
 365	spin_lock(&mp->m_sb_lock);
 366	qf = mp->m_sb.sb_qflags;
 367	mp->m_sb.sb_qflags = qf | flags;
 368	spin_unlock(&mp->m_sb_lock);
 369
 370	/*
 371	 * There's nothing to change if it's the same.
 372	 */
 373	if ((qf & flags) == flags && sbflags == 0)
 374		return XFS_ERROR(EEXIST);
 375	sbflags |= XFS_SB_QFLAGS;
 376
 377	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
 378		return (error);
 379	/*
 380	 * If we aren't trying to switch on quota enforcement, we are done.
 381	 */
 382	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
 383	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
 384	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
 385	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
 386	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
 387	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
 388	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
 389		return (0);
 390
 391	if (! XFS_IS_QUOTA_RUNNING(mp))
 392		return XFS_ERROR(ESRCH);
 393
 394	/*
 395	 * Switch on quota enforcement in core.
 396	 */
 397	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
 398	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
 399	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
 400
 401	return (0);
 402}
 403
 404
 405/*
 406 * Return quota status information, such as uquota-off, enforcements, etc.
 407 * for Q_XGETQSTAT command.
 408 */
 409int
 410xfs_qm_scall_getqstat(
 411	struct xfs_mount	*mp,
 412	struct fs_quota_stat	*out)
 413{
 414	struct xfs_quotainfo	*q = mp->m_quotainfo;
 415	struct xfs_inode	*uip = NULL;
 416	struct xfs_inode	*gip = NULL;
 417	struct xfs_inode	*pip = NULL;
 418	bool                    tempuqip = false;
 419	bool                    tempgqip = false;
 420	bool                    temppqip = false;
 421
 422	memset(out, 0, sizeof(fs_quota_stat_t));
 423
 424	out->qs_version = FS_QSTAT_VERSION;
 425	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
 426		out->qs_uquota.qfs_ino = NULLFSINO;
 427		out->qs_gquota.qfs_ino = NULLFSINO;
 428		return (0);
 429	}
 430
 431	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
 432							(XFS_ALL_QUOTA_ACCT|
 433							 XFS_ALL_QUOTA_ENFD));
 434	if (q) {
 435		uip = q->qi_uquotaip;
 436		gip = q->qi_gquotaip;
 437		pip = q->qi_pquotaip;
 438	}
 439	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
 440		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
 441					0, 0, &uip) == 0)
 442			tempuqip = true;
 443	}
 444	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
 445		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
 446					0, 0, &gip) == 0)
 447			tempgqip = true;
 448	}
 449	/*
 450	 * Q_XGETQSTAT doesn't have room for both group and project quotas.
 451	 * So, allow the project quota values to be copied out only if
 452	 * there is no group quota information available.
 453	 */
 454	if (!gip) {
 455		if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
 456			if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
 457						0, 0, &pip) == 0)
 458				temppqip = true;
 459		}
 460	} else
 461		pip = NULL;
 462	if (uip) {
 463		out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
 464		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
 465		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
 466		if (tempuqip)
 467			IRELE(uip);
 468	}
 469
 470	if (gip) {
 471		out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
 472		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
 473		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
 474		if (tempgqip)
 475			IRELE(gip);
 476	}
 477	if (pip) {
 478		out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
 479		out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks;
 480		out->qs_gquota.qfs_nextents = pip->i_d.di_nextents;
 481		if (temppqip)
 482			IRELE(pip);
 483	}
 484	if (q) {
 485		out->qs_incoredqs = q->qi_dquots;
 486		out->qs_btimelimit = q->qi_btimelimit;
 487		out->qs_itimelimit = q->qi_itimelimit;
 488		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
 489		out->qs_bwarnlimit = q->qi_bwarnlimit;
 490		out->qs_iwarnlimit = q->qi_iwarnlimit;
 491	}
 492	return 0;
 493}
 494
 495/*
 496 * Return quota status information, such as uquota-off, enforcements, etc.
 497 * for Q_XGETQSTATV command, to support separate project quota field.
 498 */
 499int
 500xfs_qm_scall_getqstatv(
 501	struct xfs_mount	*mp,
 502	struct fs_quota_statv	*out)
 503{
 504	struct xfs_quotainfo	*q = mp->m_quotainfo;
 505	struct xfs_inode	*uip = NULL;
 506	struct xfs_inode	*gip = NULL;
 507	struct xfs_inode	*pip = NULL;
 508	bool                    tempuqip = false;
 509	bool                    tempgqip = false;
 510	bool                    temppqip = false;
 511
 512	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
 513		out->qs_uquota.qfs_ino = NULLFSINO;
 514		out->qs_gquota.qfs_ino = NULLFSINO;
 515		out->qs_pquota.qfs_ino = NULLFSINO;
 516		return (0);
 517	}
 518
 519	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
 520							(XFS_ALL_QUOTA_ACCT|
 521							 XFS_ALL_QUOTA_ENFD));
 522	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
 523	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
 524	out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
 525
 526	if (q) {
 527		uip = q->qi_uquotaip;
 528		gip = q->qi_gquotaip;
 529		pip = q->qi_pquotaip;
 530	}
 531	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
 532		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
 533					0, 0, &uip) == 0)
 534			tempuqip = true;
 535	}
 536	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
 537		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
 538					0, 0, &gip) == 0)
 539			tempgqip = true;
 540	}
 541	if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
 542		if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
 543					0, 0, &pip) == 0)
 544			temppqip = true;
 545	}
 546	if (uip) {
 547		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
 548		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
 549		if (tempuqip)
 550			IRELE(uip);
 551	}
 552
 553	if (gip) {
 554		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
 555		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
 556		if (tempgqip)
 557			IRELE(gip);
 558	}
 559	if (pip) {
 560		out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks;
 561		out->qs_pquota.qfs_nextents = pip->i_d.di_nextents;
 562		if (temppqip)
 563			IRELE(pip);
 564	}
 565	if (q) {
 566		out->qs_incoredqs = q->qi_dquots;
 567		out->qs_btimelimit = q->qi_btimelimit;
 568		out->qs_itimelimit = q->qi_itimelimit;
 569		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
 570		out->qs_bwarnlimit = q->qi_bwarnlimit;
 571		out->qs_iwarnlimit = q->qi_iwarnlimit;
 572	}
 573	return 0;
 574}
 575
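xfs_qm_scall_getqstatv() serves the newer Q_XGETQSTATV subcommand, whose struct fs_quota_statv carries a separate qs_pquota slot instead of reusing the group fields. An illustrative caller, not part of this file; the generic quotactl layer is expected to see qs_version preset to FS_QSTATV_VERSION1, and the device path is a placeholder.

#include <stdio.h>
#include <string.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQSTATV, struct fs_quota_statv */

int main(void)
{
	struct fs_quota_statv qstat;

	memset(&qstat, 0, sizeof(qstat));
	qstat.qs_version = FS_QSTATV_VERSION1;	/* requested structure version */

	if (quotactl(QCMD(Q_XGETQSTATV, USRQUOTA), "/dev/sdXN" /* placeholder */, 0,
		     (void *)&qstat) < 0) {
		perror("quotactl(Q_XGETQSTATV)");
		return 1;
	}

	printf("user ino=%llu group ino=%llu project ino=%llu\n",
	       (unsigned long long)qstat.qs_uquota.qfs_ino,
	       (unsigned long long)qstat.qs_gquota.qfs_ino,
	       (unsigned long long)qstat.qs_pquota.qfs_ino);
	return 0;
}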
 576#define XFS_DQ_MASK \
 577	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
 578
 579/*
 580 * Adjust quota limits, and start/stop timers accordingly.
 581 */
 582int
 583xfs_qm_scall_setqlim(
 584	struct xfs_mount	*mp,
 585	xfs_dqid_t		id,
 586	uint			type,
 587	fs_disk_quota_t		*newlim)
 588{
 589	struct xfs_quotainfo	*q = mp->m_quotainfo;
 590	struct xfs_disk_dquot	*ddq;
 591	struct xfs_dquot	*dqp;
 592	struct xfs_trans	*tp;
 593	int			error;
 594	xfs_qcnt_t		hard, soft;
 595
 596	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
 597		return EINVAL;
 598	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
 599		return 0;
 600
 601	/*
 602	 * We don't want to race with a quotaoff so take the quotaoff lock.
 603	 * We don't hold an inode lock, so there's nothing else to stop
 604	 * a quotaoff from happening.
 605	 */
 606	mutex_lock(&q->qi_quotaofflock);
 607
 608	/*
 609	 * Get the dquot (locked) before we start, as we need to do a
 610	 * transaction to allocate it if it doesn't exist. Once we have the
 611	 * dquot, unlock it so we can start the next transaction safely. We hold
 612	 * a reference to the dquot, so it's safe to do this unlock/lock without
 613	 * it being reclaimed in the mean time.
 614	 */
 615	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
 616	if (error) {
 617		ASSERT(error != ENOENT);
 618		goto out_unlock;
 619	}
 620	xfs_dqunlock(dqp);
 621
 622	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
 623	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
 624	if (error) {
 625		xfs_trans_cancel(tp, 0);
 626		goto out_rele;
 627	}
 628
 629	xfs_dqlock(dqp);
 630	xfs_trans_dqjoin(tp, dqp);
 631	ddq = &dqp->q_core;
 632
 633	/*
 634	 * Make sure that hardlimits are >= soft limits before changing.
 635	 */
 636	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
 637		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
 638			be64_to_cpu(ddq->d_blk_hardlimit);
 639	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
 640		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
 641			be64_to_cpu(ddq->d_blk_softlimit);
 642	if (hard == 0 || hard >= soft) {
 643		ddq->d_blk_hardlimit = cpu_to_be64(hard);
 644		ddq->d_blk_softlimit = cpu_to_be64(soft);
 645		xfs_dquot_set_prealloc_limits(dqp);
 646		if (id == 0) {
 647			q->qi_bhardlimit = hard;
 648			q->qi_bsoftlimit = soft;
 649		}
 650	} else {
 651		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
 652	}
 653	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
 654		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
 655			be64_to_cpu(ddq->d_rtb_hardlimit);
 656	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
 657		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
 658			be64_to_cpu(ddq->d_rtb_softlimit);
 659	if (hard == 0 || hard >= soft) {
 660		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
 661		ddq->d_rtb_softlimit = cpu_to_be64(soft);
 662		if (id == 0) {
 663			q->qi_rtbhardlimit = hard;
 664			q->qi_rtbsoftlimit = soft;
 665		}
 666	} else {
 667		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
 668	}
 669
 670	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
 671		(xfs_qcnt_t) newlim->d_ino_hardlimit :
 672			be64_to_cpu(ddq->d_ino_hardlimit);
 673	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
 674		(xfs_qcnt_t) newlim->d_ino_softlimit :
 675			be64_to_cpu(ddq->d_ino_softlimit);
 676	if (hard == 0 || hard >= soft) {
 677		ddq->d_ino_hardlimit = cpu_to_be64(hard);
 678		ddq->d_ino_softlimit = cpu_to_be64(soft);
 679		if (id == 0) {
 680			q->qi_ihardlimit = hard;
 681			q->qi_isoftlimit = soft;
 682		}
 683	} else {
 684		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
 685	}
 686
 687	/*
 688	 * Update warnings counter(s) if requested
 689	 */
 690	if (newlim->d_fieldmask & FS_DQ_BWARNS)
 691		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
 692	if (newlim->d_fieldmask & FS_DQ_IWARNS)
 693		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
 694	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
 695		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
 696
 697	if (id == 0) {
 698		/*
 699		 * Timelimits for the super user set the relative time
 700		 * the other users can be over quota for this file system.
 701		 * If it is zero a default is used.  Ditto for the default
 702		 * soft and hard limit values (already done, above), and
 703		 * for warnings.
 704		 */
 705		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
 706			q->qi_btimelimit = newlim->d_btimer;
 707			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
 708		}
 709		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
 710			q->qi_itimelimit = newlim->d_itimer;
 711			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
 712		}
 713		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
 714			q->qi_rtbtimelimit = newlim->d_rtbtimer;
 715			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
 716		}
 717		if (newlim->d_fieldmask & FS_DQ_BWARNS)
 718			q->qi_bwarnlimit = newlim->d_bwarns;
 719		if (newlim->d_fieldmask & FS_DQ_IWARNS)
 720			q->qi_iwarnlimit = newlim->d_iwarns;
 721		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
 722			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
 723	} else {
 724		/*
 725		 * If the user is now over quota, start the timelimit.
 726		 * The user will not be 'warned'.
 727		 * Note that we keep the timers ticking, whether enforcement
 728		 * is on or off. We don't really want to bother with iterating
 729		 * over all ondisk dquots and turning the timers on/off.
 730		 */
 731		xfs_qm_adjust_dqtimers(mp, ddq);
 732	}
 733	dqp->dq_flags |= XFS_DQ_DIRTY;
 734	xfs_trans_log_dquot(tp, dqp);
 735
 736	error = xfs_trans_commit(tp, 0);
 737
 738out_rele:
 739	xfs_qm_dqrele(dqp);
 740out_unlock:
 741	mutex_unlock(&q->qi_quotaofflock);
 742	return error;
 743}
 744
 745STATIC int
 746xfs_qm_log_quotaoff_end(
 747	xfs_mount_t		*mp,
 748	xfs_qoff_logitem_t	*startqoff,
 749	uint			flags)
 750{
 751	xfs_trans_t		*tp;
 752	int			error;
 753	xfs_qoff_logitem_t	*qoffi;
 754
 755	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
 756
 757	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
 758	if (error) {
 759		xfs_trans_cancel(tp, 0);
 760		return (error);
 761	}
 762
 763	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
 764					flags & XFS_ALL_QUOTA_ACCT);
 765	xfs_trans_log_quotaoff_item(tp, qoffi);
 766
 767	/*
 768	 * We have to make sure that the transaction is secure on disk before we
 769	 * return and actually stop quota accounting. So, make it synchronous.
 770	 * We don't care about quotaoff's performance.
 771	 */
 772	xfs_trans_set_sync(tp);
 773	error = xfs_trans_commit(tp, 0);
 774	return (error);
 775}
 776
 777
 778STATIC int
 779xfs_qm_log_quotaoff(
 780	xfs_mount_t	       *mp,
 781	xfs_qoff_logitem_t     **qoffstartp,
 782	uint		       flags)
 783{
 784	xfs_trans_t	       *tp;
 785	int			error;
 786	xfs_qoff_logitem_t     *qoffi=NULL;
 787	uint			oldsbqflag=0;
 788
 789	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
 790	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
 791	if (error)
 792		goto error0;
 793
 794	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
 795	xfs_trans_log_quotaoff_item(tp, qoffi);
 796
 797	spin_lock(&mp->m_sb_lock);
 798	oldsbqflag = mp->m_sb.sb_qflags;
 799	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
 800	spin_unlock(&mp->m_sb_lock);
 801
 802	xfs_mod_sb(tp, XFS_SB_QFLAGS);
 803
 804	/*
 805	 * We have to make sure that the transaction is secure on disk before we
 806	 * return and actually stop quota accounting. So, make it synchronous.
 807	 * We don't care about quotaoff's performance.
 808	 */
 809	xfs_trans_set_sync(tp);
 810	error = xfs_trans_commit(tp, 0);
 811
 812error0:
 813	if (error) {
 814		xfs_trans_cancel(tp, 0);
 815		/*
 816		 * No one else is modifying sb_qflags, so this is OK.
 817		 * We still hold the quotaofflock.
 818		 */
 819		spin_lock(&mp->m_sb_lock);
 820		mp->m_sb.sb_qflags = oldsbqflag;
 821		spin_unlock(&mp->m_sb_lock);
 822	}
 823	*qoffstartp = qoffi;
 824	return (error);
 825}
 826
 827
 828int
 829xfs_qm_scall_getquota(
 830	struct xfs_mount	*mp,
 831	xfs_dqid_t		id,
 832	uint			type,
 833	struct fs_disk_quota	*dst)
 834{
 835	struct xfs_dquot	*dqp;
 836	int			error;
 837
 838	/*
 839	 * Try to get the dquot. We don't want it allocated on disk, so
 840	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
 841	 * exist, we'll get ENOENT back.
 842	 */
 843	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
 844	if (error)
 845		return error;
 846
 847	/*
 848	 * If everything's NULL, this dquot doesn't quite exist as far as
 849	 * our utility programs are concerned.
 850	 */
 851	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 852		error = XFS_ERROR(ENOENT);
 853		goto out_put;
 854	}
 855
 856	memset(dst, 0, sizeof(*dst));
 857	dst->d_version = FS_DQUOT_VERSION;
 858	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
 859	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
 860	dst->d_blk_hardlimit =
 861		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
 862	dst->d_blk_softlimit =
 863		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
 864	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 865	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
 866	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
 867	dst->d_icount = dqp->q_res_icount;
 868	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
 869	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
 870	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
 871	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
 872	dst->d_rtb_hardlimit =
 873		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
 874	dst->d_rtb_softlimit =
 875		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
 876	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
 877	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
 878	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 879
 880	/*
 881	 * Internally, we don't reset all the timers when quota enforcement
 882	 * gets turned off. No need to confuse the user level code,
 883	 * so return zeroes in that case.
 884	 */
 885	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
 886	     dqp->q_core.d_flags == XFS_DQ_USER) ||
 887	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
 888	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
 889	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
 890	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
 891		dst->d_btimer = 0;
 892		dst->d_itimer = 0;
 893		dst->d_rtbtimer = 0;
 894	}
 895
 896#ifdef DEBUG
 897	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
 898	     (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
 899	     (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
 900	    dst->d_id != 0) {
 901		if ((dst->d_bcount > dst->d_blk_softlimit) &&
 902		    (dst->d_blk_softlimit > 0)) {
 903			ASSERT(dst->d_btimer != 0);
 904		}
 905		if ((dst->d_icount > dst->d_ino_softlimit) &&
 906		    (dst->d_ino_softlimit > 0)) {
 907			ASSERT(dst->d_itimer != 0);
 908		}
 909	}
 910#endif
 911out_put:
 912	xfs_qm_dqput(dqp);
 913	return error;
 914}
 915
 916STATIC uint
 917xfs_qm_export_qtype_flags(
 918	uint flags)
 919{
 920	/*
 921	 * Can't be more than one, or none.
 922	 */
 923	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
 924		(FS_PROJ_QUOTA | FS_USER_QUOTA));
 925	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
 926		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
 927	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
 928		(FS_USER_QUOTA | FS_GROUP_QUOTA));
 929	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
 930
 931	return (flags & XFS_DQ_USER) ?
 932		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
 933			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
 934}
 935
 936STATIC uint
 937xfs_qm_export_flags(
 938	uint flags)
 939{
 940	uint uflags;
 941
 942	uflags = 0;
 943	if (flags & XFS_UQUOTA_ACCT)
 944		uflags |= FS_QUOTA_UDQ_ACCT;
 945	if (flags & XFS_GQUOTA_ACCT)
 946		uflags |= FS_QUOTA_GDQ_ACCT;
 947	if (flags & XFS_PQUOTA_ACCT)
 948		uflags |= FS_QUOTA_PDQ_ACCT;
 949	if (flags & XFS_UQUOTA_ENFD)
 950		uflags |= FS_QUOTA_UDQ_ENFD;
 951	if (flags & XFS_GQUOTA_ENFD)
 952		uflags |= FS_QUOTA_GDQ_ENFD;
 953	if (flags & XFS_PQUOTA_ENFD)
 954		uflags |= FS_QUOTA_PDQ_ENFD;
 955	return (uflags);
 956}
 957
 958
 959STATIC int
 960xfs_dqrele_inode(
 961	struct xfs_inode	*ip,
 962	struct xfs_perag	*pag,
 963	int			flags,
 964	void			*args)
 965{
 966	/* skip quota inodes */
 967	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
 968	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
 969	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
 970		ASSERT(ip->i_udquot == NULL);
 971		ASSERT(ip->i_gdquot == NULL);
 972		ASSERT(ip->i_pdquot == NULL);
 973		return 0;
 974	}
 975
 976	xfs_ilock(ip, XFS_ILOCK_EXCL);
 977	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
 978		xfs_qm_dqrele(ip->i_udquot);
 979		ip->i_udquot = NULL;
 980	}
 981	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
 982		xfs_qm_dqrele(ip->i_gdquot);
 983		ip->i_gdquot = NULL;
 984	}
 985	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
 986		xfs_qm_dqrele(ip->i_pdquot);
 987		ip->i_pdquot = NULL;
 988	}
 989	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 990	return 0;
 991}
 992
 993
 994/*
 995 * Go thru all the inodes in the file system, releasing their dquots.
 996 *
 997 * Note that the mount structure gets modified to indicate that quotas are off
 998 * AFTER this, in the case of quotaoff.
 999 */
1000void
1001xfs_qm_dqrele_all_inodes(
1002	struct xfs_mount *mp,
1003	uint		 flags)
1004{
1005	ASSERT(mp->m_quotainfo);
1006	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
1007}