/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = XFS_DQUOT_TREE(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}


/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}

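/*
 * Attach one dquot of the given type to the inode: use the cached group
 * hint on the user dquot if it matches, otherwise look the dquot up on
 * disk, allocating it if the caller asked us to.
 */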
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}


/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}

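/*
 * Decide whether the inode needs dquots attached: quota must be running
 * and enabled, the inode must not already have its dquots, and the quota
 * inodes themselves are never accounted.
 */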
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

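/*
 * Convenience wrapper around xfs_qm_dqattach_locked() for callers that
 * don't already hold the ilock: lock the inode, attach its dquots, unlock.
 */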
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer perform
		 * any more writes. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}


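/*
 * Zero the counters, timers and warning counts of every dquot in a dquot
 * buffer, repairing any broken dquots found along the way. Quotacheck
 * rebuilds the real counts afterwards.
 */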
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	ddq = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}

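/*
 * Walk a contiguous run of dquot blocks, resetting the counters in each
 * buffer and queueing the buffers for delayed write.
 */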
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Go to the next block.
		 */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

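/*
 * Count the realtime blocks allocated to an inode by walking its data
 * fork extent list, reading the extents in first if necessary.
 */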
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

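/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write. Dquots that are clean or being freed are skipped.
 */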
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error, error2;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;
	LIST_HEAD	(buffer_list);

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}

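/*
 * Remove a dquot from the radix tree and destroy it.
 */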
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

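/*
 * Try to reclaim a single dquot from the freelist: skip it if it is
 * locked, referenced or mid-flush, write it back if dirty, and otherwise
 * mark it XFS_DQ_FREEING and move it to the dispose list.
 */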
STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_busy;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_busy;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_busy;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

out_busy:
	xfs_dqunlock(dqp);

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);

	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}

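/*
 * Shrinker callback: scan the dquot freelist, reclaim what we can, and
 * report the remaining cache size scaled by vfs_cache_pressure.
 */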
STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi =
		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD		(buffer_list);
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;
	if (!nr_to_scan)
		goto out;

	mutex_lock(&qi->qi_lru_lock);
	while (!list_empty(&qi->qi_lru_list)) {
		if (nr_to_scan-- <= 0)
			break;
		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
				       q_lru);
		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
	}
	mutex_unlock(&qi->qi_lru_lock);

	error = xfs_buf_delwri_submit(&buffer_list);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

out:
	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if ((error = xfs_trans_reserve(tp, 0,
				      mp->m_sb.sb_sectsize + 128, 0,
				      0,
				      XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
	int		error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		     xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return (error);

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return (error);
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return (0);
}

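/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached before the operation modifies any of them.
 */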
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

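/*
 * Attach the dquots reserved for a newly created inode and account the
 * new inode against them in the creating transaction.
 */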
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
138 * about the dirty metadata sitting in this dquot. OTOH, if
139 * we're unmounting, we do care, so we flush it and wait.
140 */
141 if (XFS_DQ_IS_DIRTY(dqp)) {
142 struct xfs_buf *bp = NULL;
143
144 /*
145 * We don't care about getting disk errors here. We need
146 * to purge this dquot anyway, so we go ahead regardless.
147 */
148 error = xfs_qm_dqflush(dqp, &bp);
149 if (!error) {
150 error = xfs_bwrite(bp);
151 xfs_buf_relse(bp);
152 } else if (error == -EAGAIN) {
153 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
154 goto out_unlock;
155 }
156 xfs_dqflock(dqp);
157 }
158
159 ASSERT(atomic_read(&dqp->q_pincount) == 0);
160 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
161 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
162
163 xfs_dqfunlock(dqp);
164 xfs_dqunlock(dqp);
165
166 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
167 qi->qi_dquots--;
168
169 /*
170 * We move dquots to the freelist as soon as their reference count
171 * hits zero, so it really should be on the freelist here.
172 */
173 ASSERT(!list_empty(&dqp->q_lru));
174 list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
175 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
176
177 xfs_qm_dqdestroy(dqp);
178 return 0;
179
180out_unlock:
181 xfs_dqunlock(dqp);
182 return error;
183}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
        struct xfs_mount *mp)
{
        xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
        xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
        xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}
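
/*
 * Editorial note: purging must run before xfs_qm_destroy_quotainfo()
 * tears down the radix trees. Both callers in this file follow that
 * order; the unmount path below, for example, does:
 *
 *        xfs_qm_dqpurge_all(mp);
 *        xfs_qm_destroy_quotainfo(mp);
 */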

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp);
                xfs_qm_destroy_quotainfo(mp);
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        struct xfs_mount *mp)
{
        /*
         * Release the dquots that the root inode, et al, might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        xfs_irele(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        xfs_irele(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_pquotaip) {
                        xfs_irele(mp->m_quotainfo->qi_pquotaip);
                        mp->m_quotainfo->qi_pquotaip = NULL;
                }
        }
}

STATIC int
xfs_qm_dqattach_one(
        struct xfs_inode *ip,
        xfs_dqtype_t type,
        bool doalloc,
        struct xfs_dquot **IO_idqpp)
{
        struct xfs_dquot *dqp;
        int error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot, &i_gdquot, or &i_pdquot. This made the code look weird,
         * but made the logic a lot simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * Find the dquot from somewhere. This bumps the reference count of
         * dquot and returns it locked. This can return ENOENT if dquot didn't
         * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
         * turned off suddenly.
         */
        error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}

static bool
xfs_qm_need_dqattach(
        struct xfs_inode *ip)
{
        struct xfs_mount *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        struct xfs_inode *ip,
        bool doalloc)
{
        struct xfs_mount *mp = ip->i_mount;
        int error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
                error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
                                doalloc, &ip->i_udquot);
                if (error)
                        goto done;
                ASSERT(ip->i_udquot);
        }

        if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
                error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
                                doalloc, &ip->i_gdquot);
                if (error)
                        goto done;
                ASSERT(ip->i_gdquot);
        }

        if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
                error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
                                doalloc, &ip->i_pdquot);
                if (error)
                        goto done;
                ASSERT(ip->i_pdquot);
        }

done:
        /*
         * Don't worry about the dquots that we may have attached before any
         * error - they'll get detached later if it has not already been done.
         */
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        return error;
}

int
xfs_qm_dqattach(
        struct xfs_inode *ip)
{
        int error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, false);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}
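
/*
 * Usage sketch (hypothetical caller, for illustration only): any
 * operation that is about to modify quota-counted resources attaches
 * dquots first, then proceeds with its transaction:
 *
 *        error = xfs_qm_dqattach(ip);
 *        if (error)
 *                return error;
 *        // ... reserve quota and modify the inode in a transaction ...
 */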

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        struct xfs_inode *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        if (ip->i_pdquot) {
                xfs_qm_dqrele(ip->i_pdquot);
                ip->i_pdquot = NULL;
        }
}

struct xfs_qm_isolate {
        struct list_head buffers;
        struct list_head dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
        struct list_head *item,
        struct list_lru_one *lru,
        spinlock_t *lru_lock,
        void *arg)
                __releases(lru_lock) __acquires(lru_lock)
{
        struct xfs_dquot *dqp = container_of(item,
                        struct xfs_dquot, q_lru);
        struct xfs_qm_isolate *isol = arg;

        if (!xfs_dqlock_nowait(dqp))
                goto out_miss_busy;

        /*
         * If something else is freeing this dquot and hasn't yet removed it
         * from the LRU, leave it for the freeing task to complete the freeing
         * process rather than risk it being freed from under us here.
         */
        if (dqp->q_flags & XFS_DQFLAG_FREEING)
                goto out_miss_unlock;

        /*
         * This dquot has acquired a reference in the meantime; remove it from
         * the freelist and try again.
         */
        if (dqp->q_nrefs) {
                xfs_dqunlock(dqp);
                XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

                trace_xfs_dqreclaim_want(dqp);
                list_lru_isolate(lru, &dqp->q_lru);
                XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
                return LRU_REMOVED;
        }

        /*
         * If the dquot is dirty, flush it. If it's already being flushed, just
         * skip it so there is time for the IO to complete before we try to
         * reclaim it again on the next LRU pass.
         */
        if (!xfs_dqflock_nowait(dqp))
                goto out_miss_unlock;

        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf *bp = NULL;
                int error;

                trace_xfs_dqreclaim_dirty(dqp);

                /* we have to drop the LRU lock to flush the dquot */
                spin_unlock(lru_lock);

                error = xfs_qm_dqflush(dqp, &bp);
                if (error)
                        goto out_unlock_dirty;

                xfs_buf_delwri_queue(bp, &isol->buffers);
                xfs_buf_relse(bp);
                goto out_unlock_dirty;
        }
        xfs_dqfunlock(dqp);

        /*
         * Prevent lookups now that we are past the point of no return.
         */
        dqp->q_flags |= XFS_DQFLAG_FREEING;
        xfs_dqunlock(dqp);

        ASSERT(dqp->q_nrefs == 0);
        list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
        XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
        trace_xfs_dqreclaim_done(dqp);
        XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
        return LRU_REMOVED;

out_miss_unlock:
        xfs_dqunlock(dqp);
out_miss_busy:
        trace_xfs_dqreclaim_busy(dqp);
        XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
        return LRU_SKIP;

out_unlock_dirty:
        trace_xfs_dqreclaim_busy(dqp);
        XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
        xfs_dqunlock(dqp);
        spin_lock(lru_lock);
        return LRU_RETRY;
}
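
/*
 * Summary of the LRU status codes above: LRU_REMOVED when the dquot was
 * taken off the list (either because it regained a reference or because
 * it moved to the dispose list), LRU_SKIP when it was busy and should be
 * revisited on a later pass, and LRU_RETRY after a dirty flush so the
 * walk re-examines the current position with the lru_lock reacquired.
 */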

static unsigned long
xfs_qm_shrink_scan(
        struct shrinker *shrink,
        struct shrink_control *sc)
{
        struct xfs_quotainfo *qi = shrink->private_data;
        struct xfs_qm_isolate isol;
        unsigned long freed;
        int error;

        if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
                return 0;

        INIT_LIST_HEAD(&isol.buffers);
        INIT_LIST_HEAD(&isol.dispose);

        freed = list_lru_shrink_walk(&qi->qi_lru, sc,
                        xfs_qm_dquot_isolate, &isol);

        error = xfs_buf_delwri_submit(&isol.buffers);
        if (error)
                xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

        while (!list_empty(&isol.dispose)) {
                struct xfs_dquot *dqp;

                dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
                list_del_init(&dqp->q_lru);
                xfs_qm_dqfree_one(dqp);
        }

        return freed;
}
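
/*
 * Design note: reclaim is two-phase. xfs_qm_dquot_isolate() runs under
 * the LRU lock and only moves reclaim candidates onto isol.dispose while
 * queueing any dirty buffers on isol.buffers; the actual freeing via
 * xfs_qm_dqfree_one() happens here, after the delwri submit, where it is
 * safe to take sleeping locks.
 */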

static unsigned long
xfs_qm_shrink_count(
        struct shrinker *shrink,
        struct shrink_control *sc)
{
        struct xfs_quotainfo *qi = shrink->private_data;

        return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
        struct xfs_mount *mp,
        xfs_dqtype_t type,
        struct xfs_quotainfo *qinf)
{
        struct xfs_dquot *dqp;
        struct xfs_def_quota *defq;
        int error;

        error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
        if (error)
                return;

        defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

        /*
         * Timers and warnings have already been set, so just set the
         * default limits for this quota type.
         */
        defq->blk.hard = dqp->q_blk.hardlimit;
        defq->blk.soft = dqp->q_blk.softlimit;
        defq->ino.hard = dqp->q_ino.hardlimit;
        defq->ino.soft = dqp->q_ino.softlimit;
        defq->rtb.hard = dqp->q_rtb.hardlimit;
        defq->rtb.soft = dqp->q_rtb.softlimit;
        xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
        struct xfs_mount *mp,
        xfs_dqtype_t type)
{
        struct xfs_quotainfo *qinf = mp->m_quotainfo;
        struct xfs_def_quota *defq;
        struct xfs_dquot *dqp;
        int error;

        defq = xfs_get_defquota(qinf, type);

        defq->blk.time = XFS_QM_BTIMELIMIT;
        defq->ino.time = XFS_QM_ITIMELIMIT;
        defq->rtb.time = XFS_QM_RTBTIMELIMIT;

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
        if (error)
                return;

        /*
         * The warnings and timers set the grace period given to a user or
         * group before any more writes are disallowed. If a value is zero,
         * a default is used.
         */
        if (dqp->q_blk.timer)
                defq->blk.time = dqp->q_blk.timer;
        if (dqp->q_ino.timer)
                defq->ino.time = dqp->q_ino.timer;
        if (dqp->q_rtb.timer)
                defq->rtb.time = dqp->q_rtb.timer;

        xfs_qm_dqdestroy(dqp);
}
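
/*
 * Worked example (illustrative): if the administrator never set a block
 * grace period on the id-0 dquot, q_blk.timer is zero and defq->blk.time
 * stays at the XFS_QM_BTIMELIMIT default; a nonzero timer on id 0, e.g.
 * one set via Q_XSETQLIM, overrides the default for this quota type.
 */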

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
        struct xfs_mount *mp)
{
        struct xfs_quotainfo *qinf;
        int error;

        ASSERT(XFS_IS_QUOTA_ON(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

        error = list_lru_init(&qinf->qi_lru);
        if (error)
                goto out_free_qinf;

        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        error = xfs_qm_init_quotainos(mp);
        if (error)
                goto out_free_lru;

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
        if (xfs_has_bigtime(mp)) {
                qinf->qi_expiry_min =
                        xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
                qinf->qi_expiry_max =
                        xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
        } else {
                qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
                qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
        }
        trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
                        qinf->qi_expiry_max);

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
        xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
        xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

        if (XFS_IS_UQUOTA_ON(mp))
                xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
        if (XFS_IS_GQUOTA_ON(mp))
                xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
        if (XFS_IS_PQUOTA_ON(mp))
                xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

        qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
                        mp->m_super->s_id);
        if (!qinf->qi_shrinker) {
                error = -ENOMEM;
                goto out_free_inos;
        }

        qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
        qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
        qinf->qi_shrinker->private_data = qinf;

        shrinker_register(qinf->qi_shrinker);

        return 0;

out_free_inos:
        mutex_destroy(&qinf->qi_quotaofflock);
        mutex_destroy(&qinf->qi_tree_lock);
        xfs_qm_destroy_quotainos(qinf);
out_free_lru:
        list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
        kmem_free(qinf);
        mp->m_quotainfo = NULL;
        return error;
}
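
/*
 * The error labels above unwind in roughly the reverse order of setup
 * (quota inodes and locks, then the LRU, then the quotainfo allocation),
 * mirroring the full teardown in xfs_qm_destroy_quotainfo() below.
 */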

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        struct xfs_mount *mp)
{
        struct xfs_quotainfo *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        shrinker_free(qi->qi_shrinker);
        list_lru_destroy(&qi->qi_lru);
        xfs_qm_destroy_quotainos(qi);
        mutex_destroy(&qi->qi_tree_lock);
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
        struct xfs_mount *mp,
        struct xfs_inode **ipp,
        unsigned int flags)
{
        struct xfs_trans *tp;
        int error;
        bool need_alloc = true;

        *ipp = NULL;
        /*
         * With a superblock that doesn't have a separate pquotino, we
         * share an inode between gquota and pquota. If the on-disk
         * superblock has GQUOTA and the filesystem is now mounted
         * with PQUOTA, just use sb_gquotino for sb_pquotino and
         * vice-versa.
         */
        if (!xfs_has_pquotino(mp) &&
            (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
                xfs_ino_t ino = NULLFSINO;

                if ((flags & XFS_QMOPT_PQUOTA) &&
                    (mp->m_sb.sb_gquotino != NULLFSINO)) {
                        ino = mp->m_sb.sb_gquotino;
                        if (XFS_IS_CORRUPT(mp,
                                        mp->m_sb.sb_pquotino != NULLFSINO))
                                return -EFSCORRUPTED;
                } else if ((flags & XFS_QMOPT_GQUOTA) &&
                           (mp->m_sb.sb_pquotino != NULLFSINO)) {
                        ino = mp->m_sb.sb_pquotino;
                        if (XFS_IS_CORRUPT(mp,
                                        mp->m_sb.sb_gquotino != NULLFSINO))
                                return -EFSCORRUPTED;
                }
                if (ino != NULLFSINO) {
                        error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
                        if (error)
                                return error;
                        mp->m_sb.sb_gquotino = NULLFSINO;
                        mp->m_sb.sb_pquotino = NULLFSINO;
                        need_alloc = false;
                }
        }

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
                        need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
                        0, 0, &tp);
        if (error)
                return error;

        if (need_alloc) {
                xfs_ino_t ino;

                error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
                if (!error)
                        error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
                                        S_IFREG, 1, 0, 0, false, ipp);
                if (error) {
                        xfs_trans_cancel(tp);
                        return error;
                }
        }

        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_has_quota(mp));

                xfs_add_quota(mp);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;
                mp->m_sb.sb_pquotino = NULLFSINO;

                /* qflags will get updated fully _after_ quotacheck */
                mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ipp)->i_ino;
        else if (flags & XFS_QMOPT_GQUOTA)
                mp->m_sb.sb_gquotino = (*ipp)->i_ino;
        else
                mp->m_sb.sb_pquotino = (*ipp)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_log_sb(tp);

        error = xfs_trans_commit(tp);
        if (error) {
                ASSERT(xfs_is_shutdown(mp));
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
        }
        if (need_alloc)
                xfs_finish_inode_setup(*ipp);
        return error;
}
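
/*
 * Illustrative summary (restating the sharing rules above) for
 * superblocks without a separate pquotino:
 *
 *        on-disk state         mounting with   result
 *        -------------         -------------   ------
 *        sb_gquotino valid     PQUOTA          reuse it as the pquota inode
 *        sb_pquotino valid     GQUOTA          reuse it as the gquota inode
 *        neither valid         either          allocate a fresh inode
 */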


STATIC void
xfs_qm_reset_dqcounts(
        struct xfs_mount *mp,
        struct xfs_buf *bp,
        xfs_dqid_t id,
        xfs_dqtype_t type)
{
        struct xfs_dqblk *dqb;
        int j;

        trace_xfs_reset_dqcounts(bp, _RET_IP_);

        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
         */
#ifdef DEBUG
        j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
                sizeof(struct xfs_dqblk);
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
        dqb = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                struct xfs_disk_dquot *ddq;

                ddq = (struct xfs_disk_dquot *)&dqb[j];

                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
                 * find uninitialised dquot blks. See comment in
                 * xfs_dquot_verify.
                 */
                if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
                    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
                        xfs_dqblk_repair(mp, &dqb[j], id + j, type);

                /*
                 * Reset type in case we are reusing group quota file for
                 * project quotas or vice versa
                 */
                ddq->d_type = type;
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;

                /*
                 * dquot id 0 stores the default grace period and the maximum
                 * warning limit that were set by the administrator, so we
                 * should not reset them.
                 */
                if (ddq->d_id != 0) {
                        ddq->d_btimer = 0;
                        ddq->d_itimer = 0;
                        ddq->d_rtbtimer = 0;
                        ddq->d_bwarns = 0;
                        ddq->d_iwarns = 0;
                        ddq->d_rtbwarns = 0;
                        if (xfs_has_bigtime(mp))
                                ddq->d_type |= XFS_DQTYPE_BIGTIME;
                }

                if (xfs_has_crc(mp)) {
                        xfs_update_cksum((char *)&dqb[j],
                                        sizeof(struct xfs_dqblk),
                                        XFS_DQUOT_CRC_OFF);
                }
        }
}
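
/*
 * Sizing example (illustrative; assumes the v5 on-disk format where
 * sizeof(struct xfs_dqblk) is 136 bytes): with 4096-byte filesystem
 * blocks and a one-block dquot cluster, qi_dqperchunk works out to
 * 4096 / 136 = 30, which is the number of dquots the reset loop above
 * touches per buffer.
 */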

STATIC int
xfs_qm_reset_dqcounts_all(
        struct xfs_mount *mp,
        xfs_dqid_t firstid,
        xfs_fsblock_t bno,
        xfs_filblks_t blkcnt,
        xfs_dqtype_t type,
        struct list_head *buffer_list)
{
        struct xfs_buf *bp;
        int error = 0;

        ASSERT(blkcnt > 0);

        /*
         * Blkcnt arg can be a very big number, and might even be
         * larger than the log itself. So, we have to break it up into
         * manageable-sized transactions.
         * Note that we don't start a permanent transaction here; we might
         * not be able to get a log reservation for the whole thing up front,
         * and we don't really care to either, because we just discard
         * everything if we were to crash in the middle of this loop.
         */
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                XFS_FSB_TO_DADDR(mp, bno),
                                mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                                &xfs_dquot_buf_ops);

                /*
                 * CRC and validation errors will return an EFSCORRUPTED
                 * here. If this occurs, re-read without CRC validation so
                 * that we can repair the damage via xfs_qm_reset_dqcounts().
                 * This process will leave a trace in the log indicating
                 * corruption has been detected.
                 */
                if (error == -EFSCORRUPTED) {
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                        XFS_FSB_TO_DADDR(mp, bno),
                                        mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                                        NULL);
                }

                if (error)
                        break;

                /*
                 * A corrupt buffer might not have a verifier attached, so
                 * make sure we have the correct one attached before writeback
                 * occurs.
                 */
                bp->b_ops = &xfs_dquot_buf_ops;
                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);

                /* goto the next block. */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }

        return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
        struct xfs_mount *mp,
        struct xfs_inode *qip,
        xfs_dqtype_t type,
        struct list_head *buffer_list)
{
        struct xfs_bmbt_irec *map;
        int i, nmaps; /* number of map entries */
        int error; /* return value */
        xfs_fileoff_t lblkno;
        xfs_filblks_t maxlblkcnt;
        xfs_dqid_t firstid;
        xfs_fsblock_t rablkno;
        xfs_filblks_t rablkcnt;

        error = 0;
        /*
         * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single-threaded.
         */
        if (qip->i_nblocks == 0)
                return 0;

        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        do {
                uint lock_mode;

                nmaps = XFS_DQITER_MAP_SIZE;
                /*
                 * We aren't changing the inode itself. Just changing
                 * some of its data. No new blocks are added here, and
                 * the inode is never added to the transaction.
                 */
                lock_mode = xfs_ilock_data_map_shared(qip);
                error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
                                map, &nmaps, 0);
                xfs_iunlock(qip, lock_mode);
                if (error)
                        break;

                ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
                for (i = 0; i < nmaps; i++) {
                        ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
                        ASSERT(map[i].br_blockcount);

                        lblkno += map[i].br_blockcount;

                        if (map[i].br_startblock == HOLESTARTBLOCK)
                                continue;

                        firstid = (xfs_dqid_t) map[i].br_startoff *
                                        mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
                        if ((i+1 < nmaps) &&
                            (map[i+1].br_startblock != HOLESTARTBLOCK)) {
                                rablkcnt = map[i+1].br_blockcount;
                                rablkno = map[i+1].br_startblock;
                                while (rablkcnt--) {
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                                XFS_FSB_TO_DADDR(mp, rablkno),
                                                mp->m_quotainfo->qi_dqchunklen,
                                                &xfs_dquot_buf_ops);
                                        rablkno++;
                                }
                        }
                        /*
                         * Iterate thru all the blks in the extent and
                         * reset the counters of all the dquots inside them.
                         */
                        error = xfs_qm_reset_dqcounts_all(mp, firstid,
                                        map[i].br_startblock,
                                        map[i].br_blockcount,
                                        type, buffer_list);
                        if (error)
                                goto out;
                }
        } while (nmaps > 0);

out:
        kmem_free(map);
        return error;
}
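
/*
 * Mapping note: dquot ids are laid out linearly in the quota file, so a
 * chunk that starts at file offset br_startoff holds ids beginning at
 * br_startoff * qi_dqperchunk; that is the firstid computation above.
 * E.g. with 30 dquots per chunk, the chunk at offset 2 covers ids 60-89.
 */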

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot and
 * the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
        struct xfs_inode *ip,
        xfs_dqtype_t type,
        xfs_qcnt_t nblks,
        xfs_qcnt_t rtblks)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_dquot *dqp;
        xfs_dqid_t id;
        int error;

        id = xfs_qm_id_for_quotatype(ip, type);
        error = xfs_qm_dqget(mp, id, type, true, &dqp);
        if (error) {
                /*
                 * Shouldn't be able to turn off quotas here.
                 */
                ASSERT(error != -ESRCH);
                ASSERT(error != -ENOENT);
                return error;
        }

        trace_xfs_dqadjust(dqp);

        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
        dqp->q_ino.count++;
        dqp->q_ino.reserved++;
        if (nblks) {
                dqp->q_blk.count += nblks;
                dqp->q_blk.reserved += nblks;
        }
        if (rtblks) {
                dqp->q_rtb.count += rtblks;
                dqp->q_rtb.reserved += rtblks;
        }

        /*
         * Set default limits, adjust timers (since we changed usages)
         *
         * There are no timers for the default values set in the root dquot.
         */
        if (dqp->q_id) {
                xfs_qm_adjust_dqlimits(dqp);
                xfs_qm_adjust_dqtimers(dqp);
        }

        dqp->q_flags |= XFS_DQFLAG_DIRTY;
        xfs_qm_dqput(dqp);
        return 0;
}

/*
 * Callback routine supplied to the inode walk (historically bulkstat).
 * Given an inumber, find its dquots and update them to account for
 * resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        xfs_ino_t ino,
        void *data)
{
        struct xfs_inode *ip;
        xfs_qcnt_t nblks;
        xfs_filblks_t rtblks = 0; /* total rt blks */
        int error;

        ASSERT(XFS_IS_QUOTA_ON(mp));

        /*
         * rootino must have its resources accounted for, not so with the quota
         * inodes.
         */
        if (xfs_is_quota_inode(&mp->m_sb, ino))
                return 0;

        /*
         * We don't _need_ to take the ilock EXCL here because quotacheck runs
         * at mount time and therefore nobody will be racing chown/chproj.
         */
        error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
        if (error == -EINVAL || error == -ENOENT)
                return 0;
        if (error)
                return error;

        /*
         * Reload the incore unlinked list to avoid failure in inodegc.
         * Use an unlocked check here because unrecovered unlinked inodes
         * should be somewhat rare.
         */
        if (xfs_inode_unlinked_incomplete(ip)) {
                error = xfs_inode_reload_unlinked(ip);
                if (error) {
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        goto error0;
                }
        }

        ASSERT(ip->i_delayed_blks == 0);

        if (XFS_IS_REALTIME_INODE(ip)) {
                struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

                error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
                if (error)
                        goto error0;

                xfs_bmap_count_leaves(ifp, &rtblks);
        }

        nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
        xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);

        /*
         * Add the (disk blocks and inode) resources occupied by this
         * inode to its dquots. We do this adjustment in the incore dquot,
         * and also copy the changes to its buffer.
         * We don't care about putting these changes in a transaction
         * envelope because if we crash in the middle of a 'quotacheck'
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
         * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
                                rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
                                rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
                                rtblks);
                if (error)
                        goto error0;
        }

error0:
        xfs_irele(ip);
        return error;
}

STATIC int
xfs_qm_flush_one(
        struct xfs_dquot *dqp,
        void *data)
{
        struct xfs_mount *mp = dqp->q_mount;
        struct list_head *buffer_list = data;
        struct xfs_buf *bp = NULL;
        int error = 0;

        xfs_dqlock(dqp);
        if (dqp->q_flags & XFS_DQFLAG_FREEING)
                goto out_unlock;
        if (!XFS_DQ_IS_DIRTY(dqp))
                goto out_unlock;

        /*
         * The only way the dquot is already flush locked by the time quotacheck
         * gets here is if reclaim flushed it before the dqadjust walk dirtied
         * it for the final time. Quotacheck collects all dquot bufs in the
         * local delwri queue before dquots are dirtied, so reclaim can't have
         * possibly queued it for I/O. The only way out is to push the buffer to
         * cycle the flush lock.
         */
        if (!xfs_dqflock_nowait(dqp)) {
                /* buf is pinned in-core by delwri list */
                error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
                                mp->m_quotainfo->qi_dqchunklen, 0, &bp);
                if (error)
                        goto out_unlock;

                if (!(bp->b_flags & _XBF_DELWRI_Q)) {
                        error = -EAGAIN;
                        xfs_buf_relse(bp);
                        goto out_unlock;
                }
                xfs_buf_unlock(bp);

                xfs_buf_delwri_pushbuf(bp, buffer_list);
                xfs_buf_rele(bp);

                error = -EAGAIN;
                goto out_unlock;
        }

        error = xfs_qm_dqflush(dqp, &bp);
        if (error)
                goto out_unlock;

        xfs_buf_delwri_queue(bp, buffer_list);
        xfs_buf_relse(bp);
out_unlock:
        xfs_dqunlock(dqp);
        return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
        struct xfs_mount *mp)
{
        int error, error2;
        uint flags;
        LIST_HEAD(buffer_list);
        struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
        struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
        struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;

        flags = 0;

        ASSERT(uip || gip || pip);
        ASSERT(XFS_IS_QUOTA_ON(mp));

        xfs_notice(mp, "Quotacheck needed: Please wait.");

        /*
         * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
        if (uip) {
                error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
                                &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }

        if (gip) {
                error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
                                &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_GQUOTA_CHKD;
        }

        if (pip) {
                error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
                                &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_PQUOTA_CHKD;
        }

        xfs_set_quotacheck_running(mp);
        error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
                        NULL);
        xfs_clear_quotacheck_running(mp);

        /*
         * On error, the inode walk may have partially populated the dquot
         * caches. We must purge them before disabling quota and tearing down
         * the quotainfo, or else the dquots will leak.
         */
        if (error)
                goto error_purge;

        /*
         * We've made all the changes that we need to make incore. Flush them
         * down to disk buffers if everything was updated successfully.
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
                                &buffer_list);
        }
        if (XFS_IS_GQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
                                &buffer_list);
                if (!error)
                        error = error2;
        }
        if (XFS_IS_PQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
                                &buffer_list);
                if (!error)
                        error = error2;
        }

        error2 = xfs_buf_delwri_submit(&buffer_list);
        if (!error)
                error = error2;

        /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
         * and turn quotaoff. The dquots won't be attached to any of the inodes
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error)
                goto error_purge;

        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
        mp->m_qflags |= flags;

error_return:
        xfs_buf_delwri_cancel(&buffer_list);

        if (error) {
                xfs_warn(mp,
                        "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
                        error);
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo != NULL);
                xfs_qm_destroy_quotainfo(mp);
                if (xfs_mount_reset_sbqflags(mp)) {
                        xfs_warn(mp,
                                "Quotacheck: Failed to reset quota flags.");
                }
        } else
                xfs_notice(mp, "Quotacheck: Done.");
        return error;

error_purge:
        /*
         * On error, we may have inodes queued for inactivation. This may try
         * to attach dquots to the inode before running cleanup operations on
         * the inode and this can race with the xfs_qm_destroy_quotainfo() call
         * below that frees mp->m_quotainfo. To avoid this race, flush all the
         * pending inodegc operations before we purge the dquots from memory,
         * ensuring that background inactivation is idle whilst we turn off
         * quotas.
         */
        xfs_inodegc_flush(mp);
        xfs_qm_dqpurge_all(mp);
        goto error_return;
}
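
/*
 * Quotacheck in summary: (1) zero every on-disk dquot via
 * xfs_qm_reset_dqcounts_buf(), (2) walk all inodes with
 * xfs_qm_dqusage_adjust() to rebuild the incore counts, (3) flush every
 * dirty dquot back to its buffer with xfs_qm_flush_one(), (4) submit the
 * delwri list, and only then (5) set the per-type CHKD flags. Any failure
 * funnels through error_purge/error_return and disables quotas entirely.
 */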

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        struct xfs_mount *mp)
{
        int error = 0;
        uint sbf;

        /*
         * If quotas on realtime volumes are not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_ON(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!XFS_IS_GQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_GQUOTA_CHKD;
        if (!XFS_IS_PQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single-threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_sync_sb(mp, false)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on-disk superblock doesn't know that!
                         */
                        ASSERT(!(XFS_IS_QUOTA_ON(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                        __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
        struct xfs_mount *mp)
{
        struct xfs_inode *uip = NULL;
        struct xfs_inode *gip = NULL;
        struct xfs_inode *pip = NULL;
        int error;
        uint flags = 0;

        ASSERT(mp->m_quotainfo);

        /*
         * Get the uquota, gquota, and pquota inodes
         */
        if (xfs_has_quota(mp)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                        0, 0, &uip);
                        if (error)
                                return error;
                }
                if (XFS_IS_GQUOTA_ON(mp) &&
                    mp->m_sb.sb_gquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_gquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                        0, 0, &gip);
                        if (error)
                                goto error_rele;
                }
                if (XFS_IS_PQUOTA_ON(mp) &&
                    mp->m_sb.sb_pquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_pquotino > 0);
                        error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
                                        0, 0, &pip);
                        if (error)
                                goto error_rele;
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
        }

        /*
         * Create the three inodes, if they don't exist already. The changes
         * made above will get added to a transaction and logged in one of
         * the qino_alloc calls below. If the device is readonly,
         * temporarily switch to read-write to do this.
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                error = xfs_qm_qino_alloc(mp, &uip,
                                flags | XFS_QMOPT_UQUOTA);
                if (error)
                        goto error_rele;

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
                error = xfs_qm_qino_alloc(mp, &gip,
                                flags | XFS_QMOPT_GQUOTA);
                if (error)
                        goto error_rele;

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
                error = xfs_qm_qino_alloc(mp, &pip,
                                flags | XFS_QMOPT_PQUOTA);
                if (error)
                        goto error_rele;
        }

        mp->m_quotainfo->qi_uquotaip = uip;
        mp->m_quotainfo->qi_gquotaip = gip;
        mp->m_quotainfo->qi_pquotaip = pip;

        return 0;

error_rele:
        if (uip)
                xfs_irele(uip);
        if (gip)
                xfs_irele(gip);
        if (pip)
                xfs_irele(pip);
        return error;
}

STATIC void
xfs_qm_destroy_quotainos(
        struct xfs_quotainfo *qi)
{
        if (qi->qi_uquotaip) {
                xfs_irele(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                xfs_irele(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        if (qi->qi_pquotaip) {
                xfs_irele(qi->qi_pquotaip);
                qi->qi_pquotaip = NULL;
        }
}

STATIC void
xfs_qm_dqfree_one(
        struct xfs_dquot *dqp)
{
        struct xfs_mount *mp = dqp->q_mount;
        struct xfs_quotainfo *qi = mp->m_quotainfo;

        mutex_lock(&qi->qi_tree_lock);
        radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

        qi->qi_dquots--;
        mutex_unlock(&qi->qi_tree_lock);

        xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid, and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in : inode (unlocked)
 * out : udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
        struct xfs_inode *ip,
        kuid_t uid,
        kgid_t gid,
        prid_t prid,
        uint flags,
        struct xfs_dquot **O_udqpp,
        struct xfs_dquot **O_gdqpp,
        struct xfs_dquot **O_pdqpp)
{
        struct xfs_mount *mp = ip->i_mount;
        struct inode *inode = VFS_I(ip);
        struct user_namespace *user_ns = inode->i_sb->s_user_ns;
        struct xfs_dquot *uq = NULL;
        struct xfs_dquot *gq = NULL;
        struct xfs_dquot *pq = NULL;
        int error;
        uint lockflags;

        if (!XFS_IS_QUOTA_ON(mp))
                return 0;

        lockflags = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockflags);

        if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
                gid = inode->i_gid;

        /*
         * Attach the dquot(s) to this inode, doing a dquot allocation
         * if necessary. The dquot(s) will not be locked.
         */
        if (XFS_NOT_DQATTACHED(mp, ip)) {
                error = xfs_qm_dqattach_locked(ip, true);
                if (error) {
                        xfs_iunlock(ip, lockflags);
                        return error;
                }
        }

        if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
                ASSERT(O_udqpp);
                if (!uid_eq(inode->i_uid, uid)) {
                        /*
                         * What we need is the dquot that has this uid, and
                         * if we send the inode to dqget, the uid of the inode
                         * takes priority over what's sent in the uid argument.
                         * We must unlock inode here before calling dqget if
                         * we're not sending the inode, because otherwise
                         * we'll deadlock by doing trans_reserve while
                         * holding ilock.
                         */
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
                                        XFS_DQTYPE_USER, true, &uq);
                        if (error) {
                                ASSERT(error != -ENOENT);
                                return error;
                        }
                        /*
                         * Get the ilock in the right order.
                         */
                        xfs_dqunlock(uq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        /*
                         * Take an extra reference, because we'll return
                         * this to the caller.
                         */
                        ASSERT(ip->i_udquot);
                        uq = xfs_qm_dqhold(ip->i_udquot);
                }
        }
        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
                ASSERT(O_gdqpp);
                if (!gid_eq(inode->i_gid, gid)) {
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
                                        XFS_DQTYPE_GROUP, true, &gq);
                        if (error) {
                                ASSERT(error != -ENOENT);
                                goto error_rele;
                        }
                        xfs_dqunlock(gq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_gdquot);
                        gq = xfs_qm_dqhold(ip->i_gdquot);
                }
        }
        if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
                ASSERT(O_pdqpp);
                if (ip->i_projid != prid) {
                        xfs_iunlock(ip, lockflags);
                        error = xfs_qm_dqget(mp, prid,
                                        XFS_DQTYPE_PROJ, true, &pq);
                        if (error) {
                                ASSERT(error != -ENOENT);
                                goto error_rele;
                        }
                        xfs_dqunlock(pq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_pdquot);
                        pq = xfs_qm_dqhold(ip->i_pdquot);
                }
        }
        trace_xfs_dquot_dqalloc(ip);

        xfs_iunlock(ip, lockflags);
        if (O_udqpp)
                *O_udqpp = uq;
        else
                xfs_qm_dqrele(uq);
        if (O_gdqpp)
                *O_gdqpp = gq;
        else
                xfs_qm_dqrele(gq);
        if (O_pdqpp)
                *O_pdqpp = pq;
        else
                xfs_qm_dqrele(pq);
        return 0;

error_rele:
        xfs_qm_dqrele(gq);
        xfs_qm_dqrele(uq);
        return error;
}
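
/*
 * Usage sketch (hypothetical caller, mirroring how inode creation uses
 * this function): allocate dquots up front, then attach them to the new
 * inode once it exists inside the transaction:
 *
 *        error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *                        XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *                        &udqp, &gdqp, &pdqp);
 *        ...
 *        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 */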

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
        struct xfs_trans *tp,
        struct xfs_inode *ip,
        struct xfs_dquot **IO_olddq,
        struct xfs_dquot *newdq)
{
        struct xfs_dquot *prevdq;
        uint bfield = XFS_IS_REALTIME_INODE(ip) ?
                        XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

        /* old dquot */
        prevdq = *IO_olddq;
        ASSERT(prevdq);
        ASSERT(prevdq != newdq);

        xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
        xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

        /* the sparkling new dquot */
        xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

        /*
         * Back when we made quota reservations for the chown, we reserved the
         * ondisk blocks + delalloc blocks with the new dquot. Now that we've
         * switched the dquots, decrease the new dquot's block reservation
         * (having already bumped up the real counter) so that we don't have
         * any reservation to give back when we commit.
         */
        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
                        -ip->i_delayed_blks);

        /*
         * Give the incore reservation for delalloc blocks back to the old
         * dquot. We don't normally handle delalloc quota reservations
         * transactionally, so just lock the dquot and subtract from the
         * reservation. Dirty the transaction because it's too late to turn
         * back now.
         */
        tp->t_flags |= XFS_TRANS_DIRTY;
        xfs_dqlock(prevdq);
        ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
        prevdq->q_blk.reserved -= ip->i_delayed_blks;
        xfs_dqunlock(prevdq);

        /*
         * Take an extra reference, because the inode is going to keep
         * this dquot pointer even after the trans_commit.
         */
        *IO_olddq = xfs_qm_dqhold(newdq);

        return prevdq;
}

int
xfs_qm_vop_rename_dqattach(
        struct xfs_inode **i_tab)
{
        struct xfs_mount *mp = i_tab[0]->i_mount;
        int i;

        if (!XFS_IS_QUOTA_ON(mp))
                return 0;

        for (i = 0; (i < 4 && i_tab[i]); i++) {
                struct xfs_inode *ip = i_tab[i];
                int error;

                /*
                 * Watch out for duplicate entries in the table.
                 */
                if (i == 0 || ip != i_tab[i-1]) {
                        if (XFS_NOT_DQATTACHED(mp, ip)) {
                                error = xfs_qm_dqattach(ip);
                                if (error)
                                        return error;
                        }
                }
        }
        return 0;
}

void
xfs_qm_vop_create_dqattach(
        struct xfs_trans *tp,
        struct xfs_inode *ip,
        struct xfs_dquot *udqp,
        struct xfs_dquot *gdqp,
        struct xfs_dquot *pdqp)
{
        struct xfs_mount *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_ON(mp))
                return;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (udqp && XFS_IS_UQUOTA_ON(mp)) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

                ip->i_udquot = xfs_qm_dqhold(udqp);
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
        if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
                ASSERT(ip->i_gdquot == NULL);
                ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

                ip->i_gdquot = xfs_qm_dqhold(gdqp);
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
        if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
                ASSERT(ip->i_pdquot == NULL);
                ASSERT(ip->i_projid == pdqp->q_id);

                ip->i_pdquot = xfs_qm_dqhold(pdqp);
                xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
}

/* Decide if this inode's dquot is near an enforcement boundary. */
bool
xfs_inode_near_dquot_enforcement(
        struct xfs_inode *ip,
        xfs_dqtype_t type)
{
        struct xfs_dquot *dqp;
        int64_t freesp;

        /* We only care for quotas that are enabled and enforced. */
        dqp = xfs_inode_dquot(ip, type);
        if (!dqp || !xfs_dquot_is_enforced(dqp))
                return false;

        if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
            xfs_dquot_res_over_limits(&dqp->q_rtb))
                return true;

        /* For space on the data device, check the various thresholds. */
        if (!dqp->q_prealloc_hi_wmark)
                return false;

        if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
                return false;

        if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
                return true;

        freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
        if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
                return true;

        return false;
}
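
/*
 * Worked example (illustrative numbers only, assuming
 * q_prealloc_hi_wmark = 1000 blocks and
 * q_low_space[XFS_QLOWSP_5_PCNT] = 50): a reservation at or above 1000
 * blocks is trivially near enforcement; a reservation of 960 leaves
 * freesp = 40 < 50 and is flagged too; anything still below
 * q_prealloc_lo_wmark is ruled out by the early check above.
 */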