/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);


STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
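/*
 * Note: the batch size is a tradeoff; larger batches amortize the radix
 * tree lookup cost while smaller ones shorten the time qi_tree_lock is
 * held.  Nothing below depends on the exact value.
 */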

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

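			/*
			 * Advance the lookup cursor past this dquot's id so
			 * the next gang lookup resumes after it; dquots are
			 * indexed in the radix tree by their id.
			 */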
			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

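	/*
	 * Mark the dquot as being torn down so that concurrent lookups in
	 * xfs_qm_dqget back off and retry rather than taking a reference.
	 */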
	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al, might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
	    (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	xfs_mount_t	*mp,
	uint		type,
	xfs_quotainfo_t	*qinf)
{
	xfs_dquot_t		*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);

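	/*
	 * If the id-0 "default" dquot cannot be read, the defaults simply
	 * stay at the zero (no limit) values the quotainfo structure was
	 * allocated with.
	 */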
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		defq = xfs_get_defquota(dqp, qinf);

		/*
		 * Timers and warnings have been already set, let's just set the
		 * default limits for this quota type
		 */
		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
		xfs_qm_dqdestroy(dqp);
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
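	/*
	 * For example, with a one-FSB dquot cluster on a 4k-block filesystem
	 * and 136-byte on-disk dquot records, this works out to 30 dquots
	 * per chunk.
	 */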

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before they can no longer perform any
		 * more writes. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
				       &committed);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = xfs_iext_count(ifp);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
			 &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

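	/*
	 * Quotacheck runs during mount before the filesystem accepts writes,
	 * so no delayed allocation blocks should exist on any inode yet.
	 */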
	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				  __func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					 0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					 0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					 0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return error;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
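		/* Project quota failures are reported as ENOSPC, not EDQUOT. */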
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_sb.h"
14#include "xfs_mount.h"
15#include "xfs_inode.h"
16#include "xfs_iwalk.h"
17#include "xfs_quota.h"
18#include "xfs_bmap.h"
19#include "xfs_bmap_util.h"
20#include "xfs_trans.h"
21#include "xfs_trans_space.h"
22#include "xfs_qm.h"
23#include "xfs_trace.h"
24#include "xfs_icache.h"
25#include "xfs_error.h"
26
27/*
28 * The global quota manager. There is only one of these for the entire
29 * system, _not_ one per file system. XQM keeps track of the overall
30 * quota functionality, including maintaining the freelist and hash
31 * tables of dquots.
32 */
33STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
34STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
35
36STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38/*
39 * We use the batch lookup interface to iterate over the dquots as it
40 * currently is the only interface into the radix tree code that allows
41 * fuzzy lookups instead of exact matches. Holding the lock over multiple
42 * operations is fine as all callers are used either during mount/umount
43 * or quotaoff.
44 */
45#define XFS_DQ_LOOKUP_BATCH 32
46
47STATIC int
48xfs_qm_dquot_walk(
49 struct xfs_mount *mp,
50 xfs_dqtype_t type,
51 int (*execute)(struct xfs_dquot *dqp, void *data),
52 void *data)
53{
54 struct xfs_quotainfo *qi = mp->m_quotainfo;
55 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
56 uint32_t next_index;
57 int last_error = 0;
58 int skipped;
59 int nr_found;
60
61restart:
62 skipped = 0;
63 next_index = 0;
64 nr_found = 0;
65
66 while (1) {
67 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 int error = 0;
69 int i;
70
71 mutex_lock(&qi->qi_tree_lock);
72 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 next_index, XFS_DQ_LOOKUP_BATCH);
74 if (!nr_found) {
75 mutex_unlock(&qi->qi_tree_lock);
76 break;
77 }
78
79 for (i = 0; i < nr_found; i++) {
80 struct xfs_dquot *dqp = batch[i];
81
82 next_index = dqp->q_id + 1;
83
84 error = execute(batch[i], data);
85 if (error == -EAGAIN) {
86 skipped++;
87 continue;
88 }
89 if (error && last_error != -EFSCORRUPTED)
90 last_error = error;
91 }
92
93 mutex_unlock(&qi->qi_tree_lock);
94
95 /* bail out if the filesystem is corrupted. */
96 if (last_error == -EFSCORRUPTED) {
97 skipped = 0;
98 break;
99 }
100 /* we're done if id overflows back to zero */
101 if (!next_index)
102 break;
103 }
104
105 if (skipped) {
106 delay(1);
107 goto restart;
108 }
109
110 return last_error;
111}

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}
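
/*
 * Example: purging a single quota type. The XFS_QMOPT_*QUOTA flags select
 * which radix trees get walked, so a quotaoff of only user quotas would do
 * (sketch, mirroring how xfs_qm_unmount() below passes XFS_QMOPT_QUOTALL):
 *
 *	xfs_qm_dqpurge_all(mp, XFS_QMOPT_UQUOTA);
 */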

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	struct xfs_mount	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot, &i_gdquot or &i_pdquot. This made the code look weird,
	 * but made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	struct xfs_inode	*ip,
	bool			doalloc)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
				XFS_DQTYPE_USER, doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
				XFS_DQTYPE_GROUP, doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid,
				XFS_DQTYPE_PROJ, doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
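
/*
 * Example: a typical caller attaches dquots before reserving quota inside a
 * transaction. A minimal sketch (the surrounding transaction setup is
 * elided and only illustrative):
 *
 *	error = xfs_qm_dqattach(ip);
 *	if (error)
 *		return error;
 *	... xfs_trans_alloc(), xfs_trans_reserve_quota_nblks(), etc. ...
 *
 * xfs_qm_dqattach_locked() is the variant for callers already holding
 * XFS_ILOCK_EXCL; pass doalloc == true to create missing on-disk dquots.
 */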

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	struct xfs_inode	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
	    (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have already been set, let's just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
	defq->blk.warn = XFS_QM_BWARNLIMIT;
	defq->ino.warn = XFS_QM_IWARNLIMIT;
	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before writes are disallowed. If a value is zero, the
	 * compiled-in default is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;
	if (dqp->q_blk.warnings)
		defq->blk.warn = dqp->q_blk.warnings;
	if (dqp->q_ino.warnings)
		defq->ino.warn = dqp->q_ino.warnings;
	if (dqp->q_rtb.warnings)
		defq->rtb.warn = dqp->q_rtb.warnings;

	xfs_qm_dqdestroy(dqp);
}
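
/*
 * Worked example for the fallback above: if an administrator has set a
 * 14-day block grace period on the ID-0 dquot (e.g. via xfs_quota's "timer"
 * command -- the exact invocation is illustrative), q_blk.timer is nonzero
 * and becomes defq->blk.time, while an untouched q_ino.timer of zero leaves
 * defq->ino.time at the compiled-in XFS_QM_ITIMELIMIT.
 */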

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quota inodes are set up, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees the quotainfo
 * structure.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ip,
	uint			flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}

STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an -EFSCORRUPTED
		 * here. If this occurs, re-read without CRC validation so
		 * that we can repair the damage via xfs_qm_reset_dqcounts().
		 * This process will leave a trace in the log indicating
		 * corruption has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* Advance to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate through all the blocks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
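
/*
 * Example: how dquot IDs map onto file offsets above. Each dquot cluster
 * block holds qi_dqperchunk dquots, so the block at file offset
 * (br_startoff) n holds the dquots with IDs in
 * [n * qi_dqperchunk, (n + 1) * qi_dqperchunk). That is exactly the firstid
 * computation in xfs_qm_reset_dqcounts_buf(), and why
 * xfs_qm_reset_dqcounts_all() advances firstid by qi_dqperchunk per block.
 */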

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done, we
 * can just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

/*
 * callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find
 * its dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto error0;
		}

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}
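
/*
 * Worked example for the accounting split above: a hypothetical realtime
 * inode with di_nblocks == 120, of which 20 are realtime extent blocks,
 * yields rtblks = 20 and nblks = 100. Quotacheck then adds 100 to
 * q_blk.count and 20 to q_rtb.count in each of the inode's dquots, plus
 * one to q_ino.count.
 */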

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk through all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	struct xfs_mount	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go through all the dquots on disk, USR and GRP/PRJ, and
	 * reset their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error)
		goto error_return;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via iwalk). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	struct xfs_mount	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid, make sure that we have allocated
 * relevant dquot(s) on disk, and that we won't exceed inode quotas by
 * creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid, gid and/or prid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot and pdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (ip->i_d.di_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
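
/*
 * Example: the typical create-path pairing of the helpers in this file.
 * A minimal sketch (transaction setup elided; error handling abbreviated
 * and illustrative only):
 *
 *	struct xfs_dquot *udqp = NULL, *gdqp = NULL, *pdqp = NULL;
 *
 *	error = xfs_qm_vop_dqalloc(dp, fsuid, fsgid, prid,
 *			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *			&udqp, &gdqp, &pdqp);
 *	... allocate the inode in a transaction ...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	... commit, then drop the references taken above:
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */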

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		delblks;
	unsigned int		blkflags;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    i_uid_read(VFS_I(ip)) != udqp->q_id) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    i_gid_read(VFS_I(ip)) != gdqp->q_id) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    ip->i_d.di_projid != pdqp->q_id) {
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1, flags | blkflags);
	if (error)
		return error;

	/*
	 * Do the delayed block reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by the
	 * transaction code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0, flags | blkflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
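
/*
 * Worked example for the delayed-block handling above: a chown of an inode
 * with di_nblocks == 50 and i_delayed_blks == 10 first reserves the 50 real
 * blocks (plus 1 inode) against the new dquot inside the transaction, then
 * reserves 10 more against the new dquot and unreserves 10 from the old one
 * outside the transaction, since delalloc reservations are not
 * transactional.
 */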

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_d.di_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}