/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"

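/*
 * Convert the generic log item back to its containing dquot log item.
 */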
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * Returns the number of iovecs and the number of bytes needed to log the
 * given dquot item.
 */
STATIC void
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 2;
	*nbytes += sizeof(struct xfs_dq_logformat) +
		   sizeof(struct xfs_disk_dquot);
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_dq_logformat *qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
	qlf->qlf_type = XFS_LI_DQUOT;
	qlf->qlf_size = 2;
	qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id);
	qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
	qlf->qlf_len = 1;
	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
			&qlip->qli_dquot->q_core,
			sizeof(struct xfs_disk_dquot));
}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone waiting in xfs_qm_dqunpin_wait() if the count goes to 0. The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}

STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

/*
 * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
 * have been failed during writeback.
 *
 * This informs the AIL that the dquot is already flush locked on the next
 * push, and acquires a hold on the buffer to ensure that it isn't reclaimed
 * before dirty data makes it to disk.
 */
STATIC void
xfs_dquot_item_error(
	struct xfs_log_item	*lip,
	struct xfs_buf		*bp)
{
	ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
	xfs_set_li_failed(lip, bp);
}

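/*
 * Try to write the dquot out from the AIL: flush it to its backing buffer
 * and queue that buffer for delayed write, returning the appropriate
 * XFS_ITEM_* state to the AIL push code.
 */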
STATIC uint
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	/*
	 * The buffer containing this item failed to be written back
	 * previously. Resubmit the buffer for IO.
	 */
	if (lip->li_flags & XFS_LI_FAILED) {
		if (!xfs_buf_trylock(bp))
			return XFS_ITEM_LOCKED;

		if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;

		xfs_buf_unlock(bp);
		return rval;
	}

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	/*
	 * Re-check the pincount now that we stabilized the value by
	 * taking the quota lock.
	 */
	if (atomic_read(&dqp->q_pincount) > 0) {
		rval = XFS_ITEM_PINNED;
		goto out_unlock;
	}

	/*
	 * Someone else is already flushing the dquot. Nothing we can do
	 * here but wait for the flush to finish and remove the item from
	 * the AIL.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		rval = XFS_ITEM_FLUSHING;
		goto out_unlock;
	}

	spin_unlock(&lip->li_ailp->ail_lock);

	error = xfs_qm_dqflush(dqp, &bp);
	if (error) {
		xfs_warn(dqp->q_mount, "%s: push error %d on dqp "PTR_FMT,
			 __func__, error, dqp);
	} else {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	}

	spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
	xfs_dqunlock(dqp);
	return rval;
}

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction. If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction. Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

/*
 * This needs to stamp an LSN into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector for dquots
 */
static const struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_committing	= xfs_qm_dquot_logitem_committing,
	.iop_error	= xfs_dquot_item_error
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
			  &xfs_dquot_item_ops);
	lp->qli_dquot = dqp;
}

/*------------------ QUOTAOFF LOG ITEMS -------------------*/

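/*
 * Convert the generic log item back to its containing quotaoff log item.
 */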
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item. It just logs the
 * quotaoff_log_format structure.
 */
STATIC void
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_qoff_logitem);
}

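/*
 * Fill in the single log iovec for the given quotaoff log item.
 */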
STATIC void
xfs_qm_qoff_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_qoff_logformat *qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF);
	qlf->qf_type = XFS_LI_QUOTAOFF;
	qlf->qf_size = 1;
	qlf->qf_flags = qflip->qql_flags;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem));
}

/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * There isn't much you can do to push a quotaoff item. It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC uint
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing to do when the
 * transaction unlocks them.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

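/*
 * Once the quotaoff-end item has been committed, the quotaoff is complete:
 * remove the quotaoff-start item from the AIL and free both log items.
 */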
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);

	kmem_free(qfs->qql_item.li_lv_shadow);
	kmem_free(lip->li_lv_shadow);
	kmem_free(qfs);
	kmem_free(qfe);
	return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this. I think we can
 * just ignore it. The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen? Also, do we need different routines for
 * quotaoff start and quotaoff end? I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable). If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

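/*
 * This is the ops vector for quotaoff-end log items.
 */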
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing	= xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing	= xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	qf->qql_item.li_mountp = mp;
	qf->qql_start_lip = start;
	qf->qql_flags = flags;
	return qf;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"
#include "xfs_error.h"

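/*
 * Convert the generic log item back to its containing dquot log item.
 */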
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * Returns the number of iovecs and the number of bytes needed to log the
 * given dquot item.
 */
STATIC void
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 2;
	*nbytes += sizeof(struct xfs_dq_logformat) +
		   sizeof(struct xfs_disk_dquot);
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_disk_dquot	ddq;
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_dq_logformat *qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
	qlf->qlf_type = XFS_LI_DQUOT;
	qlf->qlf_size = 2;
	qlf->qlf_id = qlip->qli_dquot->q_id;
	qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
	qlf->qlf_len = 1;
	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

	xfs_dquot_to_disk(&ddq, qlip->qli_dquot);

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT, &ddq,
			sizeof(struct xfs_disk_dquot));
}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone waiting in xfs_qm_dqunpin_wait() if the count goes to 0. The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

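/*
 * Try to write the dquot out from the AIL: flush it to its attached backing
 * buffer and queue that buffer for delayed write, returning the appropriate
 * XFS_ITEM_* state to the AIL push code.
 */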
STATIC uint
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	/*
	 * Re-check the pincount now that we stabilized the value by
	 * taking the quota lock.
	 */
	if (atomic_read(&dqp->q_pincount) > 0) {
		rval = XFS_ITEM_PINNED;
		goto out_unlock;
	}

	/*
	 * Someone else is already flushing the dquot. Nothing we can do
	 * here but wait for the flush to finish and remove the item from
	 * the AIL.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		rval = XFS_ITEM_FLUSHING;
		goto out_unlock;
	}

	spin_unlock(&lip->li_ailp->ail_lock);

	error = xfs_dquot_use_attached_buf(dqp, &bp);
	if (error == -EAGAIN) {
		xfs_dqfunlock(dqp);
		rval = XFS_ITEM_LOCKED;
		goto out_relock_ail;
	}

	/*
	 * dqflush completes dqflock on error, and the delwri ioend does it on
	 * success.
	 */
	error = xfs_qm_dqflush(dqp, bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
	}
	xfs_buf_relse(bp);

out_relock_ail:
	spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
	xfs_dqunlock(dqp);
	return rval;
}

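/*
 * Drop the dquot lock when the transaction is done with the log item.
 */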
STATIC void
xfs_qm_dquot_logitem_release(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction. Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

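/*
 * Committing the transaction just releases the dquot log item.
 */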
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	return xfs_qm_dquot_logitem_release(lip);
}

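/*
 * When expensive debug checks are enabled, verify the incore dquot against
 * its on-disk format before it is logged, and shut down the filesystem if it
 * is corrupt.
 */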
#ifdef DEBUG_EXPENSIVE
static void
xfs_qm_dquot_logitem_precommit_check(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_disk_dquot	ddq = { };
	xfs_failaddr_t		fa;

	xfs_dquot_to_disk(&ddq, dqp);
	fa = xfs_dquot_verify(mp, &ddq, dqp->q_id);
	if (fa) {
		XFS_CORRUPTION_ERROR("Bad dquot during logging",
				XFS_ERRLEVEL_LOW, mp, &ddq, sizeof(ddq));
		xfs_alert(mp,
			"Metadata corruption detected at %pS, dquot 0x%x",
				fa, dqp->q_id);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(fa == NULL);
	}
}
#else
# define xfs_qm_dquot_logitem_precommit_check(...)	((void)0)
#endif

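/*
 * Sanity-check the dquot and make sure its backing buffer is attached before
 * the transaction commits the log item.
 */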
static int
xfs_qm_dquot_logitem_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;

	xfs_qm_dquot_logitem_precommit_check(dqp);

	return xfs_dquot_attach_buf(tp, dqp);
}

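/*
 * This is the ops vector for dquot log items.
 */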
static const struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_precommit	= xfs_qm_dquot_logitem_precommit,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_release	= xfs_qm_dquot_logitem_release,
	.iop_committing	= xfs_qm_dquot_logitem_committing,
	.iop_push	= xfs_qm_dquot_logitem_push,
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
			  &xfs_dquot_item_ops);
	spin_lock_init(&lp->qli_lock);
	lp->qli_dquot = dqp;
	lp->qli_dirty = false;