// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct list_head	lv_list;	/* CIL lv chain ptrs */
	uint32_t		lv_order_id;	/* chain ordering info */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)
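
/*
 * Illustrative sketch only (not part of this interface), assuming nothing
 * beyond the fields declared above: the formatted regions of a log vector
 * live in the lv_iovecp array, and once every iovec has been finished the
 * space accounted in lv_bytes is the sum of the individual iovec lengths:
 *
 *	int	bytes = 0, i;
 *
 *	for (i = 0; i < lv->lv_niovecs; i++)
 *		bytes += lv->lv_iovecp[i].i_len;
 *	ASSERT(bytes == lv->lv_bytes);
 */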

/*
 * Calculate the log iovec length for a given user buffer length. Intended to
 * be used by ->iop_size implementations when sizing buffers of arbitrary
 * alignments.
 */
static inline int
xlog_calc_iovec_len(int len)
{
	return roundup(len, sizeof(uint32_t));
}
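
/*
 * Illustrative only: the helper rounds up to the 4 byte (sizeof(uint32_t))
 * iovec alignment, e.g.:
 *
 *	xlog_calc_iovec_len(1) == 4
 *	xlog_calc_iovec_len(6) == 8
 *	xlog_calc_iovec_len(8) == 8
 */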

void *xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type);

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec,
		int data_len)
{
	struct xlog_op_header	*oph = vec->i_addr;
	int			len;

	/*
	 * Always round up the length to the correct alignment so callers don't
	 * need to know anything about this log vec layout requirement. This
	 * means we have to zero the area the data to be written does not
	 * cover. This is complicated by the fact that the payload region is
	 * offset into the logvec region by the opheader that tracks the
	 * payload.
	 */
	len = xlog_calc_iovec_len(data_len);
	if (len - data_len != 0) {
		char	*buf = vec->i_addr + sizeof(struct xlog_op_header);

		memset(buf + data_len, 0, len - data_len);
	}

	/*
	 * The opheader tracks the aligned payload length, whilst the logvec
	 * tracks the overall region length.
	 */
	oph->oh_len = cpu_to_be32(len);

	len += sizeof(struct xlog_op_header);
	lv->lv_buf_len += len;
	lv->lv_bytes += len;
	vec->i_len = len;

	/* Catch buffer overruns */
	ASSERT((void *)lv->lv_buf + lv->lv_bytes <= (void *)lv + lv->lv_size);
}
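
/*
 * Illustrative sketch only of how the prepare/finish pair is meant to be
 * used, e.g. from an ->iop_format style callback. The format structure and
 * field names below are hypothetical placeholders; only
 * xlog_prepare_iovec() and xlog_finish_iovec() are real:
 *
 *	struct xfs_log_iovec	*vecp = NULL;
 *	struct my_item_format	*fmt;
 *
 *	fmt = xlog_prepare_iovec(lv, &vecp, type);
 *	fmt->some_field = value;
 *	xlog_finish_iovec(lv, vecp, sizeof(*fmt));
 */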

/*
 * Copy the amount of data requested by the caller into a new log iovec.
 */
static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void	*buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

static inline void *
xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		const struct xfs_log_iovec *src)
{
	return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
}
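
/*
 * Illustrative only: xlog_copy_iovec() is the one-shot form of the
 * prepare/copy/finish sequence above, and xlog_copy_from_iovec() simply
 * re-logs an existing iovec, e.g.:
 *
 *	xlog_copy_iovec(lv, &vecp, type, data, data_len);
 *
 * is equivalent to preparing the next iovec, copying data_len bytes of
 * data into it and finishing it with data_len.
 */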

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number.
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
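
/*
 * Illustrative only: XFS_LSN_CMP() follows the usual cmp() convention,
 * returning a negative value, zero or a positive value when the first LSN
 * is older than, equal to or newer than the second. The variable names
 * below are hypothetical:
 *
 *	if (XFS_LSN_CMP(lsn, threshold_lsn) <= 0)
 *		... lsn does not lie beyond threshold_lsn ...
 */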

/*
 * Flags to xfs_log_force()
 *
 * XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1
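
/*
 * Illustrative only: a synchronous flush of the in-core log, e.g. from an
 * fsync-like path, would be requested as:
 *
 *	error = xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * Passing 0 instead of XFS_LOG_SYNC starts the force without waiting for
 * the write to complete.
 */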

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

int	xfs_log_force(struct xfs_mount *mp, uint flags);
int	xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
int	xfs_log_mount(struct xfs_mount *mp,
		struct xfs_buftarg *log_target,
		xfs_daddr_t start_block,
		int num_bblocks);
int	xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	xfs_log_space_wake(struct xfs_mount *mp);
int	xfs_log_reserve(struct xfs_mount *mp, int length, int count,
		struct xlog_ticket **ticket, bool permanent);
int	xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	xfs_log_unmount(struct xfs_mount *mp);
bool	xfs_log_writable(struct xfs_mount *mp);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	xfs_log_ticket_put(struct xlog_ticket *ticket);
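
/*
 * Illustrative only: xfs_log_ticket_get() takes an extra reference on an
 * existing log ticket and returns it, while xfs_log_ticket_put() drops one
 * reference, e.g.:
 *
 *	new_ref = xfs_log_ticket_get(tic);
 *	...
 *	xfs_log_ticket_put(new_ref);
 */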

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool	xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);

void	xlog_use_incompat_feat(struct xlog *log);
void	xlog_drop_incompat_feat(struct xlog *log);
int	xfs_attr_use_log_assist(struct xfs_mount *mp);

#endif	/* __XFS_LOG_H__ */