1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#ifndef __XFS_LOG_PRIV_H__
7#define __XFS_LOG_PRIV_H__
8
9struct xfs_buf;
10struct xlog;
11struct xlog_ticket;
12struct xfs_mount;
13
14/*
15 * Flags for log structure
16 */
17#define XLOG_ACTIVE_RECOVERY 0x2 /* in the middle of recovery */
18#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */
19#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being
20 shutdown */
21#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */
22
23/*
24 * get client id from packed copy.
25 *
26 * this hack is here because the xlog_pack code copies four bytes
27 * of xlog_op_header containing the fields oh_clientid, oh_flags
28 * and oh_res2 into the packed copy.
29 *
30 * later on this four byte chunk is treated as an int and the
31 * client id is pulled out.
32 *
33 * this has endian issues, of course.
34 */
35static inline uint xlog_get_client_id(__be32 i)
36{
37 return be32_to_cpu(i) >> 24;
38}
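/*
 * Worked example (illustrative only): oh_clientid is the first byte of the
 * packed four-byte chunk, so after be32_to_cpu() it ends up in the top byte.
 * A packed value of cpu_to_be32(0x69000000) therefore yields a client id
 * of 0x69.
 */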
39
40/*
41 * In core log state
42 */
43enum xlog_iclog_state {
44 XLOG_STATE_ACTIVE, /* Current IC log being written to */
45 XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */
46 XLOG_STATE_SYNCING, /* This IC log is syncing */
47 XLOG_STATE_DONE_SYNC, /* Done syncing to disk */
48 XLOG_STATE_CALLBACK, /* Callback functions now */
49 XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */
50 XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */
51};
52
53#define XLOG_STATE_STRINGS \
54 { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \
55 { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \
56 { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \
57 { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \
58 { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \
59 { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \
60 { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" }
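/*
 * A usage sketch (assumption: these tables follow the { value, "name" }
 * convention consumed by the tracepoint print helpers), e.g. in a
 * TP_printk() format string:
 *
 *	__print_symbolic(__entry->state, XLOG_STATE_STRINGS)
 *
 * and similarly __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS) for
 * the iclog flag table below.
 */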
61
62/*
63 * In core log flags
64 */
65#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
66#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
67
68#define XLOG_ICL_STRINGS \
69 { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \
70 { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" }
71
72
73/*
74 * Log ticket flags
75 */
76#define XLOG_TIC_PERM_RESERV 0x1 /* permanent reservation */
77
78#define XLOG_TIC_FLAGS \
79 { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }
80
81/*
82 * Below are states for covering allocation transactions.
83 * By covering, we mean changing the h_tail_lsn in the last on-disk
84 * log write such that no allocation transactions will be re-done during
85 * recovery after a system crash. Recovery starts at the last on-disk
86 * log write.
87 *
88 * These states are used to insert dummy log entries to cover
89 * space allocation transactions which can undo non-transactional changes
90 * after a crash. Writes to a file with space
91 * already allocated do not result in any transactions. Allocations
92 * might include space beyond the EOF. So if we just push the EOF a
93 * little, the last transaction for the file could contain the wrong
94 * size. If there is no file system activity, after an allocation
95 * transaction, and the system crashes, the allocation transaction
96 * will get replayed and the file will be truncated. This could
97 * be hours/days/... after the allocation occurred.
98 *
99 * The fix for this is to do two dummy transactions when the
100 * system is idle. We need two dummy transactions because the h_tail_lsn
101 * in the log record header needs to point beyond the last possible
102 * non-dummy transaction. The first dummy changes the h_tail_lsn to
103 * the first transaction before the dummy. The second dummy causes
104 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
105 *
106 * These dummy transactions get committed when everything
107 * is idle (after there has been some activity).
108 *
109 * There are 5 states used to control this.
110 *
111 * IDLE -- no logging has been done on the file system or
112 * we are done covering previous transactions.
113 * NEED -- logging has occurred and we need a dummy transaction
114 * when the log becomes idle.
115 * DONE -- we were in the NEED state and have committed a dummy
116 * transaction.
117 * NEED2 -- we detected that a dummy transaction has gone to the
118 * on disk log with no other transactions.
119 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
120 *
121 * There are two places where we switch states:
122 *
123 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
124 * We commit the dummy transaction and switch to DONE or DONE2,
125 * respectively. In all other states, we don't do anything.
126 *
127 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
128 *
129 * No matter what state we are in, if this isn't the dummy
130 * transaction going out, the next state is NEED.
131 * So, if we aren't in the DONE or DONE2 states, the next state
132 * is NEED. We can't be finishing a write of the dummy record
133 * unless it was committed and the state switched to DONE or DONE2.
134 *
135 * If we are in the DONE state and this was a write of the
136 * dummy transaction, we move to NEED2.
137 *
138 * If we are in the DONE2 state and this was a write of the
139 * dummy transaction, we move to IDLE.
140 *
141 *
142 * A single dummy transaction can end up appended to the same on-disk
143 * log write as a file space allocation. When this happens, the log recovery
144 * code replays the space allocation and a file could be truncated.
145 * This is why we have the NEED2 and DONE2 states before going idle.
146 */
147
148#define XLOG_STATE_COVER_IDLE 0
149#define XLOG_STATE_COVER_NEED 1
150#define XLOG_STATE_COVER_DONE 2
151#define XLOG_STATE_COVER_NEED2 3
152#define XLOG_STATE_COVER_DONE2 4
153
154#define XLOG_COVER_OPS 5
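/*
 * A minimal sketch (hypothetical helper, not the kernel's implementation) of
 * the state transitions described above as they would be applied when an
 * on-disk log write completes: anything other than a dummy write sends us
 * back to NEED, while dummy writes walk DONE -> NEED2 and DONE2 -> IDLE.
 */
static inline int
xlog_covered_next_state_example(
	int		cur_state,
	bool		dummy_write)
{
	if (!dummy_write)
		return XLOG_STATE_COVER_NEED;

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;
	default:
		/* a dummy write should only complete in DONE or DONE2 */
		return XLOG_STATE_COVER_NEED;
	}
}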
155
156/* Ticket reservation region accounting */
157#define XLOG_TIC_LEN_MAX 15
158
159/*
160 * Reservation region
161 * As would be stored in xfs_log_iovec but without the i_addr which
162 * we don't care about.
163 */
164typedef struct xlog_res {
165 uint r_len; /* region length :4 */
166 uint r_type; /* region's transaction type :4 */
167} xlog_res_t;
168
169typedef struct xlog_ticket {
170 struct list_head t_queue; /* reserve/write queue */
171 struct task_struct *t_task; /* task that owns this ticket */
172 xlog_tid_t t_tid; /* transaction identifier : 4 */
173 atomic_t t_ref; /* ticket reference count : 4 */
174 int t_curr_res; /* current reservation in bytes : 4 */
175 int t_unit_res; /* unit reservation in bytes : 4 */
176 char t_ocnt; /* original count : 1 */
177 char t_cnt; /* current count : 1 */
178 char t_clientid; /* who does this belong to; : 1 */
179 char t_flags; /* properties of reservation : 1 */
180
181 /* reservation array fields */
182 uint t_res_num; /* num in array : 4 */
183 uint t_res_num_ophdrs; /* num op hdrs : 4 */
184 uint t_res_arr_sum; /* array sum : 4 */
185 uint t_res_o_flow; /* sum overflow : 4 */
186 xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : 8 * 15 */
187} xlog_ticket_t;
188
189/*
190 * - A log record header is 512 bytes. There is plenty of room to grow the
191 * xlog_rec_header_t into the reserved space.
192 * - ic_data follows, so a write to disk can start at the beginning of
193 * the iclog.
194 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
195 * - ic_next is the pointer to the next iclog in the ring.
196 * - ic_log is a pointer back to the global log structure.
197 * - ic_size is the full size of the log buffer, minus the cycle headers.
198 * - ic_offset is the current number of bytes written to in this iclog.
199 * - ic_refcnt is bumped when someone is writing to the log.
200 * - ic_state is the state of the iclog.
201 *
202 * Because of cacheline contention on large machines, we need to separate
203 * various resources onto different cachelines. To start with, make the
204 * structure cacheline aligned. The following fields can be contended on
205 * by independent processes:
206 *
207 * - ic_callbacks
208 * - ic_refcnt
209 * - fields protected by the global l_icloglock
210 *
211 * so we need to ensure that these fields are located in separate cachelines.
212 * We'll put all the read-only and l_icloglock fields in the first cacheline,
213 * and move everything else out to subsequent cachelines.
214 */
215typedef struct xlog_in_core {
216 wait_queue_head_t ic_force_wait;
217 wait_queue_head_t ic_write_wait;
218 struct xlog_in_core *ic_next;
219 struct xlog_in_core *ic_prev;
220 struct xlog *ic_log;
221 u32 ic_size;
222 u32 ic_offset;
223 enum xlog_iclog_state ic_state;
224 unsigned int ic_flags;
225 char *ic_datap; /* pointer to iclog data */
226 struct list_head ic_callbacks;
227
228 /* reference counts need their own cacheline */
229 atomic_t ic_refcnt ____cacheline_aligned_in_smp;
230 xlog_in_core_2_t *ic_data;
231#define ic_header ic_data->hic_header
232#ifdef DEBUG
233 bool ic_fail_crc : 1;
234#endif
235 struct semaphore ic_sema;
236 struct work_struct ic_end_io_work;
237 struct bio ic_bio;
238 struct bio_vec ic_bvec[];
239} xlog_in_core_t;
240
241/*
242 * The CIL context is used to aggregate per-transaction details as well as to be
243 * passed to the iclog for checkpoint post-commit processing. After being
244 * passed to the iclog, another context needs to be allocated for tracking the
245 * next set of transactions to be aggregated into a checkpoint.
246 */
247struct xfs_cil;
248
249struct xfs_cil_ctx {
250 struct xfs_cil *cil;
251 xfs_csn_t sequence; /* chkpt sequence # */
252 xfs_lsn_t start_lsn; /* first LSN of chkpt commit */
253 xfs_lsn_t commit_lsn; /* chkpt commit record lsn */
254 struct xlog_ticket *ticket; /* chkpt ticket */
255 int nvecs; /* number of regions */
256 int space_used; /* aggregate size of regions */
257 struct list_head busy_extents; /* busy extents in chkpt */
258 struct xfs_log_vec *lv_chain; /* logvecs being pushed */
259 struct list_head iclog_entry;
260 struct list_head committing; /* ctx committing list */
261 struct work_struct discard_endio_work;
262};
263
264/*
265 * Committed Item List structure
266 *
267 * This structure is used to track log items that have been committed but not
268 * yet written into the log. It is used only when the delayed logging mount
269 * option is enabled.
270 *
271 * This structure tracks the list of committing checkpoint contexts so
272 * we can avoid the problem of having to hold out new transactions during a
273 * flush until we have the commit record LSN of the checkpoint. We can
274 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
275 * sequence match and extract the commit LSN directly from there. If the
276 * checkpoint is still in the process of committing, we can block waiting for
277 * the commit LSN to be determined as well. This should make synchronous
278 * operations almost as efficient as the old logging methods.
279 */
280struct xfs_cil {
281 struct xlog *xc_log;
282 struct list_head xc_cil;
283 spinlock_t xc_cil_lock;
284
285 struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp;
286 struct xfs_cil_ctx *xc_ctx;
287
288 spinlock_t xc_push_lock ____cacheline_aligned_in_smp;
289 xfs_csn_t xc_push_seq;
290 struct list_head xc_committing;
291 wait_queue_head_t xc_commit_wait;
292 xfs_csn_t xc_current_sequence;
293 struct work_struct xc_push_work;
294 wait_queue_head_t xc_push_wait; /* background push throttle */
295} ____cacheline_aligned_in_smp;
296
297/*
298 * The amount of log space we allow the CIL to aggregate is difficult to size.
299 * Whatever we choose, we have to make sure we can get a reservation for the
300 * log space effectively, that it is large enough to capture sufficient
301 * relogging to reduce log buffer IO significantly, but it is not too large for
302 * the log or induces too much latency when writing out through the iclogs. We
303 * track both space consumed and the number of vectors in the checkpoint
304 * context, so we need to decide which to use for limiting.
305 *
306 * Every log buffer we write out during a push needs a header reserved, which
307 * is at least one sector and more for v2 logs. Hence we need a reservation of
308 * at least 512 bytes per 32k of log space just for the LR headers. That means
309 * 16KB of reservation per megabyte of delayed logging space we will consume,
310 * plus various headers. The number of headers will vary based on the num of
311 * io vectors, so limiting on a specific number of vectors is going to result
312 * in transactions of varying size. IOWs, it is more consistent to track and
313 * limit space consumed in the log rather than by the number of objects being
314 * logged in order to prevent checkpoint ticket overruns.
315 *
316 * Further, use of static reservations through the log grant mechanism is
317 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
318 * grant) and a significant deadlock potential because regranting write space
319 * can block on log pushes. Hence if we have to regrant log space during a log
320 * push, we can deadlock.
321 *
322 * However, we can avoid this by use of a dynamic "reservation stealing"
323 * technique during transaction commit whereby unused reservation space in the
324 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
325 * space needed by the checkpoint transaction. This means that we never need to
326 * specifically reserve space for the CIL checkpoint transaction, nor do we
327 * need to regrant space once the checkpoint completes. This also means the
328 * checkpoint transaction ticket is specific to the checkpoint context, rather
329 * than the CIL itself.
330 *
331 * With dynamic reservations, we can effectively make up arbitrary limits for
332 * the checkpoint size so long as they don't violate any other size rules.
333 * Recovery imposes a rule that no transaction exceed half the log, so we are
334 * limited by that. Furthermore, the log transaction reservation subsystem
335 * tries to keep 25% of the log free, so we need to keep below that limit or we
336 * risk running out of free log space to start any new transactions.
337 *
338 * In order to keep background CIL push efficient, we only need to ensure the
339 * CIL is large enough to maintain sufficient in-memory relogging to avoid
340 * repeated physical writes of frequently modified metadata. If we allow the CIL
341 * to grow to a substantial fraction of the log, then we may be pinning hundreds
342 * of megabytes of metadata in memory until the CIL flushes. This can cause
343 * issues when we are running low on memory - pinned memory cannot be reclaimed,
344 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
345 * size limit for the CIL that limits the maximum amount of memory pinned by the
346 * CIL but does not limit performance by reducing relogging efficiency
347 * significantly.
348 *
349 * As such, the CIL push threshold ends up being the smaller of two thresholds:
350 * - a threshold large enough that it allows CIL to be pushed and progress to be
351 * made without excessive blocking of incoming transaction commits. This is
352 * defined to be 12.5% of the log space - half the 25% push threshold of the
353 * AIL.
354 * - small enough that it doesn't pin excessive amounts of memory but maintains
355 * close to peak relogging efficiency. This is defined to be 16x the iclog
356 * buffer window (32MB) as measurements have shown this to be roughly the
357 * point of diminishing performance increases under highly concurrent
358 * modification workloads.
359 *
360 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
361 * new threshold at which we block committing transactions until the background
362 * CIL commit commences and switches to a new context. While this is not a hard
363 * limit, it forces the process committing a transaction to the CIL to block and
364 * yield the CPU, giving the CIL push work a chance to be scheduled and start
365 * work. This prevents a process running lots of transactions from overfilling
366 * the CIL because it is not yielding the CPU. We set the blocking limit at
367 * twice the background push space threshold so we keep in line with the AIL
368 * push thresholds.
369 *
370 * Note: this is not a -hard- limit as blocking is applied after the transaction
371 * is inserted into the CIL and the push has been triggered. It is largely a
372 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
373 * limit will be difficult to implement without introducing global serialisation
374 * in the CIL commit fast path, and it's not at all clear that we actually need
375 * such hard limits given the ~7 years we've run without a hard limit before
376 * finding the first situation where a checkpoint size overflow actually
377 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
378 * we've overrun the max size.
379 */
380#define XLOG_CIL_SPACE_LIMIT(log) \
381 min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
382
383#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \
384 (XLOG_CIL_SPACE_LIMIT(log) * 2)
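/*
 * Worked example of the two limits above (a sketch, assuming the 32MB
 * ceiling described in the comment): for a 1GB log, l_logsize >> 3 is
 * 128MB, so the background push limit is min(128MB, 32MB) = 32MB and the
 * blocking limit is 64MB. For a 64MB log the push limit is 8MB and the
 * blocking limit is 16MB.
 */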
385
386/*
387 * ticket grant locks, queues and accounting have their own cachelines
388 * as these are quite hot and can be operated on concurrently.
389 */
390struct xlog_grant_head {
391 spinlock_t lock ____cacheline_aligned_in_smp;
392 struct list_head waiters;
393 atomic64_t grant;
394};
395
396/*
397 * The reservation head lsn is not made up of a cycle number and block number.
398 * Instead, it uses a cycle number and byte number. Logs don't expect to
399 * overflow 31 bits worth of byte offset, so using a byte number will mean
400 * that round off problems won't occur when releasing partial reservations.
401 */
402struct xlog {
403 /* The following fields don't need locking */
404 struct xfs_mount *l_mp; /* mount point */
405 struct xfs_ail *l_ailp; /* AIL log is working with */
406 struct xfs_cil *l_cilp; /* CIL log is working with */
407 struct xfs_buftarg *l_targ; /* buftarg of log */
408 struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */
409 struct delayed_work l_work; /* background flush work */
410 uint l_flags;
411 uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
412 struct list_head *l_buf_cancel_table;
413 int l_iclog_hsize; /* size of iclog header */
414 int l_iclog_heads; /* # of iclog header sectors */
415 uint l_sectBBsize; /* sector size in BBs (2^n) */
416 int l_iclog_size; /* size of each iclog buffer in bytes */
417 int l_iclog_bufs; /* number of iclog buffers */
418 xfs_daddr_t l_logBBstart; /* start block of log */
419 int l_logsize; /* size of log in bytes */
420 int l_logBBsize; /* size of log in BB chunks */
421
422 /* The following block of fields are changed while holding icloglock */
423 wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp;
424 /* waiting for iclog flush */
425 int l_covered_state;/* state of "covering disk
426 * log entries" */
427 xlog_in_core_t *l_iclog; /* head log queue */
428 spinlock_t l_icloglock; /* grab to change iclog state */
429 int l_curr_cycle; /* Cycle number of log writes */
430 int l_prev_cycle; /* Cycle number before last
431 * block increment */
432 int l_curr_block; /* current logical log block */
433 int l_prev_block; /* previous logical log block */
434
435 /*
436 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
437 * read without needing to hold specific locks. To avoid operations
438 * contending with other hot objects, place each of them on a separate
439 * cacheline.
440 */
441 /* lsn of last LR on disk */
442 atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
443 /* lsn of 1st LR with unflushed buffers */
444 atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
445
446 struct xlog_grant_head l_reserve_head;
447 struct xlog_grant_head l_write_head;
448
449 struct xfs_kobj l_kobj;
450
451 /* The following fields are used for debugging; need to hold icloglock */
452#ifdef DEBUG
453 void *l_iclog_bak[XLOG_MAX_ICLOGS];
454#endif
455 /* log recovery lsn tracking (for buffer submission) */
456 xfs_lsn_t l_recovery_lsn;
457
458 uint32_t l_iclog_roundoff;/* padding roundoff */
459};
460
461#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
462 ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
463
464#define XLOG_FORCED_SHUTDOWN(log) \
465 (unlikely((log)->l_flags & XLOG_IO_ERROR))
466
467/* common routines */
468extern int
469xlog_recover(
470 struct xlog *log);
471extern int
472xlog_recover_finish(
473 struct xlog *log);
474extern void
475xlog_recover_cancel(struct xlog *);
476
477extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
478 char *dp, int size);
479
480extern kmem_zone_t *xfs_log_ticket_zone;
481struct xlog_ticket *
482xlog_ticket_alloc(
483 struct xlog *log,
484 int unit_bytes,
485 int count,
486 char client,
487 bool permanent);
488
489static inline void
490xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
491{
492 *ptr += bytes;
493 *len -= bytes;
494 *off += bytes;
495}
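/*
 * A usage sketch (hypothetical locals): xlog_write_adv_cnt() is a helper for
 * advancing a write cursor after copying a region into the iclog data area,
 * e.g. after writing an op header:
 *
 *	memcpy(ptr, ophdr, sizeof(struct xlog_op_header));
 *	xlog_write_adv_cnt(&ptr, &len, &log_offset,
 *			sizeof(struct xlog_op_header));
 */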
496
497void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
498void xlog_print_trans(struct xfs_trans *);
499int xlog_write(struct xlog *log, struct xfs_log_vec *log_vector,
500 struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
501 struct xlog_in_core **commit_iclog, uint optype);
502int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
503 struct xlog_in_core **iclog, xfs_lsn_t *lsn);
504void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
505void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
506
507int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
508 xfs_lsn_t log_tail_lsn);
509
510/*
511 * When we crack an atomic LSN, we sample it first so that the value will not
512 * change while we are cracking it into the component values. This means we
513 * will always get consistent component values to work from. This should always
514 * be used to sample and crack LSNs that are stored and updated in atomic
515 * variables.
516 */
517static inline void
518xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
519{
520 xfs_lsn_t val = atomic64_read(lsn);
521
522 *cycle = CYCLE_LSN(val);
523 *block = BLOCK_LSN(val);
524}
525
526/*
527 * Calculate and assign a value to an atomic LSN variable from component pieces.
528 */
529static inline void
530xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
531{
532 atomic64_set(lsn, xlog_assign_lsn(cycle, block));
533}
534
535/*
536 * When we crack the grant head, we sample it first so that the value will not
537 * change while we are cracking it into the component values. This means we
538 * will always get consistent component values to work from.
539 */
540static inline void
541xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
542{
543 *cycle = val >> 32;
544 *space = val & 0xffffffff;
545}
546
547static inline void
548xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
549{
550 xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
551}
552
553static inline int64_t
554xlog_assign_grant_head_val(int cycle, int space)
555{
556 return ((int64_t)cycle << 32) | space;
557}
558
559static inline void
560xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
561{
562 atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
563}
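/*
 * Worked example of the packing above: xlog_assign_grant_head_val(5, 8192)
 * yields ((int64_t)5 << 32) | 8192 == 0x0000000500002000, and
 * xlog_crack_grant_head_val() on that value returns cycle 5 and a byte
 * offset (space) of 8192 again.
 */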
564
565/*
566 * Committed Item List interfaces
567 */
568int xlog_cil_init(struct xlog *log);
569void xlog_cil_init_post_recovery(struct xlog *log);
570void xlog_cil_destroy(struct xlog *log);
571bool xlog_cil_empty(struct xlog *log);
572void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
573 xfs_csn_t *commit_seq, bool regrant);
574
575/*
576 * CIL force routines
577 */
578xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
579
580static inline void
581xlog_cil_force(struct xlog *log)
582{
583 xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
584}
585
586/*
587 * Wrapper function for waiting on a wait queue serialised against wakeups
588 * by a spinlock. This matches the semantics of all the wait queues used in the
589 * log code.
590 */
591static inline void
592xlog_wait(
593 struct wait_queue_head *wq,
594 struct spinlock *lock)
595 __releases(lock)
596{
597 DECLARE_WAITQUEUE(wait, current);
598
599 add_wait_queue_exclusive(wq, &wait);
600 __set_current_state(TASK_UNINTERRUPTIBLE);
601 spin_unlock(lock);
602 schedule();
603 remove_wait_queue(wq, &wait);
604}
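/*
 * A usage sketch (illustrative only; waiting_for_this_iclog is a stand-in
 * condition): the caller must hold the spinlock that serialises the wakeup,
 * and xlog_wait() drops it before sleeping:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (waiting_for_this_iclog)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */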
605
606int xlog_wait_on_iclog(struct xlog_in_core *iclog);
607
608/*
609 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
610 * means that the next log record that includes this metadata could have a
611 * smaller LSN. In turn, this means that the modification in the log would not
612 * replay.
613 */
614static inline bool
615xlog_valid_lsn(
616 struct xlog *log,
617 xfs_lsn_t lsn)
618{
619 int cur_cycle;
620 int cur_block;
621 bool valid = true;
622
623 /*
624 * First, sample the current lsn without locking to avoid added
625 * contention from metadata I/O. The current cycle and block are updated
626 * (in xlog_state_switch_iclogs()) and read here in a particular order
627 * to avoid false negatives (e.g., thinking the metadata LSN is valid
628 * when it is not).
629 *
630 * The current block is always rewound before the cycle is bumped in
631 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
632 * a transiently forward state. Instead, we can see the LSN in a
633 * transiently behind state if we happen to race with a cycle wrap.
634 */
635 cur_cycle = READ_ONCE(log->l_curr_cycle);
636 smp_rmb();
637 cur_block = READ_ONCE(log->l_curr_block);
638
639 if ((CYCLE_LSN(lsn) > cur_cycle) ||
640 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
641 /*
642 * If the metadata LSN appears invalid, it's possible the check
643 * above raced with a wrap to the next log cycle. Grab the lock
644 * to check for sure.
645 */
646 spin_lock(&log->l_icloglock);
647 cur_cycle = log->l_curr_cycle;
648 cur_block = log->l_curr_block;
649 spin_unlock(&log->l_icloglock);
650
651 if ((CYCLE_LSN(lsn) > cur_cycle) ||
652 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
653 valid = false;
654 }
655
656 return valid;
657}
658
659#endif /* __XFS_LOG_PRIV_H__ */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#ifndef __XFS_LOG_PRIV_H__
7#define __XFS_LOG_PRIV_H__
8
9struct xfs_buf;
10struct xlog;
11struct xlog_ticket;
12struct xfs_mount;
13
14/*
15 * get client id from packed copy.
16 *
17 * this hack is here because the xlog_pack code copies four bytes
18 * of xlog_op_header containing the fields oh_clientid, oh_flags
19 * and oh_res2 into the packed copy.
20 *
21 * later on this four byte chunk is treated as an int and the
22 * client id is pulled out.
23 *
24 * this has endian issues, of course.
25 */
26static inline uint xlog_get_client_id(__be32 i)
27{
28 return be32_to_cpu(i) >> 24;
29}
30
31/*
32 * In core log state
33 */
34enum xlog_iclog_state {
35 XLOG_STATE_ACTIVE, /* Current IC log being written to */
36 XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */
37 XLOG_STATE_SYNCING, /* This IC log is syncing */
38 XLOG_STATE_DONE_SYNC, /* Done syncing to disk */
39 XLOG_STATE_CALLBACK, /* Callback functions now */
40 XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */
41};
42
43#define XLOG_STATE_STRINGS \
44 { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \
45 { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \
46 { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \
47 { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \
48 { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \
49 { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }
50
51/*
52 * In core log flags
53 */
54#define XLOG_ICL_NEED_FLUSH (1u << 0) /* iclog needs REQ_PREFLUSH */
55#define XLOG_ICL_NEED_FUA (1u << 1) /* iclog needs REQ_FUA */
56
57#define XLOG_ICL_STRINGS \
58 { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \
59 { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" }
60
61
62/*
63 * Log ticket flags
64 */
65#define XLOG_TIC_PERM_RESERV (1u << 0) /* permanent reservation */
66
67#define XLOG_TIC_FLAGS \
68 { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }
69
70/*
71 * Below are states for covering allocation transactions.
72 * By covering, we mean changing the h_tail_lsn in the last on-disk
73 * log write such that no allocation transactions will be re-done during
74 * recovery after a system crash. Recovery starts at the last on-disk
75 * log write.
76 *
77 * These states are used to insert dummy log entries to cover
78 * space allocation transactions which can undo non-transactional changes
79 * after a crash. Writes to a file with space
80 * already allocated do not result in any transactions. Allocations
81 * might include space beyond the EOF. So if we just push the EOF a
82 * little, the last transaction for the file could contain the wrong
83 * size. If there is no file system activity, after an allocation
84 * transaction, and the system crashes, the allocation transaction
85 * will get replayed and the file will be truncated. This could
86 * be hours/days/... after the allocation occurred.
87 *
88 * The fix for this is to do two dummy transactions when the
89 * system is idle. We need two dummy transactions because the h_tail_lsn
90 * in the log record header needs to point beyond the last possible
91 * non-dummy transaction. The first dummy changes the h_tail_lsn to
92 * the first transaction before the dummy. The second dummy causes
93 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
94 *
95 * These dummy transactions get committed when everything
96 * is idle (after there has been some activity).
97 *
98 * There are 5 states used to control this.
99 *
100 * IDLE -- no logging has been done on the file system or
101 * we are done covering previous transactions.
102 * NEED -- logging has occurred and we need a dummy transaction
103 * when the log becomes idle.
104 * DONE -- we were in the NEED state and have committed a dummy
105 * transaction.
106 * NEED2 -- we detected that a dummy transaction has gone to the
107 * on disk log with no other transactions.
108 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
109 *
110 * There are two places where we switch states:
111 *
112 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
113 * We commit the dummy transaction and switch to DONE or DONE2,
114 * respectively. In all other states, we don't do anything.
115 *
116 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
117 *
118 * No matter what state we are in, if this isn't the dummy
119 * transaction going out, the next state is NEED.
120 * So, if we aren't in the DONE or DONE2 states, the next state
121 * is NEED. We can't be finishing a write of the dummy record
122 * unless it was committed and the state switched to DONE or DONE2.
123 *
124 * If we are in the DONE state and this was a write of the
125 * dummy transaction, we move to NEED2.
126 *
127 * If we are in the DONE2 state and this was a write of the
128 * dummy transaction, we move to IDLE.
129 *
130 *
131 * A single dummy transaction can end up appended to the same on-disk
132 * log write as a file space allocation. When this happens, the log recovery
133 * code replays the space allocation and a file could be truncated.
134 * This is why we have the NEED2 and DONE2 states before going idle.
135 */
136
137#define XLOG_STATE_COVER_IDLE 0
138#define XLOG_STATE_COVER_NEED 1
139#define XLOG_STATE_COVER_DONE 2
140#define XLOG_STATE_COVER_NEED2 3
141#define XLOG_STATE_COVER_DONE2 4
142
143#define XLOG_COVER_OPS 5
144
145typedef struct xlog_ticket {
146 struct list_head t_queue; /* reserve/write queue */
147 struct task_struct *t_task; /* task that owns this ticket */
148 xlog_tid_t t_tid; /* transaction identifier */
149 atomic_t t_ref; /* ticket reference count */
150 int t_curr_res; /* current reservation */
151 int t_unit_res; /* unit reservation */
152 char t_ocnt; /* original unit count */
153 char t_cnt; /* current unit count */
154 uint8_t t_flags; /* properties of reservation */
155 int t_iclog_hdrs; /* iclog hdrs in t_curr_res */
156} xlog_ticket_t;
157
158/*
159 * - A log record header is 512 bytes. There is plenty of room to grow the
160 * xlog_rec_header_t into the reserved space.
161 * - ic_data follows, so a write to disk can start at the beginning of
162 * the iclog.
163 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
164 * - ic_next is the pointer to the next iclog in the ring.
165 * - ic_log is a pointer back to the global log structure.
166 * - ic_size is the full size of the log buffer, minus the cycle headers.
167 * - ic_offset is the current number of bytes written to in this iclog.
168 * - ic_refcnt is bumped when someone is writing to the log.
169 * - ic_state is the state of the iclog.
170 *
171 * Because of cacheline contention on large machines, we need to separate
172 * various resources onto different cachelines. To start with, make the
173 * structure cacheline aligned. The following fields can be contended on
174 * by independent processes:
175 *
176 * - ic_callbacks
177 * - ic_refcnt
178 * - fields protected by the global l_icloglock
179 *
180 * so we need to ensure that these fields are located in separate cachelines.
181 * We'll put all the read-only and l_icloglock fields in the first cacheline,
182 * and move everything else out to subsequent cachelines.
183 */
184typedef struct xlog_in_core {
185 wait_queue_head_t ic_force_wait;
186 wait_queue_head_t ic_write_wait;
187 struct xlog_in_core *ic_next;
188 struct xlog_in_core *ic_prev;
189 struct xlog *ic_log;
190 u32 ic_size;
191 u32 ic_offset;
192 enum xlog_iclog_state ic_state;
193 unsigned int ic_flags;
194 void *ic_datap; /* pointer to iclog data */
195 struct list_head ic_callbacks;
196
197 /* reference counts need their own cacheline */
198 atomic_t ic_refcnt ____cacheline_aligned_in_smp;
199 xlog_in_core_2_t *ic_data;
200#define ic_header ic_data->hic_header
201#ifdef DEBUG
202 bool ic_fail_crc : 1;
203#endif
204 struct semaphore ic_sema;
205 struct work_struct ic_end_io_work;
206 struct bio ic_bio;
207 struct bio_vec ic_bvec[];
208} xlog_in_core_t;
209
210/*
211 * The CIL context is used to aggregate per-transaction details as well as to be
212 * passed to the iclog for checkpoint post-commit processing. After being
213 * passed to the iclog, another context needs to be allocated for tracking the
214 * next set of transactions to be aggregated into a checkpoint.
215 */
216struct xfs_cil;
217
218struct xfs_cil_ctx {
219 struct xfs_cil *cil;
220 xfs_csn_t sequence; /* chkpt sequence # */
221 xfs_lsn_t start_lsn; /* first LSN of chkpt commit */
222 xfs_lsn_t commit_lsn; /* chkpt commit record lsn */
223 struct xlog_in_core *commit_iclog;
224 struct xlog_ticket *ticket; /* chkpt ticket */
225 atomic_t space_used; /* aggregate size of regions */
226 struct list_head busy_extents; /* busy extents in chkpt */
227 struct list_head log_items; /* log items in chkpt */
228 struct list_head lv_chain; /* logvecs being pushed */
229 struct list_head iclog_entry;
230 struct list_head committing; /* ctx committing list */
231 struct work_struct discard_endio_work;
232 struct work_struct push_work;
233 atomic_t order_id;
234};
235
236/*
237 * Per-cpu CIL tracking items
238 */
239struct xlog_cil_pcp {
240 int32_t space_used;
241 uint32_t space_reserved;
242 struct list_head busy_extents;
243 struct list_head log_items;
244};
245
246/*
247 * Committed Item List structure
248 *
249 * This structure is used to track log items that have been committed but not
250 * yet written into the log. It is used only when the delayed logging mount
251 * option is enabled.
252 *
253 * This structure tracks the list of committing checkpoint contexts so
254 * we can avoid the problem of having to hold out new transactions during a
255 * flush until we have the commit record LSN of the checkpoint. We can
256 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
257 * sequence match and extract the commit LSN directly from there. If the
258 * checkpoint is still in the process of committing, we can block waiting for
259 * the commit LSN to be determined as well. This should make synchronous
260 * operations almost as efficient as the old logging methods.
261 */
262struct xfs_cil {
263 struct xlog *xc_log;
264 unsigned long xc_flags;
265 atomic_t xc_iclog_hdrs;
266 struct workqueue_struct *xc_push_wq;
267
268 struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp;
269 struct xfs_cil_ctx *xc_ctx;
270
271 spinlock_t xc_push_lock ____cacheline_aligned_in_smp;
272 xfs_csn_t xc_push_seq;
273 bool xc_push_commit_stable;
274 struct list_head xc_committing;
275 wait_queue_head_t xc_commit_wait;
276 wait_queue_head_t xc_start_wait;
277 xfs_csn_t xc_current_sequence;
278 wait_queue_head_t xc_push_wait; /* background push throttle */
279
280 void __percpu *xc_pcp; /* percpu CIL structures */
281#ifdef CONFIG_HOTPLUG_CPU
282 struct list_head xc_pcp_list;
283#endif
284} ____cacheline_aligned_in_smp;
285
286/* xc_flags bit values */
287#define XLOG_CIL_EMPTY 1
288#define XLOG_CIL_PCP_SPACE 2
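/*
 * Assumption: XLOG_CIL_EMPTY and XLOG_CIL_PCP_SPACE are bit numbers rather
 * than masks, operated on with the atomic bitops, e.g.
 * test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) or
 * test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags).
 */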
289
290/*
291 * The amount of log space we allow the CIL to aggregate is difficult to size.
292 * Whatever we choose, we have to make sure we can get a reservation for the
293 * log space effectively, that it is large enough to capture sufficient
294 * relogging to reduce log buffer IO significantly, but it is not too large for
295 * the log or induces too much latency when writing out through the iclogs. We
296 * track both space consumed and the number of vectors in the checkpoint
297 * context, so we need to decide which to use for limiting.
298 *
299 * Every log buffer we write out during a push needs a header reserved, which
300 * is at least one sector and more for v2 logs. Hence we need a reservation of
301 * at least 512 bytes per 32k of log space just for the LR headers. That means
302 * 16KB of reservation per megabyte of delayed logging space we will consume,
303 * plus various headers. The number of headers will vary based on the num of
304 * io vectors, so limiting on a specific number of vectors is going to result
305 * in transactions of varying size. IOWs, it is more consistent to track and
306 * limit space consumed in the log rather than by the number of objects being
307 * logged in order to prevent checkpoint ticket overruns.
308 *
309 * Further, use of static reservations through the log grant mechanism is
310 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
311 * grant) and a significant deadlock potential because regranting write space
312 * can block on log pushes. Hence if we have to regrant log space during a log
313 * push, we can deadlock.
314 *
315 * However, we can avoid this by use of a dynamic "reservation stealing"
316 * technique during transaction commit whereby unused reservation space in the
317 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
318 * space needed by the checkpoint transaction. This means that we never need to
319 * specifically reserve space for the CIL checkpoint transaction, nor do we
320 * need to regrant space once the checkpoint completes. This also means the
321 * checkpoint transaction ticket is specific to the checkpoint context, rather
322 * than the CIL itself.
323 *
324 * With dynamic reservations, we can effectively make up arbitrary limits for
325 * the checkpoint size so long as they don't violate any other size rules.
326 * Recovery imposes a rule that no transaction exceed half the log, so we are
327 * limited by that. Furthermore, the log transaction reservation subsystem
328 * tries to keep 25% of the log free, so we need to keep below that limit or we
329 * risk running out of free log space to start any new transactions.
330 *
331 * In order to keep background CIL push efficient, we only need to ensure the
332 * CIL is large enough to maintain sufficient in-memory relogging to avoid
333 * repeated physical writes of frequently modified metadata. If we allow the CIL
334 * to grow to a substantial fraction of the log, then we may be pinning hundreds
335 * of megabytes of metadata in memory until the CIL flushes. This can cause
336 * issues when we are running low on memory - pinned memory cannot be reclaimed,
337 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
338 * size limit for the CIL that limits the maximum amount of memory pinned by the
339 * CIL but does not limit performance by reducing relogging efficiency
340 * significantly.
341 *
342 * As such, the CIL push threshold ends up being the smaller of two thresholds:
343 * - a threshold large enough that it allows CIL to be pushed and progress to be
344 * made without excessive blocking of incoming transaction commits. This is
345 * defined to be 12.5% of the log space - half the 25% push threshold of the
346 * AIL.
347 * - small enough that it doesn't pin excessive amounts of memory but maintains
348 * close to peak relogging efficiency. This is defined to be 16x the iclog
349 * buffer window (32MB) as measurements have shown this to be roughly the
350 * point of diminishing performance increases under highly concurrent
351 * modification workloads.
352 *
353 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
354 * new threshold at which we block committing transactions until the background
355 * CIL commit commences and switches to a new context. While this is not a hard
356 * limit, it forces the process committing a transaction to the CIL to block and
357 * yield the CPU, giving the CIL push work a chance to be scheduled and start
358 * work. This prevents a process running lots of transactions from overfilling
359 * the CIL because it is not yielding the CPU. We set the blocking limit at
360 * twice the background push space threshold so we keep in line with the AIL
361 * push thresholds.
362 *
363 * Note: this is not a -hard- limit as blocking is applied after the transaction
364 * is inserted into the CIL and the push has been triggered. It is largely a
365 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
366 * limit will be difficult to implement without introducing global serialisation
367 * in the CIL commit fast path, and it's not at all clear that we actually need
368 * such hard limits given the ~7 years we've run without a hard limit before
369 * finding the first situation where a checkpoint size overflow actually
370 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
371 * we've overrun the max size.
372 */
373#define XLOG_CIL_SPACE_LIMIT(log) \
374 min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
375
376#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \
377 (XLOG_CIL_SPACE_LIMIT(log) * 2)
378
379/*
380 * ticket grant locks, queues and accounting have their own cachelines
381 * as these are quite hot and can be operated on concurrently.
382 */
383struct xlog_grant_head {
384 spinlock_t lock ____cacheline_aligned_in_smp;
385 struct list_head waiters;
386 atomic64_t grant;
387};
388
389/*
390 * The reservation head lsn is not made up of a cycle number and block number.
391 * Instead, it uses a cycle number and byte number. Logs don't expect to
392 * overflow 31 bits worth of byte offset, so using a byte number will mean
393 * that round off problems won't occur when releasing partial reservations.
394 */
395struct xlog {
396 /* The following fields don't need locking */
397 struct xfs_mount *l_mp; /* mount point */
398 struct xfs_ail *l_ailp; /* AIL log is working with */
399 struct xfs_cil *l_cilp; /* CIL log is working with */
400 struct xfs_buftarg *l_targ; /* buftarg of log */
401 struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */
402 struct delayed_work l_work; /* background flush work */
403 long l_opstate; /* operational state */
404 uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
405 struct list_head *l_buf_cancel_table;
406 int l_iclog_hsize; /* size of iclog header */
407 int l_iclog_heads; /* # of iclog header sectors */
408 uint l_sectBBsize; /* sector size in BBs (2^n) */
409 int l_iclog_size; /* size of each iclog buffer in bytes */
410 int l_iclog_bufs; /* number of iclog buffers */
411 xfs_daddr_t l_logBBstart; /* start block of log */
412 int l_logsize; /* size of log in bytes */
413 int l_logBBsize; /* size of log in BB chunks */
414
415 /* The following block of fields are changed while holding icloglock */
416 wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp;
417 /* waiting for iclog flush */
418 int l_covered_state;/* state of "covering disk
419 * log entries" */
420 xlog_in_core_t *l_iclog; /* head log queue */
421 spinlock_t l_icloglock; /* grab to change iclog state */
422 int l_curr_cycle; /* Cycle number of log writes */
423 int l_prev_cycle; /* Cycle number before last
424 * block increment */
425 int l_curr_block; /* current logical log block */
426 int l_prev_block; /* previous logical log block */
427
428 /*
429 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
430 * read without needing to hold specific locks. To avoid operations
431 * contending with other hot objects, place each of them on a separate
432 * cacheline.
433 */
434 /* lsn of last LR on disk */
435 atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
436 /* lsn of 1st LR with unflushed buffers */
437 atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
438
439 struct xlog_grant_head l_reserve_head;
440 struct xlog_grant_head l_write_head;
441
442 struct xfs_kobj l_kobj;
443
444 /* log recovery lsn tracking (for buffer submission) */
445 xfs_lsn_t l_recovery_lsn;
446
447 uint32_t l_iclog_roundoff;/* padding roundoff */
448
449 /* Users of log incompat features should take a read lock. */
450 struct rw_semaphore l_incompat_users;
451};
452
453/*
454 * Bits for operational state
455 */
456#define XLOG_ACTIVE_RECOVERY 0 /* in the middle of recovery */
457#define XLOG_RECOVERY_NEEDED 1 /* log was recovered */
458#define XLOG_IO_ERROR 2 /* log hit an I/O error, and being
459 shutdown */
460#define XLOG_TAIL_WARN 3 /* log tail verify warning issued */
461
462static inline bool
463xlog_recovery_needed(struct xlog *log)
464{
465 return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
466}
467
468static inline bool
469xlog_in_recovery(struct xlog *log)
470{
471 return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
472}
473
474static inline bool
475xlog_is_shutdown(struct xlog *log)
476{
477 return test_bit(XLOG_IO_ERROR, &log->l_opstate);
478}
479
480/*
481 * Wait until the xlog_force_shutdown() has marked the log as shut down
482 * so xlog_is_shutdown() will always return true.
483 */
484static inline void
485xlog_shutdown_wait(
486 struct xlog *log)
487{
488 wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
489}
490
491/* common routines */
492extern int
493xlog_recover(
494 struct xlog *log);
495extern int
496xlog_recover_finish(
497 struct xlog *log);
498extern void
499xlog_recover_cancel(struct xlog *);
500
501extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
502 char *dp, int size);
503
504extern struct kmem_cache *xfs_log_ticket_cache;
505struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
506 int count, bool permanent);
507
508void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
509void xlog_print_trans(struct xfs_trans *);
510int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
511 struct list_head *lv_chain, struct xlog_ticket *tic,
512 uint32_t len);
513void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
514void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
515
516void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
517 int eventual_size);
518int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
519 struct xlog_ticket *ticket);
520
521/*
522 * When we crack an atomic LSN, we sample it first so that the value will not
523 * change while we are cracking it into the component values. This means we
524 * will always get consistent component values to work from. This should always
525 * be used to sample and crack LSNs that are stored and updated in atomic
526 * variables.
527 */
528static inline void
529xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
530{
531 xfs_lsn_t val = atomic64_read(lsn);
532
533 *cycle = CYCLE_LSN(val);
534 *block = BLOCK_LSN(val);
535}
536
537/*
538 * Calculate and assign a value to an atomic LSN variable from component pieces.
539 */
540static inline void
541xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
542{
543 atomic64_set(lsn, xlog_assign_lsn(cycle, block));
544}
545
546/*
547 * When we crack the grant head, we sample it first so that the value will not
548 * change while we are cracking it into the component values. This means we
549 * will always get consistent component values to work from.
550 */
551static inline void
552xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
553{
554 *cycle = val >> 32;
555 *space = val & 0xffffffff;
556}
557
558static inline void
559xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
560{
561 xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
562}
563
564static inline int64_t
565xlog_assign_grant_head_val(int cycle, int space)
566{
567 return ((int64_t)cycle << 32) | space;
568}
569
570static inline void
571xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
572{
573 atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
574}
575
576/*
577 * Committed Item List interfaces
578 */
579int xlog_cil_init(struct xlog *log);
580void xlog_cil_init_post_recovery(struct xlog *log);
581void xlog_cil_destroy(struct xlog *log);
582bool xlog_cil_empty(struct xlog *log);
583void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
584 xfs_csn_t *commit_seq, bool regrant);
585void xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
586 struct xlog_in_core *iclog);
587
588
589/*
590 * CIL force routines
591 */
592void xlog_cil_flush(struct xlog *log);
593xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
594
595static inline void
596xlog_cil_force(struct xlog *log)
597{
598 xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
599}
600
601/*
602 * Wrapper function for waiting on a wait queue serialised against wakeups
603 * by a spinlock. This matches the semantics of all the wait queues used in the
604 * log code.
605 */
606static inline void
607xlog_wait(
608 struct wait_queue_head *wq,
609 struct spinlock *lock)
610 __releases(lock)
611{
612 DECLARE_WAITQUEUE(wait, current);
613
614 add_wait_queue_exclusive(wq, &wait);
615 __set_current_state(TASK_UNINTERRUPTIBLE);
616 spin_unlock(lock);
617 schedule();
618 remove_wait_queue(wq, &wait);
619}
620
621int xlog_wait_on_iclog(struct xlog_in_core *iclog);
622
623/*
624 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
625 * means that the next log record that includes this metadata could have a
626 * smaller LSN. In turn, this means that the modification in the log would not
627 * replay.
628 */
629static inline bool
630xlog_valid_lsn(
631 struct xlog *log,
632 xfs_lsn_t lsn)
633{
634 int cur_cycle;
635 int cur_block;
636 bool valid = true;
637
638 /*
639 * First, sample the current lsn without locking to avoid added
640 * contention from metadata I/O. The current cycle and block are updated
641 * (in xlog_state_switch_iclogs()) and read here in a particular order
642 * to avoid false negatives (e.g., thinking the metadata LSN is valid
643 * when it is not).
644 *
645 * The current block is always rewound before the cycle is bumped in
646 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
647 * a transiently forward state. Instead, we can see the LSN in a
648 * transiently behind state if we happen to race with a cycle wrap.
649 */
650 cur_cycle = READ_ONCE(log->l_curr_cycle);
651 smp_rmb();
652 cur_block = READ_ONCE(log->l_curr_block);
653
654 if ((CYCLE_LSN(lsn) > cur_cycle) ||
655 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
656 /*
657 * If the metadata LSN appears invalid, it's possible the check
658 * above raced with a wrap to the next log cycle. Grab the lock
659 * to check for sure.
660 */
661 spin_lock(&log->l_icloglock);
662 cur_cycle = log->l_curr_cycle;
663 cur_block = log->l_curr_block;
664 spin_unlock(&log->l_icloglock);
665
666 if ((CYCLE_LSN(lsn) > cur_cycle) ||
667 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
668 valid = false;
669 }
670
671 return valid;
672}
673
674/*
675 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
676 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
677 * to fall back to vmalloc, so we can't actually do anything useful with gfp
678 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
679 * will do direct reclaim and compaction in the slow path, both of which are
680 * horrendously expensive. We just want kmalloc to fail fast and fall back to
681 * vmalloc if it can't get something straight away from the free lists or
682 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
683 *
684 * This assumes that the caller uses memalloc_nofs_save task context here, so
685 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
686 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
687 * allocations, so let's just all pretend this is a GFP_KERNEL context
688 * operation....
689 */
690static inline void *
691xlog_kvmalloc(
692 size_t buf_size)
693{
694 gfp_t flags = GFP_KERNEL;
695 void *p;
696
697 flags &= ~__GFP_DIRECT_RECLAIM;
698 flags |= __GFP_NOWARN | __GFP_NORETRY;
699 do {
700 p = kmalloc(buf_size, flags);
701 if (!p)
702 p = vmalloc(buf_size);
703 } while (!p);
704
705 return p;
706}
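/*
 * A usage sketch (illustrative only, per the NOFS note above): callers run
 * under memalloc_nofs_save() task context and free with kvfree(), which
 * handles both kmalloc and vmalloc memory:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	void *buf = xlog_kvmalloc(buf_size);
 *	memalloc_nofs_restore(nofs_flags);
 *	...
 *	kvfree(buf);
 */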
707
708/*
709 * CIL CPU dead notifier
710 */
711void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);
712
713#endif /* __XFS_LOG_PRIV_H__ */