v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#ifndef	__XFS_LOG_PRIV_H__
  7#define __XFS_LOG_PRIV_H__
  8
  9struct xfs_buf;
 10struct xlog;
 11struct xlog_ticket;
 12struct xfs_mount;
 13
 14/*
 15 * get client id from packed copy.
 16 *
 17 * this hack is here because the xlog_pack code copies four bytes
 18 * of xlog_op_header containing the fields oh_clientid, oh_flags
 19 * and oh_res2 into the packed copy.
 20 *
 21 * later on this four byte chunk is treated as an int and the
 22 * client id is pulled out.
 23 *
 24 * this has endian issues, of course.
 25 */
 26static inline uint xlog_get_client_id(__be32 i)
 27{
 28	return be32_to_cpu(i) >> 24;
 29}
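/*
 * A worked example of the extraction above (editor's sketch; the packed
 * value is illustrative): with oh_clientid = XFS_TRANSACTION (0x69) in
 * the most significant byte, be32_to_cpu() plus the shift recovers the
 * client id on both little- and big-endian hosts:
 *
 *	__be32 packed = cpu_to_be32(0x69000000);
 *	ASSERT(xlog_get_client_id(packed) == 0x69);
 */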
 30
 31/*
 32 * In core log state
 33 */
 34enum xlog_iclog_state {
 35	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
 36	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
 37	XLOG_STATE_SYNCING,	/* This IC log is syncing */
 38	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
 39	XLOG_STATE_CALLBACK,	/* Callback functions now */
 40	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
 41};
 42
 43#define XLOG_STATE_STRINGS \
 44	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
 45	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
 46	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
 47	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
 48	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
 49	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }
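/*
 * These { value, "name" } pairs are shaped for the tracepoint
 * pretty-printers; a hedged sketch of how a (hypothetical) event's
 * TP_printk() would consume them:
 *
 *	TP_printk("iclog state %s",
 *		  __print_symbolic(__entry->state, XLOG_STATE_STRINGS))
 */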
 50
 51/*
 52 * In core log flags
 53 */
 54#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
 55#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */
 56
 57#define XLOG_ICL_STRINGS \
 58	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
 59	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }
 60
 61
 62/*
 63 * Log ticket flags
 64 */
 65#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */
 66
 67#define XLOG_TIC_FLAGS \
 68	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 69
 70/*
 71 * Below are states for covering allocation transactions.
 72 * By covering, we mean changing the h_tail_lsn in the last on-disk
 73 * log write such that no allocation transactions will be re-done during
 74 * recovery after a system crash. Recovery starts at the last on-disk
 75 * log write.
 76 *
 77 * These states are used to insert dummy log entries to cover
 78 * space allocation transactions which can undo non-transactional changes
 79 * after a crash. Writes to a file with space
 80 * already allocated do not result in any transactions. Allocations
 81 * might include space beyond the EOF. So if we just push the EOF a
 82 * little, the last transaction for the file could contain the wrong
 83 * size. If there is no file system activity, after an allocation
 84 * transaction, and the system crashes, the allocation transaction
 85 * will get replayed and the file will be truncated. This could
 86 * be hours/days/... after the allocation occurred.
 87 *
 88 * The fix for this is to do two dummy transactions when the
 89 * system is idle. We need two dummy transactions because the h_tail_lsn
 90 * in the log record header needs to point beyond the last possible
 91 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 92 * the first transaction before the dummy. The second dummy causes
 93 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 94 *
 95 * These dummy transactions get committed when everything
 96 * is idle (after there has been some activity).
 97 *
 98 * There are 5 states used to control this.
 99 *
100 *  IDLE -- no logging has been done on the file system or
101 *		we are done covering previous transactions.
102 *  NEED -- logging has occurred and we need a dummy transaction
103 *		when the log becomes idle.
104 *  DONE -- we were in the NEED state and have committed a dummy
105 *		transaction.
106 *  NEED2 -- we detected that a dummy transaction has gone to the
107 *		on disk log with no other transactions.
108 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
109 *
110 * There are two places where we switch states:
111 *
112 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
113 *	We commit the dummy transaction and switch to DONE or DONE2,
114 *	respectively. In all other states, we don't do anything.
115 *
116 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
117 *
118 *	No matter what state we are in, if this isn't the dummy
119 *	transaction going out, the next state is NEED.
120 *	So, if we aren't in the DONE or DONE2 states, the next state
121 *	is NEED. We can't be finishing a write of the dummy record
122 *	unless it was committed and the state switched to DONE or DONE2.
123 *
124 *	If we are in the DONE state and this was a write of the
125 *		dummy transaction, we move to NEED2.
126 *
127 *	If we are in the DONE2 state and this was a write of the
128 *		dummy transaction, we move to IDLE.
129 *
130 *
131 * If only one dummy transaction is written, it can get appended to
132 * one file space allocation. When this happens, the log recovery
133 * code replays the space allocation and a file could be truncated.
134 * This is why we have the NEED2 and DONE2 states before going idle.
135 */
136
137#define XLOG_STATE_COVER_IDLE	0
138#define XLOG_STATE_COVER_NEED	1
139#define XLOG_STATE_COVER_DONE	2
140#define XLOG_STATE_COVER_NEED2	3
141#define XLOG_STATE_COVER_DONE2	4
142
143#define XLOG_COVER_OPS		5
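/*
 * A minimal sketch (editor's illustration, not the kernel's exact code)
 * of transition rule 2.) above, applied as a log write completes:
 */
static inline int
xlog_covered_next_state(int cur_state, bool dummy_write)
{
	if (!dummy_write)
		return XLOG_STATE_COVER_NEED;	/* any real transaction */
	if (cur_state == XLOG_STATE_COVER_DONE)
		return XLOG_STATE_COVER_NEED2;
	if (cur_state == XLOG_STATE_COVER_DONE2)
		return XLOG_STATE_COVER_IDLE;
	return XLOG_STATE_COVER_NEED;
}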
144
145typedef struct xlog_ticket {
146	struct list_head	t_queue;	/* reserve/write queue */
147	struct task_struct	*t_task;	/* task that owns this ticket */
148	xlog_tid_t		t_tid;		/* transaction identifier */
149	atomic_t		t_ref;		/* ticket reference count */
150	int			t_curr_res;	/* current reservation */
151	int			t_unit_res;	/* unit reservation */
152	char			t_ocnt;		/* original unit count */
153	char			t_cnt;		/* current unit count */
154	uint8_t			t_flags;	/* properties of reservation */
155	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
156} xlog_ticket_t;
157
158/*
159 * - A log record header is 512 bytes.  There is plenty of room to grow the
160 *	xlog_rec_header_t into the reserved space.
161 * - ic_data follows, so a write to disk can start at the beginning of
162 *	the iclog.
163 * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
164 * - ic_next is the pointer to the next iclog in the ring.
165 * - ic_log is a pointer back to the global log structure.
166 * - ic_size is the full size of the log buffer, minus the cycle headers.
167 * - ic_offset is the current number of bytes written to in this iclog.
168 * - ic_refcnt is bumped when someone is writing to the log.
169 * - ic_state is the state of the iclog.
170 *
171 * Because of cacheline contention on large machines, we need to separate
172 * various resources onto different cachelines. To start with, make the
173 * structure cacheline aligned. The following fields can be contended on
174 * by independent processes:
175 *
176 *	- ic_callbacks
177 *	- ic_refcnt
178 *	- fields protected by the global l_icloglock
179 *
180 * so we need to ensure that these fields are located in separate cachelines.
181 * We'll put all the read-only and l_icloglock fields in the first cacheline,
182 * and move everything else out to subsequent cachelines.
183 */
184typedef struct xlog_in_core {
185	wait_queue_head_t	ic_force_wait;
186	wait_queue_head_t	ic_write_wait;
187	struct xlog_in_core	*ic_next;
188	struct xlog_in_core	*ic_prev;
189	struct xlog		*ic_log;
190	u32			ic_size;
191	u32			ic_offset;
192	enum xlog_iclog_state	ic_state;
193	unsigned int		ic_flags;
194	void			*ic_datap;	/* pointer to iclog data */
195	struct list_head	ic_callbacks;
196
197	/* reference counts need their own cacheline */
198	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
199	xlog_in_core_2_t	*ic_data;
200#define ic_header	ic_data->hic_header
201#ifdef DEBUG
202	bool			ic_fail_crc : 1;
203#endif
204	struct semaphore	ic_sema;
205	struct work_struct	ic_end_io_work;
206	struct bio		ic_bio;
207	struct bio_vec		ic_bvec[];
208} xlog_in_core_t;
209
210/*
211 * The CIL context is used to aggregate per-transaction details, and is then
212 * passed to the iclog for checkpoint post-commit processing.  After being
213 * passed to the iclog, another context needs to be allocated for tracking the
214 * next set of transactions to be aggregated into a checkpoint.
215 */
216struct xfs_cil;
217
218struct xfs_cil_ctx {
219	struct xfs_cil		*cil;
220	xfs_csn_t		sequence;	/* chkpt sequence # */
221	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
222	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
223	struct xlog_in_core	*commit_iclog;
224	struct xlog_ticket	*ticket;	/* chkpt ticket */
225	atomic_t		space_used;	/* aggregate size of regions */
226	struct list_head	busy_extents;	/* busy extents in chkpt */
227	struct list_head	log_items;	/* log items in chkpt */
228	struct list_head	lv_chain;	/* logvecs being pushed */
229	struct list_head	iclog_entry;
230	struct list_head	committing;	/* ctx committing list */
231	struct work_struct	discard_endio_work;
232	struct work_struct	push_work;
233	atomic_t		order_id;
234};
235
236/*
237 * Per-cpu CIL tracking items
238 */
239struct xlog_cil_pcp {
240	int32_t			space_used;
241	uint32_t		space_reserved;
242	struct list_head	busy_extents;
243	struct list_head	log_items;
244};
245
246/*
247 * Committed Item List structure
248 *
249 * This structure is used to track log items that have been committed but not
250 * yet written into the log. It is used only when the delayed logging mount
251 * option is enabled.
252 *
253 * This structure tracks the list of committing checkpoint contexts so
254 * we can avoid the problem of having to hold out new transactions during a
255 * flush until we have the commit record LSN of the checkpoint. We can
256 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
257 * sequence match and extract the commit LSN directly from there. If the
258 * checkpoint is still in the process of committing, we can block waiting for
259 * the commit LSN to be determined as well. This should make synchronous
260 * operations almost as efficient as the old logging methods.
261 */
262struct xfs_cil {
263	struct xlog		*xc_log;
264	unsigned long		xc_flags;
265	atomic_t		xc_iclog_hdrs;
266	struct workqueue_struct	*xc_push_wq;
267
268	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
269	struct xfs_cil_ctx	*xc_ctx;
270
271	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
272	xfs_csn_t		xc_push_seq;
273	bool			xc_push_commit_stable;
274	struct list_head	xc_committing;
275	wait_queue_head_t	xc_commit_wait;
276	wait_queue_head_t	xc_start_wait;
277	xfs_csn_t		xc_current_sequence;
278	wait_queue_head_t	xc_push_wait;	/* background push throttle */
279
280	void __percpu		*xc_pcp;	/* percpu CIL structures */
281#ifdef CONFIG_HOTPLUG_CPU
282	struct list_head	xc_pcp_list;
283#endif
284} ____cacheline_aligned_in_smp;
285
286/* xc_flags bit values */
287#define	XLOG_CIL_EMPTY		1
288#define XLOG_CIL_PCP_SPACE	2
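/*
 * A hedged sketch of the committing-list scan described in the comment
 * above struct xfs_cil (the real lookup lives in xfs_log_cil.c and also
 * blocks on checkpoints whose commit record is not yet written):
 */
static inline xfs_lsn_t
xlog_cil_lookup_commit_lsn(struct xfs_cil *cil, xfs_csn_t sequence)
{
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence == sequence) {
			commit_lsn = ctx->commit_lsn;
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;
}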
289
290/*
291 * The amount of log space we allow the CIL to aggregate is difficult to size.
292 * Whatever we choose, we have to make sure we can get a reservation for the
293 * log space effectively, that it is large enough to capture sufficient
294 * relogging to reduce log buffer IO significantly, but it is not too large for
295 * the log and does not induce too much latency when writing out through the iclogs. We
296 * track both space consumed and the number of vectors in the checkpoint
297 * context, so we need to decide which to use for limiting.
298 *
299 * Every log buffer we write out during a push needs a header reserved, which
300 * is at least one sector and more for v2 logs. Hence we need a reservation of
301 * at least 512 bytes per 32k of log space just for the LR headers. That means
302 * 16KB of reservation per megabyte of delayed logging space we will consume,
303 * plus various headers.  The number of headers will vary based on the number
304 * of io vectors, so limiting on a specific number of vectors is going to result
305 * in transactions of varying size. IOWs, it is more consistent to track and
306 * limit space consumed in the log rather than by the number of objects being
307 * logged in order to prevent checkpoint ticket overruns.
308 *
309 * Further, use of static reservations through the log grant mechanism is
310 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
311 * grant) and a significant deadlock potential because regranting write space
312 * can block on log pushes. Hence if we have to regrant log space during a log
313 * push, we can deadlock.
314 *
315 * However, we can avoid this by use of a dynamic "reservation stealing"
316 * technique during transaction commit whereby unused reservation space in the
317 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
318 * space needed by the checkpoint transaction. This means that we never need to
319 * specifically reserve space for the CIL checkpoint transaction, nor do we
320 * need to regrant space once the checkpoint completes. This also means the
321 * checkpoint transaction ticket is specific to the checkpoint context, rather
322 * than the CIL itself.
323 *
324 * With dynamic reservations, we can effectively make up arbitrary limits for
325 * the checkpoint size so long as they don't violate any other size rules.
326 * Recovery imposes a rule that no transaction exceed half the log, so we are
327 * limited by that.  Furthermore, the log transaction reservation subsystem
328 * tries to keep 25% of the log free, so we need to keep below that limit or we
329 * risk running out of free log space to start any new transactions.
330 *
331 * In order to keep background CIL push efficient, we only need to ensure the
332 * CIL is large enough to maintain sufficient in-memory relogging to avoid
333 * repeated physical writes of frequently modified metadata. If we allow the CIL
334 * to grow to a substantial fraction of the log, then we may be pinning hundreds
335 * of megabytes of metadata in memory until the CIL flushes. This can cause
336 * issues when we are running low on memory - pinned memory cannot be reclaimed,
337 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
338 * size limit for the CIL that limits the maximum amount of memory pinned by the
339 * CIL but does not limit performance by reducing relogging efficiency
340 * significantly.
341 *
342 * As such, the CIL push threshold ends up being the smaller of two thresholds:
343 * - a threshold large enough that it allows CIL to be pushed and progress to be
344 *   made without excessive blocking of incoming transaction commits. This is
345 *   defined to be 12.5% of the log space - half the 25% push threshold of the
346 *   AIL.
347 * - small enough that it doesn't pin excessive amounts of memory but maintains
348 *   close to peak relogging efficiency. This is defined to be 16x the iclog
349 *   buffer window (32MB) as measurements have shown this to be roughly the
350 *   point of diminishing performance increases under highly concurrent
351 *   modification workloads.
352 *
353 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
354 * new threshold at which we block committing transactions until the background
355 * CIL commit commences and switches to a new context. While this is not a hard
356 * limit, it forces the process committing a transaction to the CIL to block and
357 * yield the CPU, giving the CIL push work a chance to be scheduled and start
358 * work. This prevents a process running lots of transactions from overfilling
359 * the CIL because it is not yielding the CPU. We set the blocking limit at
360 * twice the background push space threshold so we keep in line with the AIL
361 * push thresholds.
362 *
363 * Note: this is not a -hard- limit as blocking is applied after the transaction
364 * is inserted into the CIL and the push has been triggered. It is largely a
365 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
366 * limit will be difficult to implement without introducing global serialisation
367 * in the CIL commit fast path, and it's not at all clear that we actually need
368 * such hard limits given the ~7 years we've run without a hard limit before
369 * finding the first situation where a checkpoint size overflow actually
370 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
371 * we've overrun the max size.
372 */
373#define XLOG_CIL_SPACE_LIMIT(log)	\
374	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
375
376#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
377	(XLOG_CIL_SPACE_LIMIT(log) * 2)
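/*
 * Worked example (editor's arithmetic, assuming a v2 log with the
 * maximum 256k iclog size): for a 2GB log, l_logsize >> 3 is 256MB,
 * while BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4 is 16 * (8 * 256k) = 32MB,
 * so the background push threshold is min(256MB, 32MB) = 32MB and the
 * blocking threshold is 64MB.
 */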
378
379/*
380 * ticket grant locks, queues and accounting have their own cachelines
381 * as these are quite hot and can be operated on concurrently.
382 */
383struct xlog_grant_head {
384	spinlock_t		lock ____cacheline_aligned_in_smp;
385	struct list_head	waiters;
386	atomic64_t		grant;
387};
388
389/*
390 * The reservation head lsn is not made up of a cycle number and block number.
391 * Instead, it uses a cycle number and byte number.  Logs don't expect to
392 * overflow 31 bits worth of byte offset, so using a byte number will mean
393 * that round off problems won't occur when releasing partial reservations.
394 */
395struct xlog {
396	/* The following fields don't need locking */
397	struct xfs_mount	*l_mp;	        /* mount point */
398	struct xfs_ail		*l_ailp;	/* AIL log is working with */
399	struct xfs_cil		*l_cilp;	/* CIL log is working with */
400	struct xfs_buftarg	*l_targ;        /* buftarg of log */
401	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
402	struct delayed_work	l_work;		/* background flush work */
403	long			l_opstate;	/* operational state */
404	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
405	struct list_head	*l_buf_cancel_table;
406	int			l_iclog_hsize;  /* size of iclog header */
407	int			l_iclog_heads;  /* # of iclog header sectors */
408	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
409	int			l_iclog_size;	/* size of each iclog in bytes */
410	int			l_iclog_bufs;	/* number of iclog buffers */
411	xfs_daddr_t		l_logBBstart;   /* start block of log */
412	int			l_logsize;      /* size of log in bytes */
413	int			l_logBBsize;    /* size of log in BB chunks */
414
415	/* The following block of fields are changed while holding icloglock */
416	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
417						/* waiting for iclog flush */
418	int			l_covered_state;/* state of "covering disk
419						 * log entries" */
420	xlog_in_core_t		*l_iclog;       /* head log queue	*/
421	spinlock_t		l_icloglock;    /* grab to change iclog state */
422	int			l_curr_cycle;   /* Cycle number of log writes */
423	int			l_prev_cycle;   /* Cycle number before last
424						 * block increment */
425	int			l_curr_block;   /* current logical log block */
426	int			l_prev_block;   /* previous logical log block */
427
428	/*
429	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
430	 * read without needing to hold specific locks. To avoid operations
431	 * contending with other hot objects, place each of them on a separate
432	 * cacheline.
433	 */
434	/* lsn of last LR on disk */
435	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
436	/* lsn of 1st LR with unflushed buffers */
437	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
438
439	struct xlog_grant_head	l_reserve_head;
440	struct xlog_grant_head	l_write_head;
441
442	struct xfs_kobj		l_kobj;
443
444	/* log recovery lsn tracking (for buffer submission) */
445	xfs_lsn_t		l_recovery_lsn;
446
447	uint32_t		l_iclog_roundoff;/* padding roundoff */
448
449	/* Users of log incompat features should take a read lock. */
450	struct rw_semaphore	l_incompat_users;
451};
452
453/*
454 * Bits for operational state
455 */
456#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
457#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
458#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
459				   shutdown */
460#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */
461
462static inline bool
463xlog_recovery_needed(struct xlog *log)
464{
465	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
466}
467
468static inline bool
469xlog_in_recovery(struct xlog *log)
470{
471	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
472}
473
474static inline bool
475xlog_is_shutdown(struct xlog *log)
476{
477	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
478}
479
480/*
481 * Wait until xlog_force_shutdown() has marked the log as shut down
482 * so xlog_is_shutdown() will always return true.
483 */
484static inline void
485xlog_shutdown_wait(
486	struct xlog	*log)
487{
488	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
489}
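/*
 * The waker side (a hedged sketch of the ordering xlog_force_shutdown()
 * must provide for the wait above to terminate):
 *
 *	set_bit(XLOG_IO_ERROR, &log->l_opstate);
 *	wake_up_var(&log->l_opstate);
 */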
490
491/* common routines */
492extern int
493xlog_recover(
494	struct xlog		*log);
495extern int
496xlog_recover_finish(
497	struct xlog		*log);
498extern void
499xlog_recover_cancel(struct xlog *);
500
501extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
502			    char *dp, int size);
503
504extern struct kmem_cache *xfs_log_ticket_cache;
505struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
506		int count, bool permanent);
507
508void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
509void	xlog_print_trans(struct xfs_trans *);
510int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
511		struct list_head *lv_chain, struct xlog_ticket *tic,
512		uint32_t len);
513void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
514void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
515
516void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
517		int eventual_size);
518int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
519		struct xlog_ticket *ticket);
520
521/*
522 * When we crack an atomic LSN, we sample it first so that the value will not
523 * change while we are cracking it into the component values. This means we
524 * will always get consistent component values to work from. This should always
525 * be used to sample and crack LSNs that are stored and updated in atomic
526 * variables.
527 */
528static inline void
529xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
530{
531	xfs_lsn_t val = atomic64_read(lsn);
532
533	*cycle = CYCLE_LSN(val);
534	*block = BLOCK_LSN(val);
535}
536
537/*
538 * Calculate and assign a value to an atomic LSN variable from component pieces.
539 */
540static inline void
541xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
542{
543	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
544}
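/*
 * Packing illustrated (assuming xlog_assign_lsn() is
 * ((xfs_lsn_t)cycle << 32) | block): cycle 5 and block 0x1000 pack to
 * 0x0000000500001000, and CYCLE_LSN()/BLOCK_LSN() recover the halves.
 */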
545
546/*
547 * When we crack the grant head, we sample it first so that the value will not
548 * change while we are cracking it into the component values. This means we
549 * will always get consistent component values to work from.
550 */
551static inline void
552xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
553{
554	*cycle = val >> 32;
555	*space = val & 0xffffffff;
556}
557
558static inline void
559xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
560{
561	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
562}
563
564static inline int64_t
565xlog_assign_grant_head_val(int cycle, int space)
566{
567	return ((int64_t)cycle << 32) | space;
568}
569
570static inline void
571xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
572{
573	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
574}
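/*
 * A hedged sketch of how these helpers pair up when moving a grant head
 * forward (modelled on the cmpxchg loop historically used by
 * xlog_grant_add_space(); the real code also handles the byte offset
 * wrapping past the end of the log):
 */
static inline void
xlog_grant_head_add_sketch(struct xlog_grant_head *head, int bytes)
{
	int64_t	old, new;
	int	cycle, space;

	do {
		old = atomic64_read(&head->grant);
		xlog_crack_grant_head_val(old, &cycle, &space);
		space += bytes;		/* cycle wrap handling omitted */
		new = xlog_assign_grant_head_val(cycle, space);
	} while (atomic64_cmpxchg(&head->grant, old, new) != old);
}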
575
576/*
577 * Committed Item List interfaces
578 */
579int	xlog_cil_init(struct xlog *log);
580void	xlog_cil_init_post_recovery(struct xlog *log);
581void	xlog_cil_destroy(struct xlog *log);
582bool	xlog_cil_empty(struct xlog *log);
583void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
584			xfs_csn_t *commit_seq, bool regrant);
585void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
586			struct xlog_in_core *iclog);
587
588
589/*
590 * CIL force routines
591 */
592void xlog_cil_flush(struct xlog *log);
593xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
594
595static inline void
596xlog_cil_force(struct xlog *log)
597{
598	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
599}
600
601/*
602 * Wrapper function for waiting on a wait queue serialised against wakeups
603 * by a spinlock. This matches the semantics of all the wait queues used in the
604 * log code.
605 */
606static inline void
607xlog_wait(
608	struct wait_queue_head	*wq,
609	struct spinlock		*lock)
610		__releases(lock)
611{
612	DECLARE_WAITQUEUE(wait, current);
613
614	add_wait_queue_exclusive(wq, &wait);
615	__set_current_state(TASK_UNINTERRUPTIBLE);
616	spin_unlock(lock);
617	schedule();
618	remove_wait_queue(wq, &wait);
619}
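/*
 * Typical caller pattern (a hedged sketch): the sleep condition is
 * tested under the spinlock that serialises the wakeup, and xlog_wait()
 * drops that lock only after queueing itself, closing the race with the
 * waker:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state != XLOG_STATE_ACTIVE)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */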
620
621int xlog_wait_on_iclog(struct xlog_in_core *iclog);
622
623/*
624 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
625 * means that the next log record that includes this metadata could have a
626 * smaller LSN. In turn, this means that the modification in the log would not
627 * replay.
628 */
629static inline bool
630xlog_valid_lsn(
631	struct xlog	*log,
632	xfs_lsn_t	lsn)
633{
634	int		cur_cycle;
635	int		cur_block;
636	bool		valid = true;
637
638	/*
639	 * First, sample the current lsn without locking to avoid added
640	 * contention from metadata I/O. The current cycle and block are updated
641	 * (in xlog_state_switch_iclogs()) and read here in a particular order
642	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
643	 * when it is not).
644	 *
645	 * The current block is always rewound before the cycle is bumped in
646	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
647	 * a transiently forward state. Instead, we can see the LSN in a
648	 * transiently behind state if we happen to race with a cycle wrap.
649	 */
650	cur_cycle = READ_ONCE(log->l_curr_cycle);
651	smp_rmb();
652	cur_block = READ_ONCE(log->l_curr_block);
653
654	if ((CYCLE_LSN(lsn) > cur_cycle) ||
655	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
656		/*
657		 * If the metadata LSN appears invalid, it's possible the check
658		 * above raced with a wrap to the next log cycle. Grab the lock
659		 * to check for sure.
660		 */
661		spin_lock(&log->l_icloglock);
662		cur_cycle = log->l_curr_cycle;
663		cur_block = log->l_curr_block;
664		spin_unlock(&log->l_icloglock);
665
666		if ((CYCLE_LSN(lsn) > cur_cycle) ||
667		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
668			valid = false;
669	}
670
671	return valid;
672}
673
674/*
675 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
676 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
677 * to fall back to vmalloc, so we can't actually do anything useful with gfp
678 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
679 * will do direct reclaim and compaction in the slow path, both of which are
680 * horrendously expensive. We just want kmalloc to fail fast and fall back to
681 * vmalloc if it can't get something straight away from the free lists or
682 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
683 *
684 * This assumes that the caller uses memalloc_nofs_save task context here, so
685 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
686 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
687 * allocations, so let's just all pretend this is a GFP_KERNEL context
688 * operation....
689 */
690static inline void *
691xlog_kvmalloc(
692	size_t		buf_size)
693{
694	gfp_t		flags = GFP_KERNEL;
695	void		*p;
696
697	flags &= ~__GFP_DIRECT_RECLAIM;
698	flags |= __GFP_NOWARN | __GFP_NORETRY;
699	do {
700		p = kmalloc(buf_size, flags);
701		if (!p)
702			p = vmalloc(buf_size);
703	} while (!p);
704
705	return p;
706}
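/*
 * A hedged sketch of the caller-side contract described above: the task
 * enters a NOFS allocation scope first, so the nominal GFP_KERNEL flags
 * behave as GFP_NOFS (the helper name is the editor's invention):
 */
static inline void *
xlog_kvmalloc_nofs_sketch(size_t buf_size)
{
	unsigned int	nofs_flags = memalloc_nofs_save();
	void		*p = xlog_kvmalloc(buf_size);

	memalloc_nofs_restore(nofs_flags);
	return p;
}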
707
708/*
709 * CIL CPU dead notifier
710 */
711void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);
712
713#endif	/* __XFS_LOG_PRIV_H__ */
v4.10.11
 
  1/*
  2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#ifndef	__XFS_LOG_PRIV_H__
 19#define __XFS_LOG_PRIV_H__
 20
 21struct xfs_buf;
 22struct xlog;
 23struct xlog_ticket;
 24struct xfs_mount;
 25struct xfs_log_callback;
 26
 27/*
 28 * Flags for log structure
 29 */
 30#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
 31#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
 32#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
 33					   shutdown */
 34#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */
 35
 36/*
 37 * get client id from packed copy.
 38 *
 39 * this hack is here because the xlog_pack code copies four bytes
 40 * of xlog_op_header containing the fields oh_clientid, oh_flags
 41 * and oh_res2 into the packed copy.
 42 *
 43 * later on this four byte chunk is treated as an int and the
 44 * client id is pulled out.
 45 *
 46 * this has endian issues, of course.
 47 */
 48static inline uint xlog_get_client_id(__be32 i)
 49{
 50	return be32_to_cpu(i) >> 24;
 51}
 52
 53/*
 54 * In core log state
 55 */
 56#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
 57#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
 58#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
 59#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
 60#define XLOG_STATE_DO_CALLBACK \
 61			     0x0010 /* Process callback functions */
 62#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
 63#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status*/
 64#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
 65#define XLOG_STATE_IOABORT   0x0100 /* force abort on I/O completion (debug) */
 66#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
 67#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
 68
 69/*
 70 * Flags to log ticket
 71 */
 72#define XLOG_TIC_INITED		0x1	/* has been initialized */
 73#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
 74
 75#define XLOG_TIC_FLAGS \
 76	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
 77	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 78
 79/*
 80 * Below are states for covering allocation transactions.
 81 * By covering, we mean changing the h_tail_lsn in the last on-disk
 82 * log write such that no allocation transactions will be re-done during
 83 * recovery after a system crash. Recovery starts at the last on-disk
 84 * log write.
 85 *
 86 * These states are used to insert dummy log entries to cover
 87 * space allocation transactions which can undo non-transactional changes
 88 * after a crash. Writes to a file with space
 89 * already allocated do not result in any transactions. Allocations
 90 * might include space beyond the EOF. So if we just push the EOF a
 91 * little, the last transaction for the file could contain the wrong
 92 * size. If there is no file system activity, after an allocation
 93 * transaction, and the system crashes, the allocation transaction
 94 * will get replayed and the file will be truncated. This could
 95 * be hours/days/... after the allocation occurred.
 96 *
 97 * The fix for this is to do two dummy transactions when the
 98 * system is idle. We need two dummy transactions because the h_tail_lsn
 99 * in the log record header needs to point beyond the last possible
100 * non-dummy transaction. The first dummy changes the h_tail_lsn to
101 * the first transaction before the dummy. The second dummy causes
102 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
103 *
104 * These dummy transactions get committed when everything
105 * is idle (after there has been some activity).
106 *
107 * There are 5 states used to control this.
108 *
109 *  IDLE -- no logging has been done on the file system or
110 *		we are done covering previous transactions.
111 *  NEED -- logging has occurred and we need a dummy transaction
112 *		when the log becomes idle.
113 *  DONE -- we were in the NEED state and have committed a dummy
114 *		transaction.
115 *  NEED2 -- we detected that a dummy transaction has gone to the
116 *		on disk log with no other transactions.
117 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
118 *
119 * There are two places where we switch states:
120 *
121 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
122 *	We commit the dummy transaction and switch to DONE or DONE2,
123 *	respectively. In all other states, we don't do anything.
124 *
125 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
126 *
127 *	No matter what state we are in, if this isn't the dummy
128 *	transaction going out, the next state is NEED.
129 *	So, if we aren't in the DONE or DONE2 states, the next state
130 *	is NEED. We can't be finishing a write of the dummy record
131 *	unless it was committed and the state switched to DONE or DONE2.
132 *
133 *	If we are in the DONE state and this was a write of the
134 *		dummy transaction, we move to NEED2.
135 *
136 *	If we are in the DONE2 state and this was a write of the
137 *		dummy transaction, we move to IDLE.
138 *
139 *
140 * If only one dummy transaction is written, it can get appended to
141 * one file space allocation. When this happens, the log recovery
142 * code replays the space allocation and a file could be truncated.
143 * This is why we have the NEED2 and DONE2 states before going idle.
144 */
145
146#define XLOG_STATE_COVER_IDLE	0
147#define XLOG_STATE_COVER_NEED	1
148#define XLOG_STATE_COVER_DONE	2
149#define XLOG_STATE_COVER_NEED2	3
150#define XLOG_STATE_COVER_DONE2	4
151
152#define XLOG_COVER_OPS		5
153
154/* Ticket reservation region accounting */ 
155#define XLOG_TIC_LEN_MAX	15
156
157/*
158 * Reservation region
159 * As would be stored in xfs_log_iovec but without the i_addr which
160 * we don't care about.
161 */
162typedef struct xlog_res {
163	uint	r_len;	/* region length		:4 */
164	uint	r_type;	/* region's transaction type	:4 */
165} xlog_res_t;
166
167typedef struct xlog_ticket {
168	struct list_head   t_queue;	 /* reserve/write queue */
169	struct task_struct *t_task;	 /* task that owns this ticket */
170	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
171	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
172	int		   t_curr_res;	 /* current reservation in bytes : 4  */
173	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
174	char		   t_ocnt;	 /* original count		 : 1  */
175	char		   t_cnt;	 /* current count		 : 1  */
176	char		   t_clientid;	 /* who does this belong to	 : 1  */
177	char		   t_flags;	 /* properties of reservation	 : 1  */
178
179        /* reservation array fields */
180	uint		   t_res_num;                    /* num in array : 4 */
181	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
182	uint		   t_res_arr_sum;		 /* array sum    : 4 */
183	uint		   t_res_o_flow;		 /* sum overflow : 4 */
184	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */ 
185} xlog_ticket_t;
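/*
 * A hedged sketch of how the reservation array above is filled as
 * regions are accounted (modelled on xlog_tic_add_region()): when the
 * array fills, the running sum spills into t_res_o_flow and counting
 * restarts.
 */
static inline void
xlog_tic_add_region_sketch(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}
	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}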
186
187/*
188 * - A log record header is 512 bytes.  There is plenty of room to grow the
189 *	xlog_rec_header_t into the reserved space.
190 * - ic_data follows, so a write to disk can start at the beginning of
191 *	the iclog.
192 * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
193 * - ic_next is the pointer to the next iclog in the ring.
194 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
195 * - ic_log is a pointer back to the global log structure.
196 * - ic_callback is a linked list of callback function/argument pairs to be
197 *	called after an iclog finishes writing.
198 * - ic_size is the full size of the header plus data.
199 * - ic_offset is the current number of bytes written to in this iclog.
200 * - ic_refcnt is bumped when someone is writing to the log.
201 * - ic_state is the state of the iclog.
202 *
203 * Because of cacheline contention on large machines, we need to separate
204 * various resources onto different cachelines. To start with, make the
205 * structure cacheline aligned. The following fields can be contended on
206 * by independent processes:
207 *
208 *	- ic_callback_*
209 *	- ic_refcnt
210 *	- fields protected by the global l_icloglock
211 *
212 * so we need to ensure that these fields are located in separate cachelines.
213 * We'll put all the read-only and l_icloglock fields in the first cacheline,
214 * and move everything else out to subsequent cachelines.
215 */
216typedef struct xlog_in_core {
217	wait_queue_head_t	ic_force_wait;
218	wait_queue_head_t	ic_write_wait;
219	struct xlog_in_core	*ic_next;
220	struct xlog_in_core	*ic_prev;
221	struct xfs_buf		*ic_bp;
222	struct xlog		*ic_log;
223	int			ic_size;
224	int			ic_offset;
225	int			ic_bwritecnt;
226	unsigned short		ic_state;
227	char			*ic_datap;	/* pointer to iclog data */
228
229	/* Callback structures need their own cacheline */
230	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
231	struct xfs_log_callback	*ic_callback;
232	struct xfs_log_callback	**ic_callback_tail;
233
234	/* reference counts need their own cacheline */
235	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
236	xlog_in_core_2_t	*ic_data;
237#define ic_header	ic_data->hic_header
238} xlog_in_core_t;
239
240/*
241 * The CIL context is used to aggregate per-transaction details, and is then
242 * passed to the iclog for checkpoint post-commit processing.  After being
243 * passed to the iclog, another context needs to be allocated for tracking the
244 * next set of transactions to be aggregated into a checkpoint.
245 */
246struct xfs_cil;
247
248struct xfs_cil_ctx {
249	struct xfs_cil		*cil;
250	xfs_lsn_t		sequence;	/* chkpt sequence # */
251	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
252	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
253	struct xlog_ticket	*ticket;	/* chkpt ticket */
254	int			nvecs;		/* number of regions */
255	int			space_used;	/* aggregate size of regions */
256	struct list_head	busy_extents;	/* busy extents in chkpt */
257	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
258	struct xfs_log_callback	log_cb;		/* completion callback hook. */
259	struct list_head	committing;	/* ctx committing list */
260};
261
262/*
263 * Committed Item List structure
264 *
265 * This structure is used to track log items that have been committed but not
266 * yet written into the log. It is used only when the delayed logging mount
267 * option is enabled.
268 *
269 * This structure tracks the list of committing checkpoint contexts so
270 * we can avoid the problem of having to hold out new transactions during a
271 * flush until we have the commit record LSN of the checkpoint. We can
272 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
273 * sequence match and extract the commit LSN directly from there. If the
274 * checkpoint is still in the process of committing, we can block waiting for
275 * the commit LSN to be determined as well. This should make synchronous
276 * operations almost as efficient as the old logging methods.
277 */
278struct xfs_cil {
279	struct xlog		*xc_log;
280	struct list_head	xc_cil;
281	spinlock_t		xc_cil_lock;
282
283	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
284	struct xfs_cil_ctx	*xc_ctx;
285
286	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
287	xfs_lsn_t		xc_push_seq;
288	struct list_head	xc_committing;
289	wait_queue_head_t	xc_commit_wait;
290	xfs_lsn_t		xc_current_sequence;
291	struct work_struct	xc_push_work;
292} ____cacheline_aligned_in_smp;
293
294/*
295 * The amount of log space we allow the CIL to aggregate is difficult to size.
296 * Whatever we choose, we have to make sure we can get a reservation for the
297 * log space effectively, that it is large enough to capture sufficient
298 * relogging to reduce log buffer IO significantly, but it is not too large for
299 * the log and does not induce too much latency when writing out through the iclogs. We
300 * track both space consumed and the number of vectors in the checkpoint
301 * context, so we need to decide which to use for limiting.
302 *
303 * Every log buffer we write out during a push needs a header reserved, which
304 * is at least one sector and more for v2 logs. Hence we need a reservation of
305 * at least 512 bytes per 32k of log space just for the LR headers. That means
306 * 16KB of reservation per megabyte of delayed logging space we will consume,
307 * plus various headers.  The number of headers will vary based on the number
308 * of io vectors, so limiting on a specific number of vectors is going to result
309 * in transactions of varying size. IOWs, it is more consistent to track and
310 * limit space consumed in the log rather than by the number of objects being
311 * logged in order to prevent checkpoint ticket overruns.
312 *
313 * Further, use of static reservations through the log grant mechanism is
314 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
315 * grant) and a significant deadlock potential because regranting write space
316 * can block on log pushes. Hence if we have to regrant log space during a log
317 * push, we can deadlock.
318 *
319 * However, we can avoid this by use of a dynamic "reservation stealing"
320 * technique during transaction commit whereby unused reservation space in the
321 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
322 * space needed by the checkpoint transaction. This means that we never need to
323 * specifically reserve space for the CIL checkpoint transaction, nor do we
324 * need to regrant space once the checkpoint completes. This also means the
325 * checkpoint transaction ticket is specific to the checkpoint context, rather
326 * than the CIL itself.
327 *
328 * With dynamic reservations, we can effectively make up arbitrary limits for
329 * the checkpoint size so long as they don't violate any other size rules.
330 * Recovery imposes a rule that no transaction exceed half the log, so we are
331 * limited by that.  Furthermore, the log transaction reservation subsystem
332 * tries to keep 25% of the log free, so we need to keep below that limit or we
333 * risk running out of free log space to start any new transactions.
334 *
335 * In order to keep background CIL push efficient, we will set a lower
336 * threshold at which background pushing is attempted without blocking current
337 * transaction commits.  A separate, higher bound defines when CIL pushes are
338 * enforced to ensure we stay within our maximum checkpoint size bounds, yet
339 * give us plenty of space for aggregation on large logs.
340 */
341#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
342
343/*
344 * ticket grant locks, queues and accounting have their own cachelines
345 * as these are quite hot and can be operated on concurrently.
346 */
347struct xlog_grant_head {
348	spinlock_t		lock ____cacheline_aligned_in_smp;
349	struct list_head	waiters;
350	atomic64_t		grant;
351};
352
353/*
354 * The reservation head lsn is not made up of a cycle number and block number.
355 * Instead, it uses a cycle number and byte number.  Logs don't expect to
356 * overflow 31 bits worth of byte offset, so using a byte number will mean
357 * that round off problems won't occur when releasing partial reservations.
358 */
359struct xlog {
360	/* The following fields don't need locking */
361	struct xfs_mount	*l_mp;	        /* mount point */
362	struct xfs_ail		*l_ailp;	/* AIL log is working with */
363	struct xfs_cil		*l_cilp;	/* CIL log is working with */
364	struct xfs_buf		*l_xbuf;        /* extra buffer for log
365						 * wrapping */
366	struct xfs_buftarg	*l_targ;        /* buftarg of log */
367	struct delayed_work	l_work;		/* background flush work */
368	uint			l_flags;
369	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
370	struct list_head	*l_buf_cancel_table;
371	int			l_iclog_hsize;  /* size of iclog header */
372	int			l_iclog_heads;  /* # of iclog header sectors */
373	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
374	int			l_iclog_size;	/* size of each iclog in bytes */
375	int			l_iclog_size_log; /* log2 of l_iclog_size */
376	int			l_iclog_bufs;	/* number of iclog buffers */
377	xfs_daddr_t		l_logBBstart;   /* start block of log */
378	int			l_logsize;      /* size of log in bytes */
379	int			l_logBBsize;    /* size of log in BB chunks */
380
381	/* The following block of fields are changed while holding icloglock */
382	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
383						/* waiting for iclog flush */
384	int			l_covered_state;/* state of "covering disk
385						 * log entries" */
386	xlog_in_core_t		*l_iclog;       /* head log queue	*/
387	spinlock_t		l_icloglock;    /* grab to change iclog state */
388	int			l_curr_cycle;   /* Cycle number of log writes */
389	int			l_prev_cycle;   /* Cycle number before last
390						 * block increment */
391	int			l_curr_block;   /* current logical log block */
392	int			l_prev_block;   /* previous logical log block */
393
394	/*
395	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
396	 * read without needing to hold specific locks. To avoid operations
397	 * contending with other hot objects, place each of them on a separate
398	 * cacheline.
399	 */
400	/* lsn of last LR on disk */
401	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
402	/* lsn of 1st LR with unflushed buffers */
403	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
404
405	struct xlog_grant_head	l_reserve_head;
406	struct xlog_grant_head	l_write_head;
407
408	struct xfs_kobj		l_kobj;
409
410	/* The following fields are used for debugging; need to hold icloglock */
411#ifdef DEBUG
412	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
413	/* log record crc error injection factor */
414	uint32_t		l_badcrc_factor;
415#endif
416	/* log recovery lsn tracking (for buffer submission) */
417	xfs_lsn_t		l_recovery_lsn;
418};
419
420#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
421	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
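/*
 * Usage sketch (hedged; "bcp" is a hypothetical xfs_buf_cancel entry):
 * log recovery hashes a cancelled buffer's start block into one of the
 * table's list heads:
 *
 *	struct list_head *bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
 *	list_add(&bcp->bc_list, bucket);
 */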
422
423#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
424
425/* common routines */
426extern int
427xlog_recover(
428	struct xlog		*log);
429extern int
430xlog_recover_finish(
431	struct xlog		*log);
432extern int
433xlog_recover_cancel(struct xlog *);
434
435extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
436			    char *dp, int size);
437
438extern kmem_zone_t *xfs_log_ticket_zone;
439struct xlog_ticket *
440xlog_ticket_alloc(
441	struct xlog	*log,
442	int		unit_bytes,
443	int		count,
444	char		client,
445	bool		permanent,
446	xfs_km_flags_t	alloc_flags);
447
448
449static inline void
450xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
451{
452	*ptr += bytes;
453	*len -= bytes;
454	*off += bytes;
455}
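/*
 * Example (hedged): after copying a log iovec region into the iclog
 * data area, the write pointer, remaining length and record offset all
 * advance together:
 *
 *	memcpy(ptr, reg->i_addr, reg->i_len);
 *	xlog_write_adv_cnt(&ptr, &len, &log_offset, reg->i_len);
 */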
456
457void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
458int
459xlog_write(
460	struct xlog		*log,
461	struct xfs_log_vec	*log_vector,
462	struct xlog_ticket	*tic,
463	xfs_lsn_t		*start_lsn,
464	struct xlog_in_core	**commit_iclog,
465	uint			flags);
466
467/*
468 * When we crack an atomic LSN, we sample it first so that the value will not
469 * change while we are cracking it into the component values. This means we
470 * will always get consistent component values to work from. This should always
471 * be used to sample and crack LSNs that are stored and updated in atomic
472 * variables.
473 */
474static inline void
475xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
476{
477	xfs_lsn_t val = atomic64_read(lsn);
478
479	*cycle = CYCLE_LSN(val);
480	*block = BLOCK_LSN(val);
481}
482
483/*
484 * Calculate and assign a value to an atomic LSN variable from component pieces.
485 */
486static inline void
487xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
488{
489	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
490}
491
492/*
493 * When we crack the grant head, we sample it first so that the value will not
494 * change while we are cracking it into the component values. This means we
495 * will always get consistent component values to work from.
496 */
497static inline void
498xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
499{
500	*cycle = val >> 32;
501	*space = val & 0xffffffff;
502}
503
504static inline void
505xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
506{
507	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
508}
509
510static inline int64_t
511xlog_assign_grant_head_val(int cycle, int space)
512{
513	return ((int64_t)cycle << 32) | space;
514}
515
516static inline void
517xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
518{
519	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
520}
521
522/*
523 * Committed Item List interfaces
524 */
525int	xlog_cil_init(struct xlog *log);
526void	xlog_cil_init_post_recovery(struct xlog *log);
527void	xlog_cil_destroy(struct xlog *log);
528bool	xlog_cil_empty(struct xlog *log);
529
530/*
531 * CIL force routines
532 */
533xfs_lsn_t
534xlog_cil_force_lsn(
535	struct xlog *log,
536	xfs_lsn_t sequence);
537
538static inline void
539xlog_cil_force(struct xlog *log)
540{
541	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
542}
543
544/*
545 * Unmount record type is used as a pseudo transaction type for the ticket.
546 * Its value must be outside the range of XFS_TRANS_* values.
547 */
548#define XLOG_UNMOUNT_REC_TYPE	(-1U)
549
550/*
551 * Wrapper function for waiting on a wait queue serialised against wakeups
552 * by a spinlock. This matches the semantics of all the wait queues used in the
553 * log code.
554 */
555static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
556{
557	DECLARE_WAITQUEUE(wait, current);
558
559	add_wait_queue_exclusive(wq, &wait);
560	__set_current_state(TASK_UNINTERRUPTIBLE);
561	spin_unlock(lock);
562	schedule();
563	remove_wait_queue(wq, &wait);
564}
565
566/*
567 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
568 * means that the next log record that includes this metadata could have a
569 * smaller LSN. In turn, this means that the modification in the log would not
570 * replay.
571 */
572static inline bool
573xlog_valid_lsn(
574	struct xlog	*log,
575	xfs_lsn_t	lsn)
576{
577	int		cur_cycle;
578	int		cur_block;
579	bool		valid = true;
580
581	/*
582	 * First, sample the current lsn without locking to avoid added
583	 * contention from metadata I/O. The current cycle and block are updated
584	 * (in xlog_state_switch_iclogs()) and read here in a particular order
585	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
586	 * when it is not).
587	 *
588	 * The current block is always rewound before the cycle is bumped in
589	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
590	 * a transiently forward state. Instead, we can see the LSN in a
591	 * transiently behind state if we happen to race with a cycle wrap.
592	 */
593	cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
594	smp_rmb();
595	cur_block = ACCESS_ONCE(log->l_curr_block);
596
597	if ((CYCLE_LSN(lsn) > cur_cycle) ||
598	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
599		/*
600		 * If the metadata LSN appears invalid, it's possible the check
601		 * above raced with a wrap to the next log cycle. Grab the lock
602		 * to check for sure.
603		 */
604		spin_lock(&log->l_icloglock);
605		cur_cycle = log->l_curr_cycle;
606		cur_block = log->l_curr_block;
607		spin_unlock(&log->l_icloglock);
608
609		if ((CYCLE_LSN(lsn) > cur_cycle) ||
610		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
611			valid = false;
612	}
613
614	return valid;
615}
616
617#endif	/* __XFS_LOG_PRIV_H__ */