v3.1
 
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct log;
struct xlog_ticket;
struct xfs_mount;

/*
 * Macros, structures, prototypes for internal log manager use.
 */

#define XLOG_MIN_ICLOGS		2
#define XLOG_MAX_ICLOGS		8
#define XLOG_HEADER_MAGIC_NUM	0xFEEDbabe	/* Invalid cycle number */
#define XLOG_VERSION_1		1
#define XLOG_VERSION_2		2		/* Large IClogs, Log sunit */
#define XLOG_VERSION_OKBITS	(XLOG_VERSION_1 | XLOG_VERSION_2)
#define XLOG_MIN_RECORD_BSIZE	(16*1024)	/* eventually 32k */
#define XLOG_BIG_RECORD_BSIZE	(32*1024)	/* 32k buffers */
#define XLOG_MAX_RECORD_BSIZE	(256*1024)
#define XLOG_HEADER_CYCLE_SIZE	(32*1024)	/* cycle data in header */
#define XLOG_MIN_RECORD_BSHIFT	14		/* 16384 == 1 << 14 */
#define XLOG_BIG_RECORD_BSHIFT	15		/* 32k == 1 << 15 */
#define XLOG_MAX_RECORD_BSHIFT	18		/* 256k == 1 << 18 */
#define XLOG_BTOLSUNIT(log, b)  (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
                                 (log)->l_mp->m_sb.sb_logsunit)
#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)

#define XLOG_HEADER_SIZE	512

#define XLOG_REC_SHIFT(log) \
	BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
#define XLOG_TOTAL_REC_SHIFT(log) \
	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))

static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
{
	return ((xfs_lsn_t)cycle << 32) | block;
}

static inline uint xlog_get_cycle(char *ptr)
{
	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
		return be32_to_cpu(*((__be32 *)ptr + 1));
	else
		return be32_to_cpu(*(__be32 *)ptr);
}
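
/*
 * Editor's illustration (not part of the original header): xlog_assign_lsn()
 * packs the cycle into the high 32 bits of the LSN and the block into the
 * low 32 bits, so the CYCLE_LSN()/BLOCK_LSN() macros from xfs_log.h recover
 * the components, e.g.:
 *
 *	xfs_lsn_t lsn = xlog_assign_lsn(5, 1024);
 *	ASSERT(CYCLE_LSN(lsn) == 5);
 *	ASSERT(BLOCK_LSN(lsn) == 1024);
 */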

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

#ifdef __KERNEL__

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
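
/*
 * Editor's illustration (not from the original source): the packed copy
 * lays the bytes out as { oh_clientid, oh_flags, oh_res2 }, so reading
 * them back as a big-endian word and shifting the top byte down recovers
 * the client id, e.g.:
 *
 *	__be32 packed = cpu_to_be32(XFS_TRANSACTION << 24);
 *	ASSERT(xlog_get_client_id(packed) == XFS_TRANSACTION);
 */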

/*
 * In core log state
 */
#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
#define XLOG_STATE_DO_CALLBACK \
			     0x0010 /* Process callback functions */
#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status*/
#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
#endif	/* __KERNEL__ */

/*
 * Flags to log operation header
 *
 * The first write of a new transaction will be preceded with a start
 * record, XLOG_START_TRANS.  Once a transaction is committed, a commit
 * record is written, XLOG_COMMIT_TRANS.  If a single region cannot fit into
 * the remainder of the current active in-core log, it is split up into
 * multiple regions.  Each partial region will be marked with
 * XLOG_CONTINUE_TRANS until the last one, which gets marked with
 * XLOG_END_TRANS.
 */
#define XLOG_START_TRANS	0x01	/* Start a new transaction */
#define XLOG_COMMIT_TRANS	0x02	/* Commit this transaction */
#define XLOG_CONTINUE_TRANS	0x04	/* Cont this trans into new region */
#define XLOG_WAS_CONT_TRANS	0x08	/* Cont this trans into new region */
#define XLOG_END_TRANS		0x10	/* End a continued transaction */
#define XLOG_UNMOUNT_TRANS	0x20	/* Unmount a filesystem transaction */
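
/*
 * Editor's illustration (hypothetical sequence, not from the original
 * source): a committed transaction whose one region had to be split
 * across the current and the next iclog would emit op headers flagged
 * roughly as:
 *
 *	oh_flags = XLOG_START_TRANS	start record
 *	oh_flags = XLOG_CONTINUE_TRANS	first partial region
 *	oh_flags = XLOG_END_TRANS	last partial region
 *	oh_flags = XLOG_COMMIT_TRANS	commit record
 */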

#ifdef __KERNEL__
/*
 * Flags to log ticket
 */
#define XLOG_TIC_INITED		0x1	/* has been initialized */
#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

#endif	/* __KERNEL__ */

#define XLOG_UNMOUNT_TYPE	0x556e	/* Un for Unmount */

/*
 * Flags for log structure
 */
#define XLOG_CHKSUM_MISMATCH	0x1	/* used only during recovery */
#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
#define XLOG_IO_ERROR		0x8	/* log hit an I/O error and is
					   being shut down */
#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */

typedef __uint32_t xlog_tid_t;

#ifdef __KERNEL__
/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity, after an allocation
 * transaction, and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * A single dummy transaction can end up in the same log write as a new
 * file space allocation; the log recovery code would then replay that
 * allocation and a file could be truncated. This is why we need the
 * NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
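
/*
 * Editor's sketch of the transitions described above, as they would apply
 * when a log write completes (hypothetical helper, not from the original
 * source; the real logic lives in xlog_state_clean_log()):
 */
static inline int
xlog_cover_next_state(int cur_state, bool dummy_going_out)
{
	if (!dummy_going_out)
		return XLOG_STATE_COVER_NEED;	/* real work: cover again */

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;	/* first dummy hit disk */
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;	/* second dummy: covered */
	default:
		return XLOG_STATE_COVER_NEED;
	}
}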

/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	wait_queue_head_t  t_wait;	 /* ticket wait queue */
	struct list_head   t_queue;	 /* reserve/write queue */
	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
	int		   t_curr_res;	 /* current reservation in bytes : 4  */
	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
	char		   t_ocnt;	 /* original count		 : 1  */
	char		   t_cnt;	 /* current count		 : 1  */
	char		   t_clientid;	 /* who does this belong to;	 : 1  */
	char		   t_flags;	 /* properties of reservation	 : 1  */
	uint		   t_trans_type; /* transaction type             : 4  */

	/* reservation array fields */
	uint		   t_res_num;                    /* num in array : 4 */
	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
	uint		   t_res_arr_sum;		 /* array sum    : 4 */
	uint		   t_res_o_flow;		 /* sum overflow : 4 */
	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */
} xlog_ticket_t;

#endif


typedef struct xlog_op_header {
	__be32	   oh_tid;	/* transaction id of operation	:  4 b */
	__be32	   oh_len;	/* bytes in data region		:  4 b */
	__u8	   oh_clientid;	/* who sent me this		:  1 b */
	__u8	   oh_flags;	/*				:  1 b */
	__u16	   oh_res2;	/* 32 bit align			:  2 b */
} xlog_op_header_t;


/* valid values for h_fmt */
#define XLOG_FMT_UNKNOWN  0
#define XLOG_FMT_LINUX_LE 1
#define XLOG_FMT_LINUX_BE 2
#define XLOG_FMT_IRIX_BE  3

/* our fmt */
#ifdef XFS_NATIVE_HOST
#define XLOG_FMT XLOG_FMT_LINUX_BE
#else
#define XLOG_FMT XLOG_FMT_LINUX_LE
#endif

typedef struct xlog_rec_header {
	__be32	  h_magicno;	/* log record (LR) identifier		:  4 */
	__be32	  h_cycle;	/* write cycle of log			:  4 */
	__be32	  h_version;	/* LR version				:  4 */
	__be32	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
	__be64	  h_lsn;	/* lsn of this LR			:  8 */
	__be64	  h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
	__be32	  h_chksum;	/* may not be used; non-zero if used	:  4 */
	__be32	  h_prev_block; /* block number to previous LR		:  4 */
	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
	/* new fields */
	__be32    h_fmt;        /* format of log record                 :  4 */
	uuid_t	  h_fs_uuid;    /* uuid of FS                           : 16 */
	__be32	  h_size;	/* iclog size				:  4 */
} xlog_rec_header_t;
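
/*
 * Editor's note (assumption based on the packing code, not from the
 * original header): xlog_pack_data() saves the first 32-bit word of each
 * 512-byte basic block of the record into h_cycle_data and writes the
 * cycle number over it, so that xlog_get_cycle() above can read a cycle
 * number from any block on disk; recovery restores the saved words.
 */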

typedef struct xlog_rec_ext_header {
	__be32	  xh_cycle;	/* write cycle of log			: 4 */
	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
} xlog_rec_ext_header_t;

#ifdef __KERNEL__

/*
 * Quite misnamed, because this union lays out the actual on-disk log buffer.
 */
typedef union xlog_in_core2 {
	xlog_rec_header_t	hic_header;
	xlog_rec_ext_header_t	hic_xheader;
	char			hic_sector[XLOG_HEADER_SIZE];
} xlog_in_core_2_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
 * - ic_log is a pointer back to the global log structure.
 * - ic_callback is a linked list of callback function/argument pairs to be
 *	called after an iclog finishes writing.
 * - ic_size is the full size of the header plus data.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callback_*
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xfs_buf		*ic_bp;
	struct log		*ic_log;
	int			ic_size;
	int			ic_offset;
	int			ic_bwritecnt;
	unsigned short		ic_state;
	char			*ic_datap;	/* pointer to iclog data */

	/* Callback structures need their own cacheline */
	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
	xfs_log_callback_t	*ic_callback;
	xfs_log_callback_t	**ic_callback_tail;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_lsn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	xfs_log_callback_t	log_cb;		/* completion callback hook. */
	struct list_head	committing;	/* ctx committing list */
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct log		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;
	struct xfs_cil_ctx	*xc_ctx;
	struct rw_semaphore	xc_ctx_lock;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	xfs_lsn_t		xc_current_sequence;
};

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the num of
 * io vectors, so limiting on a specific number of vectors is going to result
 * in transactions of varying size. IOWs, it is more consistent to track and
 * limit space consumed in the log rather than by the number of objects being
 * logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we set a lower threshold at
 * which background pushing is attempted without blocking current transaction
 * commits.  A separate, higher bound defines when CIL pushes are enforced to
 * ensure we stay within our maximum checkpoint size bounds; this keeps the
 * hard limit above the background threshold, yet gives us plenty of space for
 * aggregation on large logs.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
#define XLOG_CIL_HARD_SPACE_LIMIT(log)	(3 * (log->l_logsize >> 4))
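
/*
 * Worked example (editor's illustration): for a 128MB log, background
 * pushing starts once the CIL holds 128MB / 8 = 16MB, and pushes are
 * enforced at the hard limit of 3 * (128MB / 16) = 24MB.
 */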

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
typedef struct log {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buf		*l_xbuf;        /* extra buffer for log
						 * wrapping */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	uint			l_flags;
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_size_log; /* log power size of log */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	/*
	 * Ticket grant locks, queues and accounting have their own cachelines
	 * as these are quite hot and can be operated on concurrently.
	 */
	spinlock_t		l_grant_reserve_lock ____cacheline_aligned_in_smp;
	struct list_head	l_reserveq;
	atomic64_t		l_grant_reserve_head;

	spinlock_t		l_grant_write_lock ____cacheline_aligned_in_smp;
	struct list_head	l_writeq;
	atomic64_t		l_grant_write_head;

	/* The following fields are used for debugging; need to hold icloglock */
#ifdef DEBUG
	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
#endif

} xlog_t;
 
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)

/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
extern int	 xlog_recover(xlog_t *log);
extern int	 xlog_recover_finish(xlog_t *log);
extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);

extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
				int count, char client, uint xflags,
				int alloc_flags);

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
int	xlog_write(struct log *log, struct xfs_log_vec *log_vector,
				struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
				xlog_in_core_t **commit_iclog, uint flags);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
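
/*
 * Editor's sketch (hypothetical helper, not from the original source):
 * advancing a grant head by a number of bytes wraps the byte offset at
 * the physical log size and bumps the cycle, mirroring how the grant
 * code moves these heads. Assumes bytes < log_size.
 */
static inline int64_t
xlog_grant_head_add(int64_t head, int log_size, int bytes)
{
	int	cycle, space;

	xlog_crack_grant_head_val(head, &cycle, &space);
	space += bytes;
	if (space >= log_size) {
		/* wrapped past the physical end of the log */
		space -= log_size;
		cycle++;
	}
	return xlog_assign_grant_head_val(cycle, space);
}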

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct log *log);
void	xlog_cil_init_post_recovery(struct log *log);
void	xlog_cil_destroy(struct log *log);

/*
 * CIL force routines
 */
xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);

static inline void
xlog_cil_force(struct log *log)
{
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * Its value must be outside the range of XFS_TRANS_* values.
 */
#define XLOG_UNMOUNT_REC_TYPE	(-1U)

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
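
/*
 * Editor's illustration of the calling convention (hypothetical caller,
 * not from the original source): the caller tests its wait condition
 * under the same spinlock the waker holds, and xlog_wait() drops that
 * lock for it:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state == XLOG_STATE_SYNCING)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */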
#endif	/* __KERNEL__ */

#endif	/* __XFS_LOG_PRIV_H__ */
v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * Flags for log structure
 */
#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
#define XLOG_IO_ERROR		0x8	/* log hit an I/O error and is
					   being shut down */
#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
	XLOG_STATE_IOERROR,	/* IO error happened in sync'ing log */
};

/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	0x1	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity, after an allocation
 * transaction, and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * A single dummy transaction can end up in the same log write as a new
 * file space allocation; the log recovery code would then replay that
 * allocation and a file could be truncated. This is why we need the
 * NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5

/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head   t_queue;	 /* reserve/write queue */
	struct task_struct *t_task;	 /* task that owns this ticket */
	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
	int		   t_curr_res;	 /* current reservation in bytes : 4  */
	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
	char		   t_ocnt;	 /* original count		 : 1  */
	char		   t_cnt;	 /* current count		 : 1  */
	char		   t_clientid;	 /* who does this belong to;	 : 1  */
	char		   t_flags;	 /* properties of reservation	 : 1  */

	/* reservation array fields */
	uint		   t_res_num;                    /* num in array : 4 */
	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
	uint		   t_res_arr_sum;		 /* array sum    : 4 */
	uint		   t_res_o_flow;		 /* sum overflow : 4 */
	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	char			*ic_datap;	/* pointer to iclog data */

	/* Callback structures need their own cacheline */
	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_lsn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_lsn_t		xc_push_seq;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	xfs_lsn_t		xc_current_sequence;
	struct work_struct	xc_push_work;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */
} ____cacheline_aligned_in_smp;

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the num of
 * io vectors, so limiting on a specific number of vectors is going to result
 * in transactions of varying size. IOWs, it is more consistent to track and
 * limit space consumed in the log rather than by the number of objects being
 * logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to be
 *   made without excessive blocking of incoming transaction commits. This is
 *   defined to be 12.5% of the log space - half the 25% push threshold of the
 *   AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
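
/*
 * Worked example (editor's illustration, assuming a v2 log): the iclog
 * window is BBTOB(XLOG_TOTAL_REC_SHIFT(log)) = 8 iclogs * 256KB = 2MB,
 * so the memory bound is 2MB << 4 = 32MB. A 128MB log thus pushes in the
 * background at min(16MB, 32MB) = 16MB and throttles committers at 32MB;
 * a 512MB log pushes at min(64MB, 32MB) = 32MB and throttles at 64MB.
 */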

/*
 * Ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	uint			l_flags;
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* The following fields are used for debugging; need to hold icloglock */
#ifdef DEBUG
	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
#endif
	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log) \
	(unlikely((log)->l_flags & XLOG_IO_ERROR))

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog	*log,
	int		unit_bytes,
	int		count,
	char		client,
	bool		permanent);

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_log_vec *log_vector,
		struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
		struct xlog_in_core **commit_iclog, uint flags,
		bool need_start_rec);
int	xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
		struct xlog_in_core **iclog, xfs_lsn_t *lsn);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);

/*
 * CIL force routines
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog *log,
	xfs_lsn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

#endif	/* __XFS_LOG_PRIV_H__ */