v5.9 (fs/xfs/xfs_log.h)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it.  We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

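/*
 * Worked example (illustrative, not part of this header): finishing a 20 byte
 * region advances lv_buf_len by 24 (20 rounded up to 8 byte uint64_t
 * alignment, so the next region starts naturally aligned), advances lv_bytes
 * by only the 20 bytes that actually reach the log, and records i_len = 20 in
 * the iovec itself.
 */
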
static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

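/*
 * Illustrative sketch, not part of this header: how a log item format routine
 * typically drives the helpers above.  xlog_copy_iovec() handles a region
 * whose contents already exist; the prepare/finish pair builds a region
 * directly in the log vector buffer.  The "foo" item, its format structure
 * and the XLOG_REG_TYPE_FOO_FORMAT region type are hypothetical placeholders;
 * a real item would also have sized lv (iovec count and buffer space) via its
 * ->iop_size method beforehand.
 */
#define XLOG_REG_TYPE_FOO_FORMAT	1	/* hypothetical region type */

struct xfs_foo_log_format {
	uint32_t		foo_type;
	uint32_t		foo_size;
};

static void
xfs_foo_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_foo_log_format foo = {
		.foo_type	= 1,	/* hypothetical type code */
		.foo_size	= sizeof(struct xfs_foo_log_format),
	};
	char			*buf;

	/* region 1: copy an already-built structure into the vector */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT,
			&foo, sizeof(foo));

	/* region 2: build the data in place, then close it with its length */
	buf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT);
	memset(buf, 0, 16);		/* item-specific payload */
	xlog_finish_iovec(lv, vecp, 16);
}
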
/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)

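/*
 * Illustrative sketch, not part of this header: XFS_LSN_CMP() behaves as a
 * three-way comparison, so callers test its sign.  The helper name below is
 * a hypothetical example.
 */
static inline bool
xfs_example_lsn_is_older(xfs_lsn_t lsn, xfs_lsn_t threshold)
{
	/* negative: lsn's (cycle, block) pair sorts before threshold's */
	return XFS_LSN_CMP(lsn, threshold) < 0;
}
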
/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

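/*
 * Illustrative usage, not part of this header:
 *
 *	xfs_log_force(mp, XFS_LOG_SYNC);	wait until the in-core log has
 *						reached stable storage
 *	xfs_log_force(mp, 0);			start the flush without waiting
 *						for completion
 */
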
/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
		int *log_forced);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int		 	num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
void	  xfs_log_release_iclog(struct xlog_in_core *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t		   clientid,
			  bool		   permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void      xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
				xfs_lsn_t *commit_lsn, bool regrant);
void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool	xfs_log_in_recovery(struct xfs_mount *);

#endif	/* __XFS_LOG_H__ */
v5.4 (fs/xfs/xfs_log.h)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it.  We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       bool regrant);
int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
		int *log_forced);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int		 	num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
			 struct xlog_in_core	 *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t		   clientid,
			  bool		   permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void      xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
				xfs_lsn_t *commit_lsn, bool regrant);
void	xlog_cil_process_committed(struct list_head *list, bool aborted);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool	xfs_log_in_recovery(struct xfs_mount *);

#endif	/* __XFS_LOG_H__ */