v3.1
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"

/*
 * Perform initial CIL structure initialisation. If the CIL is not
 * enabled in this filesystem, ensure the log->l_cilp is null so
 * we can check this conditional to determine if we are doing delayed
 * logging or not.
 */
int
xlog_cil_init(
	struct log	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	log->l_cilp = NULL;
	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
		return 0;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct log	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

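/*
 * Illustrative sketch, not part of this file: the "steal from commits"
 * accounting above in miniature, using hypothetical userspace stand-ins for
 * the kernel types. The CIL ticket starts with t_curr_res == 0, so the first
 * transaction to commit donates the checkpoint's unit reservation, and every
 * commit donates the space its items consume (mirroring
 * xlog_cil_insert_items() below).
 */
struct toy_ticket { int t_unit_res; int t_curr_res; };
struct toy_ctx { struct toy_ticket tic; int space_used; };

static void toy_commit(struct toy_ctx *ctx, struct toy_ticket *ttic, int len)
{
	if (ctx->tic.t_curr_res == 0) {	/* first commit in this checkpoint */
		ctx->tic.t_curr_res = ctx->tic.t_unit_res;
		ttic->t_curr_res -= ctx->tic.t_unit_res;
	}
	ttic->t_curr_res -= len;	/* space consumed by the items */
	ctx->space_used += len;
}
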
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}

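/*
 * Illustrative sketch, not part of this file: xlog_assign_lsn() above packs
 * the current cycle and block into one 64-bit LSN. A minimal model, assuming
 * the usual XFS encoding of the cycle in the high 32 bits and the block in
 * the low 32 bits.
 */
#include <stdint.h>

static inline int64_t toy_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((int64_t)cycle << 32) | block;
}
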
/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, and the item is then inserted
 * into the Committed Item List for tracking until the next checkpoint is
 * written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_format_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec *lv;

	ASSERT(log_vector);
	for (lv = log_vector; lv; lv = lv->lv_next) {
		void	*ptr;
		int	index;
		int	len = 0;

		/* build the vector array and calculate its length */
		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
		for (index = 0; index < lv->lv_niovecs; index++)
			len += lv->lv_iovecp[index].i_len;

		lv->lv_buf_len = len;
		lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
		ptr = lv->lv_buf;

		for (index = 0; index < lv->lv_niovecs; index++) {
			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

			memcpy(ptr, vec->i_addr, vec->i_len);
			vec->i_addr = ptr;
			ptr += vec->i_len;
		}
		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
	}
}

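/*
 * Illustrative sketch, not part of this file: the flatten-and-repoint pattern
 * above in plain C. Copy each region into one contiguous snapshot buffer and
 * repoint the iovecs at the copies, so later formatting reads the snapshot
 * rather than the live, possibly changing object.
 */
#include <stdlib.h>
#include <string.h>

struct toy_iovec { void *i_addr; int i_len; };

static char *toy_flatten(struct toy_iovec *vecs, int nvecs)
{
	int i, len = 0;
	char *buf, *ptr;

	for (i = 0; i < nvecs; i++)
		len += vecs[i].i_len;
	buf = ptr = malloc(len);
	if (!buf)
		return NULL;
	for (i = 0; i < nvecs; i++) {
		memcpy(ptr, vecs[i].i_addr, vecs[i].i_len);
		vecs[i].i_addr = ptr;	/* iovec now points into the snapshot */
		ptr += vecs[i].i_len;
	}
	return buf;
}
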
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct log		*log,
	struct xfs_log_vec	*lv,
	int			*len,
	int			*diff_iovecs)
{
	struct xfs_log_vec	*old = lv->lv_item->li_lv;

	if (old) {
		/* existing lv on log item, space used is a delta */
		ASSERT(!list_empty(&lv->lv_item->li_cil));
		ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);

		*len += lv->lv_buf_len - old->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
		kmem_free(old->lv_buf);
		kmem_free(old);
	} else {
		/* new lv, must pin the log item */
		ASSERT(!lv->lv_item->li_lv);
		ASSERT(list_empty(&lv->lv_item->li_cil));

		*len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
		IOP_PIN(lv->lv_item);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

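/*
 * Worked example, not part of this file (hypothetical numbers): an item
 * already in the CIL with a 128 byte, 2 iovec buffer is relogged with a
 * 200 byte, 3 iovec buffer. Only the growth is charged to the checkpoint:
 *
 *	*len += 200 - 128;		-> len grows by 72 bytes
 *	*diff_iovecs += 3 - 2;		-> one more region, one more ophdr
 *
 * Relogging the same item many times therefore costs the checkpoint the size
 * of the final buffer once, not one buffer per relog.
 */
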
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*ticket)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_vec	*lv;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(log_vector);

	/*
	 * Do all the accounting aggregation and switching of log vectors
	 * around in a separate loop to the insertion of items into the CIL.
	 * Then we can do a separate loop to update the CIL within a single
	 * lock/unlock pair. This reduces the number of round trips on the CIL
	 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
	 * hold time for the transaction commit.
	 *
	 * If this is the first time the item is being placed into the CIL in
	 * this context, pin it so it can't be written to disk until the CIL is
	 * flushed to the iclog and the iclog written to disk.
	 *
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	for (lv = log_vector; lv; lv = lv->lv_next)
		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);

	/* account for space used by new iovec headers  */
	len += diff_iovecs * sizeof(xlog_op_header_t);

	spin_lock(&cil->xc_cil_lock);

	/* move the items to the tail of the CIL */
	for (lv = log_vector; lv; lv = lv->lv_next)
		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);

	ctx->nvecs += diff_iovecs;

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit in checkpoint, steal the header reservation */
		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		ticket->t_curr_res -= hdrs;
		ASSERT(ticket->t_curr_res >= len);
	}
	ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

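/*
 * Worked example, not part of this file: the split-header steal above with
 * hypothetical sizes. With 32kB iclogs and a 512 byte iclog header,
 * iclog_space is 32256 bytes; a commit adding len = 70000 bytes that crosses
 * an iclog boundary steals ceil(70000 / 32256) = 3 record headers' worth of
 * reservation. The 12-byte op header size is an assumption for illustration.
 */
static int toy_split_hdrs(int len, int iclog_size, int iclog_hsize)
{
	int iclog_space = iclog_size - iclog_hsize;
	int hdrs = (len + iclog_space - 1) / iclog_space;
	int ophdr_size = 12;	/* assumed on-disk op header size */

	return hdrs * (iclog_hsize + ophdr_size);
}
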
static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv->lv_buf);
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_alloc_busy_sort(&ctx->busy_extents);
	xfs_alloc_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct log		*log,
	xfs_lsn_t		push_seq)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_lv;
	int			num_iovecs;
	int			len;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;

	if (!cil)
		return 0;

	ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/*
	 * Lock out transaction commit, but don't block for background pushes
	 * unless we are well over the CIL space limit. See the definition of
	 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
	 * used here.
	 */
	if (!down_write_trylock(&cil->xc_ctx_lock)) {
		if (!push_seq &&
		    cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
			goto out_free_ticket;
		down_write(&cil->xc_ctx_lock);
	}
	ctx = cil->xc_ctx;

	/* check if we've anything to push */
	if (list_empty(&cil->xc_cil))
		goto out_skip;

	/* check for spurious background flush */
	if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		goto out_skip;

	/* check for a previously pushed sequence */
	if (push_seq && push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_lv = 0;
	num_iovecs = 0;
	len = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;
		int			i;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;

		num_lv++;
		num_iovecs += lv->lv_niovecs;
		for (i = 0; i < lv->lv_niovecs; i++)
			len += lv->lv_iovecp[i].i_len;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
out_free_ticket:
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * For more specific information about the order of operations in
 * xfs_log_commit_cil() please refer to the comments in
 * xfs_trans_commit_iclog().
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct log		*log = mp->m_log;
	int			log_flags = 0;
	int			push = 0;

	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	/*
	 * do all the hard work of formatting items (including memory
	 * allocation) outside the CIL context lock. This prevents stalling CIL
	 * pushes when we are low on memory and a transaction commit spends a
	 * lot of time in memory reclaim.
	 */
	xlog_cil_format_items(log, log_vector);

	/* lock out background commit */
	down_read(&log->l_cilp->xc_ctx_lock);
	if (commit_lsn)
		*commit_lsn = log->l_cilp->xc_ctx->sequence;

	xlog_cil_insert_items(log, log_vector, tp->t_ticket);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(log->l_mp, tp->t_ticket);

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy)) {
		spin_lock(&log->l_cilp->xc_cil_lock);
		list_splice_init(&tp->t_busy,
					&log->l_cilp->xc_ctx->busy_extents);
		spin_unlock(&log->l_cilp->xc_cil_lock);
	}

	tp->t_commit_lsn = *commit_lsn;
	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, *commit_lsn, 0);

	/* check for background commit before unlock */
	if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
		push = 1;

	up_read(&log->l_cilp->xc_ctx_lock);

	/*
	 * We need to push CIL every so often so we don't cache more than we
	 * can fit in the log. The limit really is that a checkpoint can't be
	 * more than half the log (the current checkpoint is not allowed to
	 * overwrite the previous checkpoint), but commit latency and memory
	 * usage limit this to a smaller size in most cases.
	 */
	if (push)
		xlog_cil_push(log, 0);
}

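/*
 * Illustrative sketch, not part of this file: the push thresholds used above.
 * The fractions (soft limit 1/8th of the log, hard limit 3/16ths) match the
 * XLOG_CIL_SPACE_LIMIT()/XLOG_CIL_HARD_SPACE_LIMIT() definitions of this era;
 * treat the exact fractions as an assumption rather than a stable interface.
 */
static int toy_push_needed(int space_used, int logsize)
{
	int soft = logsize >> 3;	/* XLOG_CIL_SPACE_LIMIT() */
	int hard = 3 * (logsize >> 4);	/* XLOG_CIL_HARD_SPACE_LIMIT() */

	if (space_used >= hard)
		return 2;	/* block commits and push synchronously */
	if (space_used > soft)
		return 1;	/* kick a non-blocking background push */
	return 0;
}
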
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 *
 * XXX: Initially, just push the CIL unconditionally and return whatever
 * commit lsn is there. It'll be empty, so this is broken for now.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct log	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	if (sequence == cil->xc_current_sequence)
		xlog_cil_push(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_cil_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
		return false;
	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}

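/*
 * Worked example for the header-count estimate above, not part of this file
 * (hypothetical sizes): with a 64MB blocking space limit, 32kB iclogs and a
 * 512 byte iclog header, the CIL can span at most
 * 67108864 / (32768 - 512) = 2080 iclogs, so that many iclog headers' worth
 * of reservation is pre-computed up front.
 */
static int toy_iclog_hdr_count(int blocking_limit, int iclog_size,
			       int iclog_hsize)
{
	return blocking_limit / (iclog_size - iclog_hsize);
}
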
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents.extent_list);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts.  Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask.  However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count.  cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}

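/*
 * Illustrative sketch, not part of this file: a userspace model of the
 * zero-and-collect loop above. Atomically swap each per-cpu counter to zero,
 * retrying if a racing increment lands between the read and the exchange,
 * and fold the observed values into one global total.
 */
#include <stdatomic.h>

static int toy_fold_counters(_Atomic int *pcp, int ncpu, _Atomic int *global)
{
	int cpu, count = 0;

	for (cpu = 0; cpu < ncpu; cpu++) {
		int old = atomic_load(&pcp[cpu]);

		/* retry until our swap to 0 wins any race with updaters */
		while (!atomic_compare_exchange_weak(&pcp[cpu], &old, 0))
			;
		count += old;
	}
	return atomic_fetch_add(global, count) + count;
}
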
static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}

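/*
 * Worked example, not part of this file (hypothetical structure sizes): if
 * struct xfs_log_vec were 56 bytes and struct xfs_log_iovec 16 bytes, three
 * iovecs need 56 + 3 * 16 = 104 bytes; round_up(104, 8) == 104, and that is
 * the offset at which the 64-bit aligned data buffer begins. Generic
 * power-of-two rounding as used above:
 */
#define TOY_ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
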
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned.  We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

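/*
 * Minimal sketch of the shadow-buffer policy above, not part of this file
 * (hypothetical userspace helper): keep the last buffer attached to the item
 * and only replace it when the incoming modification no longer fits.
 */
#include <stdlib.h>

struct toy_item { void *shadow; size_t shadow_size; };

static void *toy_get_shadow(struct toy_item *ip, size_t need)
{
	if (!ip->shadow || need > ip->shadow_size) {
		free(ip->shadow);	/* realloc would copy stale data */
		ip->shadow = malloc(need);
		ip->shadow_size = need;
	}
	return ip->shadow;		/* reuse: the common overwrite case */
}
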
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog	*log,
	int32_t		space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	unsigned int		cpu_nr;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancelation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cpu_nr = get_cpu();
	cilpcp = this_cpu_ptr(cil->xc_pcp);

	/* Tell the future push that there was work added by this CPU. */
	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from every
	 * commit that occurs once we are over the hard limit to ensure the CIL
	 * push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}

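/*
 * Illustrative sketch, not part of this file: the per-cpu accounting policy
 * above with hypothetical names. The per-cpu threshold of soft_limit / ncpus
 * mirrors the code; the rest is a userspace stand-in. Cheap per-cpu adds in
 * the common case, folding into the shared counter only as the context nears
 * the soft limit.
 */
#include <stdatomic.h>

static void toy_account(int *pcp_used, _Atomic int *global, int len,
			int soft_limit, int ncpus, int pcp_mode)
{
	if (!pcp_mode)
		atomic_fetch_add(global, len);		/* accurate mode */
	else if (*pcp_used + len > soft_limit / ncpus) {
		atomic_fetch_add(global, *pcp_used + len);
		*pcp_used = 0;				/* folded */
	} else
		*pcp_used += len;			/* fast path */
}
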
static void
xlog_cil_free_logvec(
	struct list_head	*lv_chain)
{
	struct xfs_log_vec	*lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kmem_free(lv);
	}
}

 710/*
 711 * Mark all items committed and clear busy extents. We free the log vector
 712 * chains in a separate pass so that we unpin the log items as quickly as
 713 * possible.
 714 */
 715static void
 716xlog_cil_committed(
 717	struct xfs_cil_ctx	*ctx)
 
 718{
 
 719	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
 720	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);
 721
 722	/*
 723	 * If the I/O failed, we're aborting the commit and already shutdown.
 724	 * Wake any commit waiters before aborting the log items so we don't
 725	 * block async log pushers on callbacks. Async log pushers explicitly do
 726	 * not wait on log force completion because they may be holding locks
 727	 * required to unpin items.
 728	 */
 729	if (abort) {
 730		spin_lock(&ctx->cil->xc_push_lock);
 731		wake_up_all(&ctx->cil->xc_start_wait);
 732		wake_up_all(&ctx->cil->xc_commit_wait);
 733		spin_unlock(&ctx->cil->xc_push_lock);
 734	}
 735
 736	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
 737					ctx->start_lsn, abort);
 738
 739	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
 740	xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
 741			      xfs_has_discard(mp) && !abort);
 742
 743	spin_lock(&ctx->cil->xc_push_lock);
 744	list_del(&ctx->committing);
 745	spin_unlock(&ctx->cil->xc_push_lock);
 746
 747	xlog_cil_free_logvec(&ctx->lv_chain);
 748
 749	if (!list_empty(&ctx->busy_extents.extent_list)) {
 750		ctx->busy_extents.mount = mp;
 751		ctx->busy_extents.owner = ctx;
 752		xfs_discard_extents(mp, &ctx->busy_extents);
 753		return;
 754	}
 755
 756	kmem_free(ctx);
 757}
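/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the two-pass
 * completion pattern used by xlog_cil_committed() above. Pass one "unpins"
 * every item so that waiters can make progress as soon as possible; only
 * then does pass two walk the chain again to free the memory, mirroring
 * the separate xlog_cil_free_logvec() pass. All structures and names here
 * are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

struct lv {
	struct lv	*next;
	int		id;
};

static void unpin(struct lv *lv)
{
	/* latency-critical work that waiters are blocked on */
	printf("unpin %d\n", lv->id);
}

static void complete_chain(struct lv *chain)
{
	struct lv	*lv;

	for (lv = chain; lv; lv = lv->next)	/* pass 1: unpin quickly */
		unpin(lv);

	while (chain) {				/* pass 2: free separately */
		lv = chain;
		chain = chain->next;
		free(lv);
	}
}

int main(void)
{
	struct lv	*chain = NULL;

	for (int i = 0; i < 3; i++) {
		struct lv *lv = calloc(1, sizeof(*lv));

		if (!lv)
			return 1;
		lv->id = i;
		lv->next = chain;
		chain = lv;
	}
	complete_chain(chain);
	return 0;
}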
 758
 759void
 760xlog_cil_process_committed(
 761	struct list_head	*list)
 762{
 763	struct xfs_cil_ctx	*ctx;
 764
 765	while ((ctx = list_first_entry_or_null(list,
 766			struct xfs_cil_ctx, iclog_entry))) {
 767		list_del(&ctx->iclog_entry);
 768		xlog_cil_committed(ctx);
 769	}
 770}
 771
 772	/*
 773	 * Record the LSN of the iclog we were just granted space to start writing into.
 774	 * If the context doesn't have a start_lsn recorded, then this iclog will
 775	 * contain the start record for the checkpoint. Otherwise this write contains
 776	 * the commit record for the checkpoint.
 777	 */
 778void
 779xlog_cil_set_ctx_write_state(
 780	struct xfs_cil_ctx	*ctx,
 781	struct xlog_in_core	*iclog)
 782{
 783	struct xfs_cil		*cil = ctx->cil;
 784	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 785
 786	ASSERT(!ctx->commit_lsn);
 787	if (!ctx->start_lsn) {
 788		spin_lock(&cil->xc_push_lock);
 789		/*
 790		 * The LSN we need to pass to the log items on transaction
 791		 * commit is the LSN reported by the first log vector write, not
 792		 * the commit lsn. If we use the commit record lsn then we can
 793		 * move the grant write head beyond the tail LSN and overwrite
 794		 * it.
 795		 */
 796		ctx->start_lsn = lsn;
 797		wake_up_all(&cil->xc_start_wait);
 798		spin_unlock(&cil->xc_push_lock);
 799
 800		/*
 801		 * Make sure the metadata we are about to overwrite in the log
 802		 * has been flushed to stable storage before this iclog is
 803		 * issued.
 804		 */
 805		spin_lock(&cil->xc_log->l_icloglock);
 806		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
 807		spin_unlock(&cil->xc_log->l_icloglock);
 808		return;
 809	}
 810
 811	/*
 812	 * Take a reference to the iclog for the context so that we still hold
 813	 * it when xlog_write is done and has released it. This means the
 814	 * context controls when the iclog is released for IO.
 815	 */
 816	atomic_inc(&iclog->ic_refcnt);
 817
 818	/*
 819	 * xlog_state_get_iclog_space() guarantees there is enough space in the
 820	 * iclog for an entire commit record, so we can attach the context
 821	 * callbacks now.  This needs to be done before we make the commit_lsn
 822	 * visible to waiters so that checkpoints with commit records in the
 823	 * same iclog order their IO completion callbacks in the same order that
 824	 * the commit records appear in the iclog.
 825	 */
 826	spin_lock(&cil->xc_log->l_icloglock);
 827	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
 828	spin_unlock(&cil->xc_log->l_icloglock);
 829
 830	/*
 831	 * Now we can record the commit LSN and wake anyone waiting for this
 832	 * sequence to have the ordered commit record assigned to a physical
 833	 * location in the log.
 834	 */
 835	spin_lock(&cil->xc_push_lock);
 836	ctx->commit_iclog = iclog;
 837	ctx->commit_lsn = lsn;
 838	wake_up_all(&cil->xc_commit_wait);
 839	spin_unlock(&cil->xc_push_lock);
 840}
 841
 842
 843/*
 844 * Ensure that the order of log writes follows checkpoint sequence order. This
 845 * relies on the context LSN being zero until the log write has guaranteed the
 846 * LSN that the log write will start at via xlog_state_get_iclog_space().
 847 */
 848enum _record_type {
 849	_START_RECORD,
 850	_COMMIT_RECORD,
 851};
 852
 853static int
 854xlog_cil_order_write(
 855	struct xfs_cil		*cil,
 856	xfs_csn_t		sequence,
 857	enum _record_type	record)
 858{
 859	struct xfs_cil_ctx	*ctx;
 860
 861restart:
 862	spin_lock(&cil->xc_push_lock);
 863	list_for_each_entry(ctx, &cil->xc_committing, committing) {
 864		/*
 865		 * Avoid getting stuck in this loop because we were woken by the
 866		 * shutdown, but then went back to sleep once already in the
 867		 * shutdown state.
 868		 */
 869		if (xlog_is_shutdown(cil->xc_log)) {
 870			spin_unlock(&cil->xc_push_lock);
 871			return -EIO;
 872		}
 873
 874		/*
 875		 * Higher sequences will wait for this one so skip them.
 876		 * Don't wait for our own sequence, either.
 877		 */
 878		if (ctx->sequence >= sequence)
 879			continue;
 880
 881		/* Wait until the LSN for the record has been recorded. */
 882		switch (record) {
 883		case _START_RECORD:
 884			if (!ctx->start_lsn) {
 885				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
 886				goto restart;
 887			}
 888			break;
 889		case _COMMIT_RECORD:
 890			if (!ctx->commit_lsn) {
 891				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
 892				goto restart;
 893			}
 894			break;
 895		}
 896	}
 897	spin_unlock(&cil->xc_push_lock);
 898	return 0;
 899}
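/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): a pthread model
 * of the ordering loop in xlog_cil_order_write() above. A checkpoint may
 * only write its record once every lower sequence has recorded one, so
 * waiters sleep on a condition variable and re-scan the whole list from
 * the start after each wakeup, exactly like the "goto restart" above. The
 * fixed array and the recording of the LSN inside the same function are
 * simplifications; names are illustrative assumptions.
 */
#include <pthread.h>
#include <stdio.h>

#define NCTX	4

static pthread_mutex_t	push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	record_wait = PTHREAD_COND_INITIALIZER;
static long		record_lsn[NCTX];	/* 0 == not yet recorded */

static void order_write(int sequence)
{
	pthread_mutex_lock(&push_lock);
restart:
	for (int seq = 0; seq < sequence; seq++) {
		if (!record_lsn[seq]) {
			/* wait, then re-scan from the beginning */
			pthread_cond_wait(&record_wait, &push_lock);
			goto restart;
		}
	}
	record_lsn[sequence] = 100 + sequence;	/* "write" our record */
	pthread_cond_broadcast(&record_wait);
	pthread_mutex_unlock(&push_lock);
}

static void *worker(void *arg)
{
	int	seq = (int)(long)arg;

	order_write(seq);
	printf("sequence %d recorded lsn %ld\n", seq, record_lsn[seq]);
	return NULL;
}

int main(void)
{
	pthread_t	tid[NCTX];

	/* start in reverse order to force waiters down the slow path */
	for (long i = NCTX - 1; i >= 0; i--)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (int i = 0; i < NCTX; i++)
		pthread_join(tid[i], NULL);
	return 0;
}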
 900
 901/*
 902 * Write out the log vector change now attached to the CIL context. This will
 903 * write a start record that needs to be strictly ordered in ascending CIL
 904 * sequence order so that log recovery will always use in-order start LSNs when
 905 * replaying checkpoints.
 906 */
 907static int
 908xlog_cil_write_chain(
 909	struct xfs_cil_ctx	*ctx,
 910	uint32_t		chain_len)
 911{
 912	struct xlog		*log = ctx->cil->xc_log;
 913	int			error;
 914
 915	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
 916	if (error)
 917		return error;
 918	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
 919}
 920
 921/*
 922 * Write out the commit record of a checkpoint transaction to close off a
 923 * running log write. These commit records are strictly ordered in ascending CIL
 924 * sequence order so that log recovery will always replay the checkpoints in the
 925 * correct order.
 926 */
 927static int
 928xlog_cil_write_commit_record(
 929	struct xfs_cil_ctx	*ctx)
 930{
 931	struct xlog		*log = ctx->cil->xc_log;
 932	struct xlog_op_header	ophdr = {
 933		.oh_clientid = XFS_TRANSACTION,
 934		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
 935		.oh_flags = XLOG_COMMIT_TRANS,
 936	};
 937	struct xfs_log_iovec	reg = {
 938		.i_addr = &ophdr,
 939		.i_len = sizeof(struct xlog_op_header),
 940		.i_type = XLOG_REG_TYPE_COMMIT,
 941	};
 942	struct xfs_log_vec	vec = {
 943		.lv_niovecs = 1,
 944		.lv_iovecp = &reg,
 945	};
 946	int			error;
 947	LIST_HEAD(lv_chain);
 948	list_add(&vec.lv_list, &lv_chain);
 949
 950	if (xlog_is_shutdown(log))
 951		return -EIO;
 952
 953	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
 954	if (error)
 955		return error;
 956
 957	/* account for space used by record data */
 958	ctx->ticket->t_curr_res -= reg.i_len;
 959	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
 960	if (error)
 961		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 962	return error;
 963}
 964
 965struct xlog_cil_trans_hdr {
 966	struct xlog_op_header	oph[2];
 967	struct xfs_trans_header	thdr;
 968	struct xfs_log_iovec	lhdr[2];
 969};
 970
 971/*
 972 * Build a checkpoint transaction header to begin the journal transaction.  We
 973 * need to account for the space used by the transaction header here as it is
 974 * not accounted for in xlog_write().
 975 *
 976 * This is the only place we write a transaction header, so we also build the
 977 * log opheaders that indicate the start of a log transaction and wrap the
 978 * transaction header. We keep the start record in its own log vector rather
 979 * than compacting them into a single region as this ends up making the logic
 980 * in xlog_write() for handling empty opheaders for start, commit and unmount
 981 * records much simpler.
 982 */
 983static void
 984xlog_cil_build_trans_hdr(
 985	struct xfs_cil_ctx	*ctx,
 986	struct xlog_cil_trans_hdr *hdr,
 987	struct xfs_log_vec	*lvhdr,
 988	int			num_iovecs)
 989{
 990	struct xlog_ticket	*tic = ctx->ticket;
 991	__be32			tid = cpu_to_be32(tic->t_tid);
 992
 993	memset(hdr, 0, sizeof(*hdr));
 994
 995	/* Log start record */
 996	hdr->oph[0].oh_tid = tid;
 997	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
 998	hdr->oph[0].oh_flags = XLOG_START_TRANS;
 999
1000	/* log iovec region pointer */
1001	hdr->lhdr[0].i_addr = &hdr->oph[0];
1002	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
1003	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;
1004
1005	/* log opheader */
1006	hdr->oph[1].oh_tid = tid;
1007	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
1008	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));
1009
1010	/* transaction header in host byte order format */
1011	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
1012	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
1013	hdr->thdr.th_tid = tic->t_tid;
1014	hdr->thdr.th_num_items = num_iovecs;
1015
1016	/* log iovec region pointer */
1017	hdr->lhdr[1].i_addr = &hdr->oph[1];
1018	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
1019				sizeof(struct xfs_trans_header);
1020	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;
1021
1022	lvhdr->lv_niovecs = 2;
1023	lvhdr->lv_iovecp = &hdr->lhdr[0];
1024	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
1025
1026	tic->t_curr_res -= lvhdr->lv_bytes;
1027}
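/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the shape of
 * the checkpoint header built by xlog_cil_build_trans_hdr() above, using
 * simplified stand-in structs. Region 0 carries the start record opheader
 * on its own; region 1 wraps a second opheader around the transaction
 * header. The total printed mirrors what lv_bytes accounts and what is
 * stolen from the ticket reservation. The field layouts here are
 * assumptions for illustration, not the on-disk format.
 */
#include <stdint.h>
#include <stdio.h>

struct op_header {		/* stand-in for struct xlog_op_header */
	uint32_t	oh_tid;
	uint32_t	oh_len;
	uint8_t		oh_clientid;
	uint8_t		oh_flags;
	uint16_t	oh_res2;
};

struct trans_header {		/* stand-in for struct xfs_trans_header */
	uint32_t	th_magic;
	uint32_t	th_type;
	int32_t		th_tid;
	uint32_t	th_num_items;
};

int main(void)
{
	size_t	lhdr0 = sizeof(struct op_header);
	size_t	lhdr1 = sizeof(struct op_header) + sizeof(struct trans_header);

	printf("region 0 (start record opheader): %zu bytes\n", lhdr0);
	printf("region 1 (opheader + trans hdr):  %zu bytes\n", lhdr1);
	printf("lv_bytes / reservation consumed:  %zu bytes\n", lhdr0 + lhdr1);
	return 0;
}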
1028
1029/*
1030 * CIL item reordering compare function. We want to order in ascending ID order,
1031 * but we want to leave items with the same ID in the order they were added to
1032 * the list. This is important for operations like reflink where we log four
1033 * order-dependent intents in a single transaction when we overwrite an
1034 * existing shared extent with a new shared extent, i.e. BUI(unmap), CUI(drop),
1035 * CUI(inc), BUI(remap)...
1036 */
1037static int
1038xlog_cil_order_cmp(
1039	void			*priv,
1040	const struct list_head	*a,
1041	const struct list_head	*b)
1042{
1043	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
1044	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);
1045
1046	return l1->lv_order_id > l2->lv_order_id;
1047}
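/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): why the
 * comparator above preserves insertion order for equal IDs. list_sort()
 * is a stable merge sort that only moves 'a' after 'b' when the comparator
 * returns > 0, so returning (a > b) rather than (a - b) is sufficient.
 * The stable insertion sort below demonstrates the same property; the
 * struct and data are illustrative assumptions.
 */
#include <stdio.h>

struct rec {
	int	order_id;
	char	tag;		/* records insertion order within an ID */
};

static int order_cmp(const struct rec *a, const struct rec *b)
{
	return a->order_id > b->order_id;	/* 1 only if strictly after */
}

int main(void)
{
	/* two records share order_id 2; 'b' was inserted before 'c' */
	struct rec v[] = {
		{ 3, 'd' }, { 2, 'b' }, { 1, 'a' }, { 2, 'c' },
	};
	int n = sizeof(v) / sizeof(v[0]);

	/* stable insertion sort: never moves an element past an equal one */
	for (int i = 1; i < n; i++) {
		struct rec	key = v[i];
		int		j = i - 1;

		while (j >= 0 && order_cmp(&v[j], &key)) {
			v[j + 1] = v[j];
			j--;
		}
		v[j + 1] = key;
	}

	for (int i = 0; i < n; i++)
		printf("%d%c ", v[i].order_id, v[i].tag);
	printf("\n");		/* prints: 1a 2b 2c 3d */
	return 0;
}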
1048
1049/*
1050 * Pull all the log vectors off the items in the CIL, and remove the items from
1051 * the CIL. We don't need the CIL lock here because it's only needed on the
1052 * transaction commit side which is currently locked out by the flush lock.
1053 *
1054 * If a log item is marked with a whiteout, we do not need to write it to the
1055 * journal and so we just move it to the whiteout list for the caller to
1056 * dispose of appropriately.
1057 */
1058static void
1059xlog_cil_build_lv_chain(
1060	struct xfs_cil_ctx	*ctx,
1061	struct list_head	*whiteouts,
1062	uint32_t		*num_iovecs,
1063	uint32_t		*num_bytes)
1064{
1065	while (!list_empty(&ctx->log_items)) {
1066		struct xfs_log_item	*item;
1067		struct xfs_log_vec	*lv;
1068
1069		item = list_first_entry(&ctx->log_items,
1070					struct xfs_log_item, li_cil);
1071
1072		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
1073			list_move(&item->li_cil, whiteouts);
1074			trace_xfs_cil_whiteout_skip(item);
1075			continue;
1076		}
1077
1078		lv = item->li_lv;
1079		lv->lv_order_id = item->li_order_id;
1080
1081		/* we don't write ordered log vectors */
1082		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
1083			*num_bytes += lv->lv_bytes;
1084		*num_iovecs += lv->lv_niovecs;
1085		list_add_tail(&lv->lv_list, &ctx->lv_chain);
1086
1087		list_del_init(&item->li_cil);
1088		item->li_order_id = 0;
1089		item->li_lv = NULL;
1090	}
1091}
1092
1093static void
1094xlog_cil_cleanup_whiteouts(
1095	struct list_head	*whiteouts)
1096{
1097	while (!list_empty(whiteouts)) {
1098		struct xfs_log_item *item = list_first_entry(whiteouts,
1099						struct xfs_log_item, li_cil);
1100		list_del_init(&item->li_cil);
1101		trace_xfs_cil_whiteout_unpin(item);
1102		item->li_ops->iop_unpin(item, 1);
1103	}
1104}
1105
1106/*
1107 * Push the Committed Item List to the log.
1108 *
1109 * If the current sequence is the same as xc_push_seq we need to do a flush. If
1110 * xc_push_seq is less than the current sequence, then it has already been
1111 * flushed and we don't need to do anything - the caller will wait for it to
1112 * complete if necessary.
1113 *
1114 * xc_push_seq is checked unlocked against the sequence number for a match.
1115 * Hence we can allow log forces to run racily and not issue pushes for the
1116 * same sequence twice.  If we get a race between multiple pushes for the same
1117 * sequence they will block on the first one and then abort, hence avoiding
1118 * needless pushes.
1119 */
1120static void
1121xlog_cil_push_work(
1122	struct work_struct	*work)
1123{
1124	struct xfs_cil_ctx	*ctx =
1125		container_of(work, struct xfs_cil_ctx, push_work);
1126	struct xfs_cil		*cil = ctx->cil;
1127	struct xlog		*log = cil->xc_log;
1128	struct xfs_cil_ctx	*new_ctx;
1129	int			num_iovecs = 0;
1130	int			num_bytes = 0;
1131	int			error = 0;
1132	struct xlog_cil_trans_hdr thdr;
1133	struct xfs_log_vec	lvhdr = {};
1134	xfs_csn_t		push_seq;
1135	bool			push_commit_stable;
1136	LIST_HEAD		(whiteouts);
1137	struct xlog_ticket	*ticket;
1138
1139	new_ctx = xlog_cil_ctx_alloc();
1140	new_ctx->ticket = xlog_cil_ticket_alloc(log);
1141
1142	down_write(&cil->xc_ctx_lock);
1143
1144	spin_lock(&cil->xc_push_lock);
1145	push_seq = cil->xc_push_seq;
1146	ASSERT(push_seq <= ctx->sequence);
1147	push_commit_stable = cil->xc_push_commit_stable;
1148	cil->xc_push_commit_stable = false;
1149
1150	/*
1151	 * As we are about to switch to a new, empty CIL context, we no longer
1152	 * need to throttle tasks on CIL space overruns. Wake any waiters that
1153	 * the hard push throttle may have caught so they can start committing
1154	 * to the new context. The cil->xc_push_lock provides the serialisation
1155	 * necessary for safely using the lockless waitqueue_active() check in
1156	 * this context.
1157	 */
1158	if (waitqueue_active(&cil->xc_push_wait))
1159		wake_up_all(&cil->xc_push_wait);
1160
1161	xlog_cil_push_pcp_aggregate(cil, ctx);
1162
1163	/*
1164	 * Check if we've anything to push. If there is nothing, then we don't
1165	 * move on to a new sequence number and so we have to be able to push
1166	 * this sequence again later.
1167	 */
1168	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1169		cil->xc_push_seq = 0;
1170		spin_unlock(&cil->xc_push_lock);
1171		goto out_skip;
1172	}
1173
1174
1175	/* check for a previously pushed sequence */
1176	if (push_seq < ctx->sequence) {
1177		spin_unlock(&cil->xc_push_lock);
1178		goto out_skip;
1179	}
1180
1181	/*
1182	 * We are now going to push this context, so add it to the committing
1183	 * list before we do anything else. This ensures that anyone waiting on
1184	 * this push can easily detect the difference between a "push in
1185	 * progress" and "CIL is empty, nothing to do".
1186	 *
1187	 * IOWs, a wait loop can now check for:
1188	 *	the current sequence not being found on the committing list;
1189	 *	an empty CIL; and
1190	 *	an unchanged sequence number
1191	 * to detect a push that had nothing to do and therefore does not need
1192	 * waiting on. If the CIL is not empty, we get put on the committing
1193	 * list before emptying the CIL and bumping the sequence number. Hence
1194	 * an empty CIL and an unchanged sequence number means we jumped out
1195	 * above after doing nothing.
1196	 *
1197	 * Hence the waiter will either find the commit sequence on the
1198	 * committing list or the sequence number will be unchanged and the CIL
1199	 * still dirty. In that latter case, the push has not yet started, and
1200	 * so the waiter will have to continue trying to check the CIL
1201	 * committing list until it is found. In extreme cases of delay, the
1202	 * sequence may fully commit between the attempts the waiter makes to
1203	 * wait on the commit sequence.
1204	 */
1205	list_add(&ctx->committing, &cil->xc_committing);
1206	spin_unlock(&cil->xc_push_lock);
1207
1208	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
1209
1210	/*
1211	 * Switch the contexts so we can drop the context lock and move out
1212	 * of a shared context. We can't just go straight to the commit record,
1213	 * though - we need to synchronise with previous and future commits so
1214	 * that the commit records are correctly ordered in the log to ensure
1215	 * that we process items during log IO completion in the correct order.
1216	 *
1217	 * For example, if we get an EFI in one checkpoint and the EFD in the
1218	 * next (e.g. due to log forces), we do not want the checkpoint with
1219	 * the EFD to be committed before the checkpoint with the EFI.  Hence
1220	 * we must strictly order the commit records of the checkpoints so
1221	 * that: a) the checkpoint callbacks are attached to the iclogs in the
1222	 * correct order; and b) the checkpoints are replayed in correct order
1223	 * in log recovery.
1224	 *
1225	 * Hence we need to add this context to the committing context list so
1226	 * that higher sequences will wait for us to write out a commit record
1227	 * before they do.
1228	 *
1229	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
1230	 * structure atomically with the addition of this sequence to the
1231	 * committing list. This also ensures that we can do unlocked checks
1232	 * against the current sequence in log forces without risking
1233	 * dereferencing a freed context pointer.
1234	 */
1235	spin_lock(&cil->xc_push_lock);
1236	xlog_cil_ctx_switch(cil, new_ctx);
1237	spin_unlock(&cil->xc_push_lock);
1238	up_write(&cil->xc_ctx_lock);
1239
1240	/*
1241	 * Sort the log vector chain before we add the transaction headers.
1242	 * This ensures we always have the transaction headers at the start
1243	 * of the chain.
1244	 */
1245	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
1246
1247	/*
1248	 * Build a checkpoint transaction header and write it to the log to
1249	 * begin the transaction. We need to account for the space used by the
1250	 * transaction header here as it is not accounted for in xlog_write().
1251	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
1252	 * it gets written into the iclog first.
1253	 */
1254	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
1255	num_bytes += lvhdr.lv_bytes;
1256	list_add(&lvhdr.lv_list, &ctx->lv_chain);
1257
1258	/*
1259	 * Take the lvhdr back off the lv_chain immediately after calling
1260	 * xlog_cil_write_chain() as it should not be passed to log IO
1261	 * completion.
1262	 */
1263	error = xlog_cil_write_chain(ctx, num_bytes);
1264	list_del(&lvhdr.lv_list);
1265	if (error)
1266		goto out_abort_free_ticket;
1267
1268	error = xlog_cil_write_commit_record(ctx);
1269	if (error)
1270		goto out_abort_free_ticket;
1271
1272	/*
1273	 * Grab the ticket from the ctx so we can ungrant it after releasing the
1274	 * commit_iclog. The ctx may be freed by the time we return from
1275	 * releasing the commit_iclog (i.e. checkpoint has been completed and
1276	 * callback run) so we can't reference the ctx after the call to
1277	 * xlog_state_release_iclog().
1278	 */
1279	ticket = ctx->ticket;
1280
1281	/*
1282	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
1283	 * to complete before we submit the commit_iclog. We can't use state
1284	 * checks for this - ACTIVE can be either a past completed iclog or a
1285	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
1286	 * past or future iclog awaiting IO or ordered IO completion to be run.
1287	 * In the latter case, if it's a future iclog and we wait on it, then we
1288	 * will hang because it won't get processed through to ic_force_wait
1289	 * wakeup until this commit_iclog is written to disk.  Hence we use the
1290	 * iclog header lsn and compare it to the commit lsn to determine if we
1291	 * need to wait on iclogs or not.
1292	 */
1293	spin_lock(&log->l_icloglock);
1294	if (ctx->start_lsn != ctx->commit_lsn) {
1295		xfs_lsn_t	plsn;
1296
1297		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
1298		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
1299			/*
1300			 * Waiting on ic_force_wait orders the completion of
1301			 * iclogs older than ic_prev. Hence we only need to wait
1302			 * on the most recent older iclog here.
1303			 */
1304			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
1305			spin_lock(&log->l_icloglock);
1306		}
1307
1308		/*
1309		 * We need to issue a pre-flush so that the ordering for this
1310		 * checkpoint is correctly preserved down to stable storage.
1311		 */
1312		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
1313	}
1314
1315	/*
1316	 * The commit iclog must be written to stable storage to guarantee
1317	 * journal IO vs metadata writeback IO is correctly ordered on stable
1318	 * storage.
1319	 *
1320	 * If the push caller needs the commit to be immediately stable and the
1321	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
1322	 * will be written when released, switch its state to WANT_SYNC right
1323	 * now.
1324	 */
1325	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
1326	if (push_commit_stable &&
1327	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
1328		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1329	ticket = ctx->ticket;
1330	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1331
1332	/* Not safe to reference ctx now! */
1333
1334	spin_unlock(&log->l_icloglock);
1335	xlog_cil_cleanup_whiteouts(&whiteouts);
1336	xfs_log_ticket_ungrant(log, ticket);
1337	return;
1338
1339out_skip:
1340	up_write(&cil->xc_ctx_lock);
1341	xfs_log_ticket_put(new_ctx->ticket);
1342	kmem_free(new_ctx);
1343	return;
1344
1345out_abort_free_ticket:
1346	ASSERT(xlog_is_shutdown(log));
1347	xlog_cil_cleanup_whiteouts(&whiteouts);
1348	if (!ctx->commit_iclog) {
1349		xfs_log_ticket_ungrant(log, ctx->ticket);
1350		xlog_cil_committed(ctx);
1351		return;
1352	}
1353	spin_lock(&log->l_icloglock);
1354	ticket = ctx->ticket;
1355	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1356	/* Not safe to reference ctx now! */
1357	spin_unlock(&log->l_icloglock);
1358	xfs_log_ticket_ungrant(log, ticket);
1359}
1360
1361/*
1362 * We need to push the CIL every so often so we don't cache more than we can fit in
1363 * the log. The limit really is that a checkpoint can't be more than half the
1364 * log (the current checkpoint is not allowed to overwrite the previous
1365 * checkpoint), but commit latency and memory usage limit this to a smaller
1366 * size.
1367 */
1368static void
1369xlog_cil_push_background(
1370	struct xlog	*log) __releases(cil->xc_ctx_lock)
1371{
1372	struct xfs_cil	*cil = log->l_cilp;
1373	int		space_used = atomic_read(&cil->xc_ctx->space_used);
1374
1375	/*
1376	 * The cil won't be empty because we are called while holding the
1377	 * context lock so whatever we added to the CIL will still be there.
1378	 */
1379	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1380
1381	/*
1382	 * We are done if:
1383	 * - we haven't used up all the space available yet; or
1384	 * - we've already queued up a push; and
1385	 * - we're not over the hard limit; and
1386	 * - nothing has been over the hard limit.
1387	 *
1388	 * If so, we don't need to take the push lock as there's nothing to do.
1389	 */
1390	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
1391	    (cil->xc_push_seq == cil->xc_current_sequence &&
1392	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
1393	     !waitqueue_active(&cil->xc_push_wait))) {
1394		up_read(&cil->xc_ctx_lock);
1395		return;
1396	}
1397
1398	spin_lock(&cil->xc_push_lock);
1399	if (cil->xc_push_seq < cil->xc_current_sequence) {
1400		cil->xc_push_seq = cil->xc_current_sequence;
1401		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1402	}
1403
1404	/*
1405	 * Drop the context lock now, we can't hold that if we need to sleep
1406	 * because we are over the blocking threshold. The push_lock is still
1407	 * held, so blocking threshold sleep/wakeup is still correctly
1408	 * serialised here.
1409	 */
1410	up_read(&cil->xc_ctx_lock);
1411
1412	/*
1413	 * If we are well over the space limit, throttle the work that is being
1414	 * done until the push work on this context has begun. Enforce the hard
1415	 * throttle on all transaction commits once it has been activated, even
1416	 * if the committing transactions have resulted in the space usage
1417	 * dipping back down under the hard limit.
1418	 *
1419	 * The cil->xc_push_lock provides the serialisation necessary for safely
1420	 * calling xlog_cil_over_hard_limit() in this context.
1421	 */
1422	if (xlog_cil_over_hard_limit(log, space_used)) {
1423		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1424		ASSERT(space_used < log->l_logsize);
1425		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1426		return;
1427	}
1428
1429	spin_unlock(&cil->xc_push_lock);
1430
1431}
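/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the three
 * outcomes of the background push check above. Below the soft limit
 * nothing happens; between the soft and hard limits the push is queued
 * but the committer does not block; over the hard limit the committer
 * queues the push and then throttles until the push work starts. The
 * limit values are assumptions, not the kernel's XLOG_CIL_SPACE_LIMIT()
 * or XLOG_CIL_BLOCKING_SPACE_LIMIT() definitions.
 */
#include <stdio.h>

#define SOFT_LIMIT	1024	/* assumed stand-in values */
#define HARD_LIMIT	2048

enum push_action {
	PUSH_NONE,		/* plenty of space left */
	PUSH_QUEUE,		/* queue background push, don't block */
	PUSH_QUEUE_THROTTLE,	/* queue push and sleep until it starts */
};

static enum push_action push_background(int space_used)
{
	if (space_used < SOFT_LIMIT)
		return PUSH_NONE;
	if (space_used < HARD_LIMIT)
		return PUSH_QUEUE;
	return PUSH_QUEUE_THROTTLE;
}

int main(void)
{
	int	samples[] = { 512, 1500, 4096 };

	for (int i = 0; i < 3; i++)
		printf("space=%d -> action=%d\n", samples[i],
		       push_background(samples[i]));
	return 0;
}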
1432
1433/*
1434 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1435 * number that is passed. When it returns, the work will be queued for
1436 * @push_seq, but it won't be completed.
1437 *
1438 * If the caller is performing a synchronous force, we will flush the workqueue
1439 * to get previously queued work moving to minimise the wait time they will
1440 * undergo waiting for all outstanding pushes to complete. The caller is
1441 * expected to do the required waiting for push_seq to complete.
1442 *
1443 * If the caller is performing an async push, we need to ensure that the
1444 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
1445 * don't do this, then the commit record may remain sitting in memory in an
1446 * ACTIVE iclog. This then requires another full log force to push to disk,
1447 * which defeats the purpose of having an async, non-blocking CIL force
1448 * mechanism. Hence in this case we need to pass a flag to the push work to
1449 * indicate it needs to flush the commit record itself.
1450 */
1451static void
1452xlog_cil_push_now(
1453	struct xlog	*log,
1454	xfs_lsn_t	push_seq,
1455	bool		async)
1456{
1457	struct xfs_cil	*cil = log->l_cilp;
1458
1459	if (!cil)
1460		return;
1461
1462	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1463
1464	/* start on any pending background push to minimise wait time on it */
1465	if (!async)
1466		flush_workqueue(cil->xc_push_wq);
1467
1468	spin_lock(&cil->xc_push_lock);
1469
1470	/*
1471	 * If this is an async flush request, we always need to set the
1472	 * xc_push_commit_stable flag even if something else has already queued
1473	 * a push. The flush caller is asking for the CIL to be on stable
1474	 * storage when the next push completes, so regardless of who has queued
1475	 * the push, the flush requires stable semantics from it.
1476	 */
1477	cil->xc_push_commit_stable = async;
1478
1479	/*
1480	 * If the CIL is empty or we've already pushed the sequence then
1481	 * there's no more work that we need to do.
1482	 */
1483	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
1484	    push_seq <= cil->xc_push_seq) {
1485		spin_unlock(&cil->xc_push_lock);
1486		return;
1487	}
1488
1489	cil->xc_push_seq = push_seq;
1490	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1491	spin_unlock(&cil->xc_push_lock);
1492}
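/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the queueing
 * decision made by xlog_cil_push_now() above. The stable flag is written
 * unconditionally so an async flush dictates the semantics of the next
 * push even when someone else already queued it, while the work itself is
 * only queued when the requested sequence moves forward. The struct and
 * names are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct cil_model {
	long	push_seq;		/* highest sequence queued so far */
	bool	empty;			/* stand-in for XLOG_CIL_EMPTY */
	bool	push_commit_stable;
};

/* returns true if the caller needs to queue new push work */
static bool push_now(struct cil_model *cil, long push_seq, bool async)
{
	/* async flushes always want the next push to be stable */
	cil->push_commit_stable = async;

	if (cil->empty || push_seq <= cil->push_seq)
		return false;		/* nothing to do or already queued */
	cil->push_seq = push_seq;
	return true;
}

int main(void)
{
	struct cil_model	cil = { 0 };

	printf("queue=%d stable=%d\n",
	       (int)push_now(&cil, 5, true), (int)cil.push_commit_stable);
	printf("queue=%d (sequence already queued)\n",
	       (int)push_now(&cil, 5, false));
	return 0;
}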
1493
1494bool
1495xlog_cil_empty(
1496	struct xlog	*log)
1497{
1498	struct xfs_cil	*cil = log->l_cilp;
1499	bool		empty = false;
1500
1501	spin_lock(&cil->xc_push_lock);
1502	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1503		empty = true;
1504	spin_unlock(&cil->xc_push_lock);
1505	return empty;
1506}
1507
1508/*
1509 * If there are intent done items in this transaction and the related intent was
1510 * committed in the current (same) CIL checkpoint, we don't need to write either
1511 * the intent or intent done item to the journal as the change will be
1512 * journalled atomically within this checkpoint. As we cannot remove items from
1513 * the CIL here, mark the related intent with a whiteout so that the CIL push
1514 * can remove it rather than writing it to the journal. Then remove the intent
1515 * done item from the current transaction and release it so it doesn't get put
1516 * into the CIL at all.
1517 */
1518static uint32_t
1519xlog_cil_process_intents(
1520	struct xfs_cil		*cil,
1521	struct xfs_trans	*tp)
1522{
1523	struct xfs_log_item	*lip, *ilip, *next;
1524	uint32_t		len = 0;
1525
1526	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1527		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1528			continue;
1529
1530		ilip = lip->li_ops->iop_intent(lip);
1531		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1532			continue;
1533		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1534		trace_xfs_cil_whiteout_mark(ilip);
1535		len += ilip->li_lv->lv_bytes;
1536		kmem_free(ilip->li_lv);
1537		ilip->li_lv = NULL;
1538
1539		xfs_trans_del_item(lip);
1540		lip->li_ops->iop_release(lip);
1541	}
1542	return len;
1543}
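/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the
 * intent/intent-done cancellation performed above, modelled stand-alone.
 * If a "done" item's intent is still in the current checkpoint, the intent
 * is marked with a whiteout so the push drops it, and the done item is
 * removed as well, so neither hits the journal. All structures and names
 * here are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct item {
	int		bytes;		/* formatted size, like lv_bytes */
	bool		whiteout;	/* XFS_LI_WHITEOUT stand-in */
	bool		in_current_chkpt;
	struct item	*intent;	/* set on "done" items only */
};

/* returns the journal space released by elided intents */
static int process_intents(struct item **items, int n)
{
	int	released = 0;

	for (int i = 0; i < n; i++) {
		struct item *done = items[i];
		struct item *intent = done ? done->intent : NULL;

		if (!intent || !intent->in_current_chkpt)
			continue;
		intent->whiteout = true;	/* push will skip it */
		released += intent->bytes;
		items[i] = NULL;		/* drop the done item too */
	}
	return released;
}

int main(void)
{
	struct item	intent = { .bytes = 128, .in_current_chkpt = true };
	struct item	done = { .bytes = 32, .intent = &intent };
	struct item	*items[] = { &done };

	printf("released %d bytes, intent whiteout=%d\n",
	       process_intents(items, 1), (int)intent.whiteout);
	return 0;
}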
1544
1545/*
1546 * Commit a transaction with the given vector to the Committed Item List.
1547 *
1548 * To do this, we need to format the item, pin it in memory if required and
1549 * account for the space used by the transaction. Once we have done that we
1550 * need to release the unused reservation for the transaction, attach the
1551 * transaction to the checkpoint context so we carry the busy extents through
1552 * to checkpoint completion, and then unlock all the items in the transaction.
1553 *
1554 * Called with the context lock already held in read mode to lock out
1555 * background commit, returns without it held once background commits are
1556 * allowed again.
1557 */
1558void
1559xlog_cil_commit(
1560	struct xlog		*log,
1561	struct xfs_trans	*tp,
1562	xfs_csn_t		*commit_seq,
1563	bool			regrant)
1564{
1565	struct xfs_cil		*cil = log->l_cilp;
1566	struct xfs_log_item	*lip, *next;
1567	uint32_t		released_space = 0;
1568
1569	/*
1570	 * Do all necessary memory allocation before we lock the CIL.
1571	 * This ensures the allocation does not deadlock with a CIL
1572	 * push in memory reclaim (e.g. from kswapd).
1573	 */
1574	xlog_cil_alloc_shadow_bufs(log, tp);
1575
1576	/* lock out background commit */
1577	down_read(&cil->xc_ctx_lock);
1578
1579	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1580		released_space = xlog_cil_process_intents(cil, tp);
1581
1582	xlog_cil_insert_items(log, tp, released_space);
1583
1584	if (regrant && !xlog_is_shutdown(log))
1585		xfs_log_ticket_regrant(log, tp->t_ticket);
1586	else
1587		xfs_log_ticket_ungrant(log, tp->t_ticket);
1588	tp->t_ticket = NULL;
1589	xfs_trans_unreserve_and_mod_sb(tp);
1590
1591	/*
1592	 * Once all the items of the transaction have been copied to the CIL,
1593	 * the items can be unlocked and possibly freed.
1594	 *
1595	 * This needs to be done before we drop the CIL context lock because we
1596	 * have to update state in the log items and unlock them before they go
1597	 * to disk. If we don't, then the CIL checkpoint can race with us and
1598	 * we can run checkpoint completion before we've updated and unlocked
1599	 * the log items. This affects (at least) processing of stale buffers,
1600	 * inodes and EFIs.
1601	 */
1602	trace_xfs_trans_commit_items(tp, _RET_IP_);
1603	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1604		xfs_trans_del_item(lip);
1605		if (lip->li_ops->iop_committing)
1606			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1607	}
1608	if (commit_seq)
1609		*commit_seq = cil->xc_ctx->sequence;
1610
1611	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
1612	xlog_cil_push_background(log);
1613}
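/*
 * Editor's sketch (standalone, not part of xfs_log_cil.c): the context
 * lock protocol described above, modelled with a pthread rwlock. Many
 * transaction commits hold the lock shared; the background push takes it
 * exclusive to swap in a new context, so no commit ever straddles a
 * context switch. The thread bodies are illustrative assumptions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t	ctx_lock = PTHREAD_RWLOCK_INITIALIZER;
static int		ctx_sequence = 1;

static void *commit(void *arg)
{
	pthread_rwlock_rdlock(&ctx_lock);	/* lock out background push */
	printf("commit %ld into sequence %d\n", (long)arg, ctx_sequence);
	pthread_rwlock_unlock(&ctx_lock);	/* push may proceed again */
	return NULL;
}

static void *push(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&ctx_lock);	/* wait out all committers */
	ctx_sequence++;				/* switch to a new context */
	pthread_rwlock_unlock(&ctx_lock);
	return NULL;
}

int main(void)
{
	pthread_t	c[3], p;

	for (long i = 0; i < 3; i++)
		pthread_create(&c[i], NULL, commit, (void *)i);
	pthread_create(&p, NULL, push, NULL);
	for (int i = 0; i < 3; i++)
		pthread_join(c[i], NULL);
	pthread_join(p, NULL);
	return 0;
}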
1614
1615/*
1616 * Flush the CIL to stable storage but don't wait for it to complete. This
1617 * requires the CIL push to ensure the commit record for the push hits the disk,
1618 * but otherwise is no different to a push done from a log force.
1619 */
1620void
1621xlog_cil_flush(
1622	struct xlog	*log)
1623{
1624	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;
1625
1626	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1627	xlog_cil_push_now(log, seq, true);
1628
1629	/*
1630	 * If the CIL is empty, make sure that any previous checkpoint that may
1631	 * still be in an active iclog is pushed to stable storage.
1632	 */
1633	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1634		xfs_log_force(log->l_mp, 0);
1635}
1636
1637/*
1638 * Conditionally push the CIL based on the sequence passed in.
1639 *
1640 * We only need to push if we haven't already pushed the sequence number given.
1641 * Hence the only time we will trigger a push here is if the push sequence is
1642 * the same as the current context.
1643 *
1644 * We return the current commit lsn to allow the callers to determine if an
1645 * iclog flush is necessary following this call.
1646 */
1647xfs_lsn_t
1648xlog_cil_force_seq(
1649	struct xlog	*log,
1650	xfs_csn_t	sequence)
1651{
1652	struct xfs_cil		*cil = log->l_cilp;
1653	struct xfs_cil_ctx	*ctx;
1654	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
1655
1656	ASSERT(sequence <= cil->xc_current_sequence);
1657
1658	if (!sequence)
1659		sequence = cil->xc_current_sequence;
1660	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1661
1662	/*
1663	 * check to see if we need to force out the current context.
1664	 * xlog_cil_push() handles racing pushes for the same sequence,
1665	 * so no need to deal with it here.
1666	 */
1667restart:
1668	xlog_cil_push_now(log, sequence, false);
1669
1670	/*
1671	 * See if we can find a previous sequence still committing.
1672	 * We need to wait for all previous sequence commits to complete
1673	 * before allowing the force of push_seq to go ahead. Hence block
1674	 * on commits for those as well.
1675	 */
1676	spin_lock(&cil->xc_push_lock);
1677	list_for_each_entry(ctx, &cil->xc_committing, committing) {
1678		/*
1679		 * Avoid getting stuck in this loop because we were woken by the
1680		 * shutdown, but then went back to sleep once already in the
1681		 * shutdown state.
1682		 */
1683		if (xlog_is_shutdown(log))
1684			goto out_shutdown;
1685		if (ctx->sequence > sequence)
1686			continue;
1687		if (!ctx->commit_lsn) {
1688			/*
1689			 * It is still being pushed! Wait for the push to
1690			 * complete, then start again from the beginning.
1691			 */
1692			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1693			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1694			goto restart;
1695		}
1696		if (ctx->sequence != sequence)
1697			continue;
1698		/* found it! */
1699		commit_lsn = ctx->commit_lsn;
1700	}
1701
1702	/*
1703	 * The call to xlog_cil_push_now() executes the push in the background.
1704	 * Hence by the time we get here, our sequence may not have been
1705	 * pushed yet. This is true if the current sequence still matches the
1706	 * push sequence after the above wait loop and the CIL still contains
1707	 * dirty objects. This is guaranteed by the push code first adding the
1708	 * context to the committing list before emptying the CIL.
1709	 *
1710	 * Hence if we don't find the context in the committing list and the
1711	 * current sequence number is unchanged then the CIL contents are
1712	 * significant.  If the CIL is empty, it means there was nothing to push
1713	 * and that means there is nothing to wait for. If the CIL is not empty,
1714	 * it means we haven't yet started the push, because if it had started
1715	 * we would have found the context on the committing list.
1716	 */
1717	if (sequence == cil->xc_current_sequence &&
1718	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1719		spin_unlock(&cil->xc_push_lock);
1720		goto restart;
1721	}
1722
1723	spin_unlock(&cil->xc_push_lock);
1724	return commit_lsn;
1725
1726	/*
1727	 * We detected a shutdown in progress. We need to trigger the log force
1728	 * to pass through its iclog state machine error handling, even though
1729	 * we are already in a shutdown state. Hence we can't return
1730	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1731	 * LSN is already stable), so we return a zero LSN instead.
1732	 */
1733out_shutdown:
1734	spin_unlock(&cil->xc_push_lock);
1735	return 0;
1736}
1737
1738/*
1739 * Perform initial CIL structure initialisation.
1740 */
1741int
1742xlog_cil_init(
1743	struct xlog		*log)
1744{
1745	struct xfs_cil		*cil;
1746	struct xfs_cil_ctx	*ctx;
1747	struct xlog_cil_pcp	*cilpcp;
1748	int			cpu;
1749
1750	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
1751	if (!cil)
1752		return -ENOMEM;
1753	/*
1754	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
1755	 * concurrency the log spinlocks will be exposed to.
1756	 */
1757	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1758			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1759			4, log->l_mp->m_super->s_id);
1760	if (!cil->xc_push_wq)
1761		goto out_destroy_cil;
1762
1763	cil->xc_log = log;
1764	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1765	if (!cil->xc_pcp)
1766		goto out_destroy_wq;
1767
1768	for_each_possible_cpu(cpu) {
1769		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1770		INIT_LIST_HEAD(&cilpcp->busy_extents);
1771		INIT_LIST_HEAD(&cilpcp->log_items);
1772	}
1773
1774	INIT_LIST_HEAD(&cil->xc_committing);
1775	spin_lock_init(&cil->xc_push_lock);
1776	init_waitqueue_head(&cil->xc_push_wait);
1777	init_rwsem(&cil->xc_ctx_lock);
1778	init_waitqueue_head(&cil->xc_start_wait);
1779	init_waitqueue_head(&cil->xc_commit_wait);
1780	log->l_cilp = cil;
1781
1782	ctx = xlog_cil_ctx_alloc();
1783	xlog_cil_ctx_switch(cil, ctx);
1784	return 0;
1785
1786out_destroy_wq:
1787	destroy_workqueue(cil->xc_push_wq);
1788out_destroy_cil:
1789	kmem_free(cil);
1790	return -ENOMEM;
1791}
1792
1793void
1794xlog_cil_destroy(
1795	struct xlog	*log)
1796{
1797	struct xfs_cil	*cil = log->l_cilp;
1798
1799	if (cil->xc_ctx) {
1800		if (cil->xc_ctx->ticket)
1801			xfs_log_ticket_put(cil->xc_ctx->ticket);
1802		kmem_free(cil->xc_ctx);
1803	}
1804
1805	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1806	free_percpu(cil->xc_pcp);
1807	destroy_workqueue(cil->xc_push_wq);
1808	kmem_free(cil);
1809}
1810