v3.1
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"

/*
 * Perform initial CIL structure initialisation. If the CIL is not
 * enabled on this filesystem, ensure log->l_cilp is NULL so that we
 * can test it to determine whether delayed logging is in use.
 */
int
xlog_cil_init(
	struct log	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	log->l_cilp = NULL;
	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
		return 0;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct log	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
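	/*
	 * (xlog_assign_lsn() packs the current cycle into the high 32 bits
	 * of the 64 bit LSN and the current block into the low 32 bits -
	 * see xfs_log_priv.h - so the first checkpoint gets a commit LSN
	 * at the current head of the log.)
	 */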
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates it and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item and the item is then inserted
 * into the Committed Item List for tracking until the next checkpoint is
 * written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_format_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec *lv;

	ASSERT(log_vector);
	for (lv = log_vector; lv; lv = lv->lv_next) {
		void	*ptr;
		int	index;
		int	len = 0;

		/* build the vector array and calculate its length */
		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
		for (index = 0; index < lv->lv_niovecs; index++)
			len += lv->lv_iovecp[index].i_len;

		lv->lv_buf_len = len;
		lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
		ptr = lv->lv_buf;

		for (index = 0; index < lv->lv_niovecs; index++) {
			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

			memcpy(ptr, vec->i_addr, vec->i_len);
			vec->i_addr = ptr;
			ptr += vec->i_len;
		}
		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct log		*log,
	struct xfs_log_vec	*lv,
	int			*len,
	int			*diff_iovecs)
{
	struct xfs_log_vec	*old = lv->lv_item->li_lv;

	if (old) {
		/* existing lv on log item, space used is a delta */
		ASSERT(!list_empty(&lv->lv_item->li_cil));
		ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);

		*len += lv->lv_buf_len - old->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
		kmem_free(old->lv_buf);
		kmem_free(old);
	} else {
		/* new lv, must pin the log item */
		ASSERT(!lv->lv_item->li_lv);
		ASSERT(list_empty(&lv->lv_item->li_cil));

		*len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
		IOP_PIN(lv->lv_item);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*ticket)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_vec	*lv;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(log_vector);

	/*
	 * Do all the accounting aggregation and switching of log vectors
	 * around in a separate loop to the insertion of items into the CIL.
	 * Then we can do a separate loop to update the CIL within a single
	 * lock/unlock pair. This reduces the number of round trips on the CIL
	 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
	 * hold time for the transaction commit.
	 *
	 * If this is the first time the item is being placed into the CIL in
	 * this context, pin it so it can't be written to disk until the CIL is
	 * flushed to the iclog and the iclog written to disk.
	 *
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	for (lv = log_vector; lv; lv = lv->lv_next)
		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);

	/* account for space used by new iovec headers  */
	len += diff_iovecs * sizeof(xlog_op_header_t);
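	/*
	 * (Each iovec becomes its own region in the iclog and is written
	 * with its own xlog_op_header_t, so a net change of diff_iovecs
	 * regions changes the log space needed by that many op headers.)
	 */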

	spin_lock(&cil->xc_cil_lock);

	/* move the items to the tail of the CIL */
	for (lv = log_vector; lv; lv = lv->lv_next)
		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);

	ctx->nvecs += diff_iovecs;

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit in checkpoint, steal the header reservation */
		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
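		/*
		 * (Worked example with illustrative numbers: for 32k iclogs
		 * with a 512 byte header, iclog_space is 32768 - 512 = 32256
		 * bytes. A checkpoint growing by len = 70000 bytes can cross
		 * up to hdrs = (70000 + 32255) / 32256 = 3 iclog boundaries,
		 * so we steal three record headers' worth of space, plus an
		 * op header each for regions split across those boundaries.)
		 */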
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		ticket->t_curr_res -= hdrs;
		ASSERT(ticket->t_curr_res >= len);
	}
	ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv->lv_buf);
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_alloc_busy_sort(&ctx->busy_extents);
	xfs_alloc_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct log		*log,
	xfs_lsn_t		push_seq)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_lv;
	int			num_iovecs;
	int			len;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;

	if (!cil)
		return 0;

	ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/*
	 * Lock out transaction commit, but don't block for background pushes
	 * unless we are well over the CIL space limit. See the definition of
	 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
	 * used here.
	 */
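	/*
	 * (Assumption for orientation: both limits live in xfs_log_priv.h as
	 * simple fractions of the log size - the background limit at roughly
	 * one eighth of the log, the hard limit at roughly three sixteenths -
	 * so a background push only blocks here when commits are arriving
	 * much faster than the CIL can be flushed.)
	 */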
	if (!down_write_trylock(&cil->xc_ctx_lock)) {
		if (!push_seq &&
		    cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
			goto out_free_ticket;
		down_write(&cil->xc_ctx_lock);
	}
	ctx = cil->xc_ctx;

	/* check if we've anything to push */
	if (list_empty(&cil->xc_cil))
		goto out_skip;

	/* check for spurious background flush */
	if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		goto out_skip;

	/* check for a previously pushed sequence */
	if (push_seq && push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_lv = 0;
	num_iovecs = 0;
	len = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;
		int			i;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;

		num_lv++;
		num_iovecs += lv->lv_niovecs;
		for (i = 0; i < lv->lv_niovecs; i++)
			len += lv->lv_iovecp[i].i_len;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
out_free_ticket:
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * For more specific information about the order of operations in
 * xfs_log_commit_cil() please refer to the comments in
 * xfs_trans_commit_iclog().
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct log		*log = mp->m_log;
	int			log_flags = 0;
	int			push = 0;

	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	/*
	 * do all the hard work of formatting items (including memory
	 * allocation) outside the CIL context lock. This prevents stalling CIL
	 * pushes when we are low on memory and a transaction commit spends a
	 * lot of time in memory reclaim.
	 */
	xlog_cil_format_items(log, log_vector);

	/* lock out background commit */
	down_read(&log->l_cilp->xc_ctx_lock);
	if (commit_lsn)
		*commit_lsn = log->l_cilp->xc_ctx->sequence;
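	/*
	 * (Note that the "commit lsn" handed back here is really the
	 * checkpoint sequence number; a real on-disk LSN only exists once
	 * the checkpoint containing this transaction is committed.)
	 */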

	xlog_cil_insert_items(log, log_vector, tp->t_ticket);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(log->l_mp, tp->t_ticket);

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy)) {
		spin_lock(&log->l_cilp->xc_cil_lock);
		list_splice_init(&tp->t_busy,
					&log->l_cilp->xc_ctx->busy_extents);
		spin_unlock(&log->l_cilp->xc_cil_lock);
	}

	tp->t_commit_lsn = *commit_lsn;
	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, *commit_lsn, 0);

	/* check for background commit before unlock */
	if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
		push = 1;

	up_read(&log->l_cilp->xc_ctx_lock);

	/*
	 * We need to push CIL every so often so we don't cache more than we
	 * can fit in the log. The limit really is that a checkpoint can't be
	 * more than half the log (the current checkpoint is not allowed to
	 * overwrite the previous checkpoint), but commit latency and memory
	 * usage limit this to a smaller size in most cases.
	 */
	if (push)
		xlog_cil_push(log, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 *
 * XXX: Initially, just push the CIL unconditionally and return whatever
 * commit lsn is there. It'll be empty, so this is broken for now.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct log	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	if (sequence == cil->xc_current_sequence)
		xlog_cil_push(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_cil_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
		return false;
	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
v3.15
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and free it.
	 */
	if (!old_lv)
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
	else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_buf_len;
		*diff_iovecs -= old_lv->lv_niovecs;
		kmem_free(old_lv);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates it and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item and the item is then inserted
 * into the Committed Item List for tracking until the next checkpoint is
 * written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/* Skip items that do not have any vectors for writing */
		if (!niovecs)
			continue;

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned.  We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));
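		/*
		 * (Illustrative numbers: an item with 3 iovecs totalling
		 * 100 bytes of data becomes 100 + 3 * 8 = 124 bytes of
		 * worst-case padded data, rounded up to 128 bytes.)
		 */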

		/* grab the old item if it exists for reservation accounting */
		old_lv = lip->li_lv;

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes +
			   round_up((sizeof(struct xfs_log_vec) +
				     niovecs * sizeof(struct xfs_log_iovec)),
				    sizeof(uint64_t));
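		/*
		 * Illustrative layout of the single allocation (not to
		 * scale):
		 *
		 *   [ xfs_log_vec | iovec array | pad | data buffer ]
		 *   ^lv            ^lv_iovecp          ^lv_buf
		 */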

		/* compare to existing item size */
		if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_buf_len;
		} else {
			/* allocate new data chunk */
			lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
				goto insert;
			}
			lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf_len = 0;
		lv->lv_buf = (char *)lv + buf_size - nbytes;
		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));

		lip->li_ops->iop_format(lip, lv);
insert:
		ASSERT(lv->lv_buf_len <= nbytes);
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	/* account for space used by new iovec headers  */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
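		/*
		 * (Worked example with illustrative numbers, as in the v3.1
		 * listing above: for 32k iclogs with a 512 byte header,
		 * iclog_space is 32256 bytes, so len = 70000 gives
		 * hdrs = (70000 + 32255) / 32256 = 3 record headers, plus an
		 * op header each for regions split across those boundaries.)
		 */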
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		tp->t_ticket->t_curr_res -= hdrs;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}
	spin_unlock(&cil->xc_push_lock);

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the
	 * cil structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
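		/*
		 * (queue_work() is a no-op if xc_push_work is already
		 * queued, so racing committers schedule at most one
		 * background push at a time.)
		 */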
	}
	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	int			log_flags = 0;

	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(mp, tp->t_ticket);

	tp->t_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = tp->t_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, tp->t_commit_lsn, 0);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects.
	 *
	 * When the push occurs, it will empty the CIL and atomically increment
	 * the current sequence past the push sequence and move it into the
	 * committing list. Of course, if the CIL is clean at the time of the
	 * push, it won't have pushed the CIL at all, so in that case we should
	 * try the push for this sequence again from the start just in case.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}