fs/gfs2/log.c (Linux v3.1)
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/gfs2_ondisk.h>
 16#include <linux/crc32.h>
 17#include <linux/delay.h>
 18#include <linux/kthread.h>
 19#include <linux/freezer.h>
 20#include <linux/bio.h>
 21#include <linux/writeback.h>
 22
 23#include "gfs2.h"
 24#include "incore.h"
 25#include "bmap.h"
 26#include "glock.h"
 27#include "log.h"
 28#include "lops.h"
 29#include "meta_io.h"
 30#include "util.h"
 31#include "dir.h"
 32#include "trace_gfs2.h"
 33
 34#define PULL 1
 35
 36/**
 37 * gfs2_struct2blk - compute number of log descriptor blocks needed
 38 * @sdp: the filesystem
 39 * @nstruct: the number of structures
 40 * @ssize: the size of the structures
 41 *
 42 * Compute the number of log descriptor blocks needed to hold a certain number
 43 * of structures of a certain size.
 44 *
 45 * Returns: the number of blocks needed (minimum is always 1)
 46 */
 47
 48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 49			     unsigned int ssize)
 50{
 51	unsigned int blks;
 52	unsigned int first, second;
 53
 54	blks = 1;
 55	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
 56
 57	if (nstruct > first) {
 58		second = (sdp->sd_sb.sb_bsize -
 59			  sizeof(struct gfs2_meta_header)) / ssize;
 60		blks += DIV_ROUND_UP(nstruct - first, second);
 61	}
 62
 63	return blks;
 64}
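To make the packing arithmetic concrete, here is a minimal user-space sketch of the same computation. The 72-byte descriptor and 24-byte meta-header sizes are illustrative assumptions; the kernel derives both from sizeof() on the on-disk structures.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int struct2blk(unsigned int bsize, unsigned int nstruct,
                               unsigned int ssize)
{
	unsigned int blks = 1;
	unsigned int first = (bsize - 72) / ssize;  /* entries in the descriptor block */
	unsigned int second = (bsize - 24) / ssize; /* entries per continuation block */

	if (nstruct > first)
		blks += DIV_ROUND_UP(nstruct - first, second);
	return blks;
}

int main(void)
{
	/* 1000 u64 revoke entries: 503 fit in the descriptor block,
	 * and the remaining 497 fit in one continuation block. */
	printf("%u\n", struct2blk(4096, 1000, 8)); /* prints 2 */
	return 0;
}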
 65
 66/**
 67 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 69 * @bd: The gfs2_bufdata to remove
 70 *
 71 * The ail lock _must_ be held when calling this function
 72 *
 73 */
 74
 75void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
 76{
 77	bd->bd_ail = NULL;
 78	list_del_init(&bd->bd_ail_st_list);
 79	list_del_init(&bd->bd_ail_gl_list);
 80	atomic_dec(&bd->bd_gl->gl_ail_count);
 81	brelse(bd->bd_bh);
 82}
 83
 84/**
 85 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 86 * @sdp: the filesystem
 87 * @wbc: The writeback control structure
 88 * @ai: The ail structure
 89 *
 90 */
 91
 92static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
 93			       struct writeback_control *wbc,
 94			       struct gfs2_ail *ai)
 95__releases(&sdp->sd_ail_lock)
 96__acquires(&sdp->sd_ail_lock)
 97{
 98	struct gfs2_glock *gl = NULL;
 99	struct address_space *mapping;
100	struct gfs2_bufdata *bd, *s;
101	struct buffer_head *bh;
102
103	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
104		bh = bd->bd_bh;
105
106		gfs2_assert(sdp, bd->bd_ail == ai);
107
108		if (!buffer_busy(bh)) {
109			if (!buffer_uptodate(bh))
110				gfs2_io_error_bh(sdp, bh);
111			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
112			continue;
113		}
114
115		if (!buffer_dirty(bh))
116			continue;
117		if (gl == bd->bd_gl)
118			continue;
119		gl = bd->bd_gl;
120		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
121		mapping = bh->b_page->mapping;
122		if (!mapping)
123			continue;
124		spin_unlock(&sdp->sd_ail_lock);
125		generic_writepages(mapping, wbc);
126		spin_lock(&sdp->sd_ail_lock);
127		if (wbc->nr_to_write <= 0)
128			break;
129		return 1;
130	}
131
132	return 0;
133}
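gfs2_ail1_start_one drops sd_ail_lock around generic_writepages() because issuing page I/O may sleep, and it returns 1 so the caller restarts its scan: once the lock has been dropped, the list may have changed. A user-space analogue of that pattern, with a pthread mutex standing in for the spinlock (names are illustrative):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with list_lock held; returns 1 if the caller must rescan. */
static int start_one(void (*slow_writeback)(void))
{
	pthread_mutex_unlock(&list_lock); /* never sleep under the lock */
	slow_writeback();                 /* may block on page I/O */
	pthread_mutex_lock(&list_lock);   /* reacquire before returning */
	return 1;                         /* list may have changed: restart */
}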
134
135
136/**
137 * gfs2_ail1_flush - start writeback of some ail1 entries 
138 * @sdp: The super block
139 * @wbc: The writeback control structure
140 *
141 * Writes back some ail1 entries, according to the limits in the
142 * writeback control structure
143 */
144
145void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
146{
147	struct list_head *head = &sdp->sd_ail1_list;
148	struct gfs2_ail *ai;
149
150	trace_gfs2_ail_flush(sdp, wbc, 1);
151	spin_lock(&sdp->sd_ail_lock);
152restart:
153	list_for_each_entry_reverse(ai, head, ai_list) {
154		if (wbc->nr_to_write <= 0)
155			break;
156		if (gfs2_ail1_start_one(sdp, wbc, ai))
157			goto restart;
158	}
159	spin_unlock(&sdp->sd_ail_lock);
160	trace_gfs2_ail_flush(sdp, wbc, 0);
161}
162
163/**
164 * gfs2_ail1_start - start writeback of all ail1 entries
165 * @sdp: The superblock
166 */
167
168static void gfs2_ail1_start(struct gfs2_sbd *sdp)
169{
170	struct writeback_control wbc = {
171		.sync_mode = WB_SYNC_NONE,
172		.nr_to_write = LONG_MAX,
173		.range_start = 0,
174		.range_end = LLONG_MAX,
175	};
176
177	return gfs2_ail1_flush(sdp, &wbc);
178}
179
180/**
181 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
182 * @sdp: the filesystem
183 * @ai: the AIL entry
184 *
185 */
186
187static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
188{
189	struct gfs2_bufdata *bd, *s;
190	struct buffer_head *bh;
191
192	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
193					 bd_ail_st_list) {
194		bh = bd->bd_bh;
195		gfs2_assert(sdp, bd->bd_ail == ai);
196		if (buffer_busy(bh))
197			continue;
198		if (!buffer_uptodate(bh))
199			gfs2_io_error_bh(sdp, bh);
200		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
201	}
202
203}
204
205/**
206 * gfs2_ail1_empty - Try to empty the ail1 lists
207 * @sdp: The superblock
208 *
209 * Tries to empty the ail1 lists, starting with the oldest first
210 */
211
212static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
213{
214	struct gfs2_ail *ai, *s;
215	int ret;
216
217	spin_lock(&sdp->sd_ail_lock);
218	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
219		gfs2_ail1_empty_one(sdp, ai);
220		if (list_empty(&ai->ai_ail1_list))
221			list_move(&ai->ai_list, &sdp->sd_ail2_list);
222		else
223			break;
224	}
225	ret = list_empty(&sdp->sd_ail1_list);
226	spin_unlock(&sdp->sd_ail_lock);
227
228	return ret;
229}
230
231static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
232{
233	struct gfs2_ail *ai;
234	struct gfs2_bufdata *bd;
235	struct buffer_head *bh;
236
237	spin_lock(&sdp->sd_ail_lock);
238	list_for_each_entry_reverse(ai, &sdp->sd_ail1_list, ai_list) {
239		list_for_each_entry(bd, &ai->ai_ail1_list, bd_ail_st_list) {
240			bh = bd->bd_bh;
241			if (!buffer_locked(bh))
242				continue;
243			get_bh(bh);
244			spin_unlock(&sdp->sd_ail_lock);
245			wait_on_buffer(bh);
246			brelse(bh);
247			return;
248		}
249	}
250	spin_unlock(&sdp->sd_ail_lock);
251}
252
253/**
254 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
255 * @sdp: the filesystem
256 * @ai: the AIL entry
257 *
258 */
259
260static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
261{
262	struct list_head *head = &ai->ai_ail2_list;
263	struct gfs2_bufdata *bd;
264
265	while (!list_empty(head)) {
266		bd = list_entry(head->prev, struct gfs2_bufdata,
267				bd_ail_st_list);
268		gfs2_assert(sdp, bd->bd_ail == ai);
269		gfs2_remove_from_ail(bd);
270	}
271}
272
273static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
274{
275	struct gfs2_ail *ai, *safe;
276	unsigned int old_tail = sdp->sd_log_tail;
277	int wrap = (new_tail < old_tail);
278	int a, b, rm;
279
280	spin_lock(&sdp->sd_ail_lock);
281
282	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
283		a = (old_tail <= ai->ai_first);
284		b = (ai->ai_first < new_tail);
285		rm = (wrap) ? (a || b) : (a && b);
286		if (!rm)
287			continue;
288
289		gfs2_ail2_empty_one(sdp, ai);
290		list_del(&ai->ai_list);
291		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
292		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
293		kfree(ai);
294	}
295
296	spin_unlock(&sdp->sd_ail_lock);
297}
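The a/b/wrap test above asks whether a transaction's first block lies in the half-open interval from the old tail to the new tail of the circular journal; switching between OR and AND handles the interval wrapping past the end. A standalone restatement with example values (names are illustrative):

#include <stdbool.h>

static bool tail_range_contains(unsigned int old_tail, unsigned int new_tail,
                                unsigned int first)
{
	bool wrap = new_tail < old_tail;
	bool a = old_tail <= first;
	bool b = first < new_tail;

	return wrap ? (a || b) : (a && b);
}

/* tail_range_contains(100, 200, 150) is true (no wrap);
 * tail_range_contains(8000, 50, 10) is also true: the interval
 * [8000, 50) wraps past the end of the journal. */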
298
299/**
300 * gfs2_log_reserve - Make a log reservation
301 * @sdp: The GFS2 superblock
302 * @blks: The number of blocks to reserve
303 *
 304 * Note that we never give out the last few blocks of the journal. That's
305 * due to the fact that there is a small number of header blocks
306 * associated with each log flush. The exact number can't be known until
307 * flush time, so we ensure that we have just enough free blocks at all
308 * times to avoid running out during a log flush.
309 *
310 * We no longer flush the log here, instead we wake up logd to do that
311 * for us. To avoid the thundering herd and to ensure that we deal fairly
312 * with queued waiters, we use an exclusive wait. This means that when we
313 * get woken with enough journal space to get our reservation, we need to
314 * wake the next waiter on the list.
315 *
316 * Returns: errno
317 */
318
319int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
320{
321	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
322	unsigned wanted = blks + reserved_blks;
323	DEFINE_WAIT(wait);
324	int did_wait = 0;
325	unsigned int free_blocks;
326
327	if (gfs2_assert_warn(sdp, blks) ||
328	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
329		return -EINVAL;
330retry:
331	free_blocks = atomic_read(&sdp->sd_log_blks_free);
332	if (unlikely(free_blocks <= wanted)) {
333		do {
334			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
335					TASK_UNINTERRUPTIBLE);
336			wake_up(&sdp->sd_logd_waitq);
337			did_wait = 1;
338			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
339				io_schedule();
340			free_blocks = atomic_read(&sdp->sd_log_blks_free);
341		} while(free_blocks <= wanted);
342		finish_wait(&sdp->sd_log_waitq, &wait);
343	}
344	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
345				free_blocks - blks) != free_blocks)
346		goto retry;
347	trace_gfs2_log_blocks(sdp, -blks);
348
349	/*
350	 * If we waited, then so might others, wake them up _after_ we get
351	 * our share of the log.
352	 */
353	if (unlikely(did_wait))
354		wake_up(&sdp->sd_log_waitq);
355
356	down_read(&sdp->sd_log_flush_lock);
357
358	return 0;
359}
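The reservation itself is lock-free: read the free-block count, then try to swap in the decremented value, and retry from the top if another reserver won the race. A minimal sketch of that pattern using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static bool try_reserve(atomic_uint *log_blks_free, unsigned int blks,
                        unsigned int reserved_blks)
{
	unsigned int old = atomic_load(log_blks_free);

	if (old <= blks + reserved_blks)
		return false; /* caller sleeps, wakes logd, then retries */
	/* On failure another thread won the race; the caller re-reads. */
	return atomic_compare_exchange_strong(log_blks_free, &old, old - blks);
}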
360
361static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
362{
363	struct gfs2_journal_extent *je;
364
365	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
366		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
367			return je->dblock + lbn - je->lblock;
368	}
369
370	return -1;
371}
372
373/**
374 * log_distance - Compute distance between two journal blocks
375 * @sdp: The GFS2 superblock
376 * @newer: The most recent journal block of the pair
377 * @older: The older journal block of the pair
378 *
379 *   Compute the distance (in the journal direction) between two
380 *   blocks in the journal
381 *
382 * Returns: the distance in blocks
383 */
384
385static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
386					unsigned int older)
387{
388	int dist;
389
390	dist = newer - older;
391	if (dist < 0)
392		dist += sdp->sd_jdesc->jd_blocks;
393
394	return dist;
395}
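For example, in an 8192-block journal a head at block 10 that has wrapped past an older block 8100 is 10 - 8100 + 8192 = 102 blocks ahead. The signed subtraction below mirrors the function above (a sketch, not the kernel's code):

static unsigned int distance(unsigned int jd_blocks, unsigned int newer,
                             unsigned int older)
{
	int dist = newer - older;

	if (dist < 0)
		dist += jd_blocks;
	return dist; /* distance(8192, 10, 8100) == 102 */
}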
396
397/**
398 * calc_reserved - Calculate the number of blocks to reserve when
399 *                 refunding a transaction's unused buffers.
400 * @sdp: The GFS2 superblock
401 *
402 * This is complex.  We need to reserve room for all our currently used
403 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and 
404 * all our journaled data buffers for journaled files (e.g. files in the 
405 * meta_fs like rindex, or files for which chattr +j was done.)
406 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
407 * will count it as free space (sd_log_blks_free) and corruption will follow.
408 *
409 * We can have metadata bufs and jdata bufs in the same journal.  So each
410 * type gets its own log header, for which we need to reserve a block.
411 * In fact, each type has the potential for needing more than one header 
412 * in cases where we have more buffers than will fit on a journal page.
413 * Metadata journal entries take up half the space of journaled buffer entries.
414 * Thus, metadata entries have buf_limit (502) and journaled buffers have
415 * databuf_limit (251) before they cause a wrap around.
416 *
417 * Also, we need to reserve blocks for revoke journal entries and one for an
418 * overall header for the lot.
419 *
420 * Returns: the number of blocks reserved
421 */
422static unsigned int calc_reserved(struct gfs2_sbd *sdp)
423{
424	unsigned int reserved = 0;
425	unsigned int mbuf_limit, metabufhdrs_needed;
426	unsigned int dbuf_limit, databufhdrs_needed;
427	unsigned int revokes = 0;
428
429	mbuf_limit = buf_limit(sdp);
430	metabufhdrs_needed = (sdp->sd_log_commited_buf +
431			      (mbuf_limit - 1)) / mbuf_limit;
432	dbuf_limit = databuf_limit(sdp);
433	databufhdrs_needed = (sdp->sd_log_commited_databuf +
434			      (dbuf_limit - 1)) / dbuf_limit;
435
436	if (sdp->sd_log_commited_revoke > 0)
437		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
438					  sizeof(u64));
439
440	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
441		sdp->sd_log_commited_databuf + databufhdrs_needed +
442		revokes;
443	/* One for the overall header */
444	if (reserved)
445		reserved++;
446	return reserved;
447}
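A worked example of the sums above, taking the 4KB-block limits quoted in the comment (buf_limit 502, databuf_limit 251) as given:

#include <stdio.h>

int main(void)
{
	unsigned int mbufs = 600, dbufs = 100;
	unsigned int mbuf_hdrs = (mbufs + 502 - 1) / 502; /* 2 headers */
	unsigned int dbuf_hdrs = (dbufs + 251 - 1) / 251; /* 1 header  */
	unsigned int reserved = mbufs + mbuf_hdrs + dbufs + dbuf_hdrs
				+ 1; /* plus the one overall log header */

	printf("%u\n", reserved); /* 704 blocks, before revoke entries */
	return 0;
}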
448
449static unsigned int current_tail(struct gfs2_sbd *sdp)
450{
451	struct gfs2_ail *ai;
452	unsigned int tail;
453
454	spin_lock(&sdp->sd_ail_lock);
455
456	if (list_empty(&sdp->sd_ail1_list)) {
457		tail = sdp->sd_log_head;
458	} else {
459		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
460		tail = ai->ai_first;
461	}
462
463	spin_unlock(&sdp->sd_ail_lock);
464
465	return tail;
466}
467
468void gfs2_log_incr_head(struct gfs2_sbd *sdp)
469{
470	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
471		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);
472
473	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
474		sdp->sd_log_flush_head = 0;
475		sdp->sd_log_flush_wrapped = 1;
476	}
477}
478
479/**
480 * gfs2_log_write_endio - End of I/O for a log buffer
481 * @bh: The buffer head
482 * @uptodate: I/O Status
483 *
484 */
485
486static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
487{
488	struct gfs2_sbd *sdp = bh->b_private;
489	bh->b_private = NULL;
490
491	end_buffer_write_sync(bh, uptodate);
492	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
493		wake_up(&sdp->sd_log_flush_wait);
494}
495
496/**
497 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
498 * @sdp: The GFS2 superblock
499 *
500 * Returns: the buffer_head
501 */
502
503struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
504{
505	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
506	struct buffer_head *bh;
507
508	bh = sb_getblk(sdp->sd_vfs, blkno);
509	lock_buffer(bh);
510	memset(bh->b_data, 0, bh->b_size);
511	set_buffer_uptodate(bh);
512	clear_buffer_dirty(bh);
513	gfs2_log_incr_head(sdp);
514	atomic_inc(&sdp->sd_log_in_flight);
515	bh->b_private = sdp;
516	bh->b_end_io = gfs2_log_write_endio;
517
518	return bh;
519}
520
521/**
 522 * gfs2_fake_write_endio - End of I/O for a fake log buffer
523 * @bh: The buffer head
524 * @uptodate: The I/O Status
525 *
526 */
527
528static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
529{
530	struct buffer_head *real_bh = bh->b_private;
531	struct gfs2_bufdata *bd = real_bh->b_private;
532	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;
533
534	end_buffer_write_sync(bh, uptodate);
535	free_buffer_head(bh);
536	unlock_buffer(real_bh);
537	brelse(real_bh);
538	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
539		wake_up(&sdp->sd_log_flush_wait);
540}
541
542/**
543 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
544 * @sdp: the filesystem
 545 * @real: the buffer_head whose data the log buffer should point to
546 *
547 * Returns: the log buffer descriptor
548 */
549
550struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
551				      struct buffer_head *real)
552{
553	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
554	struct buffer_head *bh;
555
556	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
557	atomic_set(&bh->b_count, 1);
558	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
559	set_bh_page(bh, real->b_page, bh_offset(real));
560	bh->b_blocknr = blkno;
561	bh->b_size = sdp->sd_sb.sb_bsize;
562	bh->b_bdev = sdp->sd_vfs->s_bdev;
563	bh->b_private = real;
564	bh->b_end_io = gfs2_fake_write_endio;
565
566	gfs2_log_incr_head(sdp);
567	atomic_inc(&sdp->sd_log_in_flight);
568
569	return bh;
570}
571
572static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
573{
574	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
575
576	ail2_empty(sdp, new_tail);
577
578	atomic_add(dist, &sdp->sd_log_blks_free);
579	trace_gfs2_log_blocks(sdp, dist);
580	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
581			     sdp->sd_jdesc->jd_blocks);
582
583	sdp->sd_log_tail = new_tail;
584}
585
586/**
587 * log_write_header - Get and initialize a journal header buffer
 588 * @sdp: The GFS2 superblock
 * @flags: The log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: Non-zero when the log tail is expected to move
589 *
 590 * Writes a header block at the current flush head and pulls the log tail.
591 */
592
593static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
594{
595	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
596	struct buffer_head *bh;
597	struct gfs2_log_header *lh;
598	unsigned int tail;
599	u32 hash;
600
601	bh = sb_getblk(sdp->sd_vfs, blkno);
602	lock_buffer(bh);
603	memset(bh->b_data, 0, bh->b_size);
604	set_buffer_uptodate(bh);
605	clear_buffer_dirty(bh);
606
607	gfs2_ail1_empty(sdp);
608	tail = current_tail(sdp);
609
610	lh = (struct gfs2_log_header *)bh->b_data;
611	memset(lh, 0, sizeof(struct gfs2_log_header));
612	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
613	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
614	lh->lh_header.__pad0 = cpu_to_be64(0);
615	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
616	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
617	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
618	lh->lh_flags = cpu_to_be32(flags);
619	lh->lh_tail = cpu_to_be32(tail);
620	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
621	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
622	lh->lh_hash = cpu_to_be32(hash);
623
624	bh->b_end_io = end_buffer_write_sync;
625	get_bh(bh);
626	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
627		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
628	else
629		submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
630	wait_on_buffer(bh);
631
632	if (!buffer_uptodate(bh))
633		gfs2_io_error_bh(sdp, bh);
634	brelse(bh);
635
636	if (sdp->sd_log_tail != tail)
637		log_pull_tail(sdp, tail);
638	else
639		gfs2_assert_withdraw(sdp, !pull);
640
641	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
642	gfs2_log_incr_head(sdp);
643}
644
645static void log_flush_commit(struct gfs2_sbd *sdp)
646{
647	DEFINE_WAIT(wait);
648
649	if (atomic_read(&sdp->sd_log_in_flight)) {
650		do {
651			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
652					TASK_UNINTERRUPTIBLE);
653			if (atomic_read(&sdp->sd_log_in_flight))
654				io_schedule();
655		} while(atomic_read(&sdp->sd_log_in_flight));
656		finish_wait(&sdp->sd_log_flush_wait, &wait);
657	}
658
659	log_write_header(sdp, 0, 0);
660}
661
662static void gfs2_ordered_write(struct gfs2_sbd *sdp)
663{
664	struct gfs2_bufdata *bd;
665	struct buffer_head *bh;
666	LIST_HEAD(written);
667
668	gfs2_log_lock(sdp);
669	while (!list_empty(&sdp->sd_log_le_ordered)) {
670		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
671		list_move(&bd->bd_le.le_list, &written);
672		bh = bd->bd_bh;
673		if (!buffer_dirty(bh))
674			continue;
675		get_bh(bh);
676		gfs2_log_unlock(sdp);
677		lock_buffer(bh);
678		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
679			bh->b_end_io = end_buffer_write_sync;
680			submit_bh(WRITE_SYNC, bh);
681		} else {
682			unlock_buffer(bh);
683			brelse(bh);
684		}
685		gfs2_log_lock(sdp);
686	}
687	list_splice(&written, &sdp->sd_log_le_ordered);
688	gfs2_log_unlock(sdp);
689}
690
691static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
692{
693	struct gfs2_bufdata *bd;
694	struct buffer_head *bh;
695
696	gfs2_log_lock(sdp);
697	while (!list_empty(&sdp->sd_log_le_ordered)) {
698		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
699		bh = bd->bd_bh;
700		if (buffer_locked(bh)) {
701			get_bh(bh);
702			gfs2_log_unlock(sdp);
703			wait_on_buffer(bh);
704			brelse(bh);
705			gfs2_log_lock(sdp);
706			continue;
707		}
708		list_del_init(&bd->bd_le.le_list);
709	}
710	gfs2_log_unlock(sdp);
711}
712
713/**
714 * gfs2_log_flush - flush incore transaction(s)
715 * @sdp: the filesystem
716 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
717 *
718 */
719
720void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
721{
722	struct gfs2_ail *ai;
723
724	down_write(&sdp->sd_log_flush_lock);
725
726	/* Log might have been flushed while we waited for the flush lock */
727	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
728		up_write(&sdp->sd_log_flush_lock);
729		return;
730	}
731	trace_gfs2_log_flush(sdp, 1);
732
733	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
734	INIT_LIST_HEAD(&ai->ai_ail1_list);
735	INIT_LIST_HEAD(&ai->ai_ail2_list);
736
737	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
738		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
739		       sdp->sd_log_commited_buf);
740		gfs2_assert_withdraw(sdp, 0);
741	}
742	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
743		printk(KERN_INFO "GFS2: log databuf %u %u\n",
744		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
745		gfs2_assert_withdraw(sdp, 0);
746	}
747	gfs2_assert_withdraw(sdp,
748			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
749
750	sdp->sd_log_flush_head = sdp->sd_log_head;
751	sdp->sd_log_flush_wrapped = 0;
752	ai->ai_first = sdp->sd_log_flush_head;
753
754	gfs2_ordered_write(sdp);
755	lops_before_commit(sdp);
756	gfs2_ordered_wait(sdp);
757
758	if (sdp->sd_log_head != sdp->sd_log_flush_head)
759		log_flush_commit(sdp);
760	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
761		gfs2_log_lock(sdp);
762		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
763		trace_gfs2_log_blocks(sdp, -1);
764		gfs2_log_unlock(sdp);
765		log_write_header(sdp, 0, PULL);
766	}
767	lops_after_commit(sdp, ai);
768
769	gfs2_log_lock(sdp);
770	sdp->sd_log_head = sdp->sd_log_flush_head;
771	sdp->sd_log_blks_reserved = 0;
772	sdp->sd_log_commited_buf = 0;
773	sdp->sd_log_commited_databuf = 0;
774	sdp->sd_log_commited_revoke = 0;
775
776	spin_lock(&sdp->sd_ail_lock);
777	if (!list_empty(&ai->ai_ail1_list)) {
778		list_add(&ai->ai_list, &sdp->sd_ail1_list);
779		ai = NULL;
780	}
781	spin_unlock(&sdp->sd_ail_lock);
782	gfs2_log_unlock(sdp);
783	trace_gfs2_log_flush(sdp, 0);
784	up_write(&sdp->sd_log_flush_lock);
785
786	kfree(ai);
787}
788
789static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
790{
791	unsigned int reserved;
792	unsigned int unused;
793
794	gfs2_log_lock(sdp);
795
796	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
797	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
798		tr->tr_num_databuf_rm;
799	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
800			     (((int)sdp->sd_log_commited_databuf) >= 0));
801	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
802	reserved = calc_reserved(sdp);
803	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
804	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
805	atomic_add(unused, &sdp->sd_log_blks_free);
806	trace_gfs2_log_blocks(sdp, unused);
807	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
808			     sdp->sd_jdesc->jd_blocks);
809	sdp->sd_log_blks_reserved = reserved;
810
811	gfs2_log_unlock(sdp);
812}
813
814static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
815{
816	struct list_head *head = &tr->tr_list_buf;
817	struct gfs2_bufdata *bd;
818
819	gfs2_log_lock(sdp);
820	while (!list_empty(head)) {
821		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
822		list_del_init(&bd->bd_list_tr);
823		tr->tr_num_buf--;
824	}
825	gfs2_log_unlock(sdp);
826	gfs2_assert_warn(sdp, !tr->tr_num_buf);
827}
828
829/**
830 * gfs2_log_commit - Commit a transaction to the log
831 * @sdp: the filesystem
832 * @tr: the transaction
833 *
834 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
835 * or the total number of used blocks (pinned blocks plus AIL blocks)
836 * is greater than thresh2.
837 *
838 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
839 * journal size.
840 *
842 */
843
844void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
845{
846	log_refund(sdp, tr);
847	buf_lo_incore_commit(sdp, tr);
848
849	up_read(&sdp->sd_log_flush_lock);
850
851	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
852	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
853	    atomic_read(&sdp->sd_log_thresh2)))
854		wake_up(&sdp->sd_logd_waitq);
855}
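The wake-up test reduces to two comparisons; a sketch, with thresh1 and thresh2 standing for whatever fractions of the journal the mount code chose (names are illustrative):

static int should_wake_logd(unsigned int pinned, unsigned int jd_blocks,
                            unsigned int blks_free,
                            unsigned int thresh1, unsigned int thresh2)
{
	unsigned int used = jd_blocks - blks_free; /* pinned + AIL blocks */

	return pinned > thresh1 || used > thresh2;
}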
856
857/**
858 * gfs2_log_shutdown - write a shutdown header into a journal
859 * @sdp: the filesystem
860 *
861 */
862
863void gfs2_log_shutdown(struct gfs2_sbd *sdp)
864{
865	down_write(&sdp->sd_log_flush_lock);
866
867	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
868	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
869	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
870	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
871	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
872	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
873
874	sdp->sd_log_flush_head = sdp->sd_log_head;
875	sdp->sd_log_flush_wrapped = 0;
876
877	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
878			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);
879
880	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
881	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
882	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
883
884	sdp->sd_log_head = sdp->sd_log_flush_head;
885	sdp->sd_log_tail = sdp->sd_log_head;
886
887	up_write(&sdp->sd_log_flush_lock);
888}
889
890
891/**
892 * gfs2_meta_syncfs - sync all the buffers in a filesystem
893 * @sdp: the filesystem
894 *
895 */
896
897void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
898{
899	gfs2_log_flush(sdp, NULL);
900	for (;;) {
901		gfs2_ail1_start(sdp);
902		gfs2_ail1_wait(sdp);
903		if (gfs2_ail1_empty(sdp))
904			break;
905	}
906	gfs2_log_flush(sdp, NULL);
907}
908
909static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
910{
911	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
912}
913
914static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
915{
916	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
917	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
918}
919
920/**
921 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
922 * @sdp: Pointer to GFS2 superblock
923 *
924 * Also, periodically check to make sure that we're using the most recent
925 * journal index.
926 */
927
928int gfs2_logd(void *data)
929{
930	struct gfs2_sbd *sdp = data;
931	unsigned long t = 1;
932	DEFINE_WAIT(wait);
933	unsigned preflush;
934
935	while (!kthread_should_stop()) {
936
937		preflush = atomic_read(&sdp->sd_log_pinned);
938		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
939			gfs2_ail1_empty(sdp);
940			gfs2_log_flush(sdp, NULL);
941		}
942
943		if (gfs2_ail_flush_reqd(sdp)) {
944			gfs2_ail1_start(sdp);
945			gfs2_ail1_wait(sdp);
946			gfs2_ail1_empty(sdp);
947			gfs2_log_flush(sdp, NULL);
948		}
949
950		if (!gfs2_ail_flush_reqd(sdp))
951			wake_up(&sdp->sd_log_waitq);
952
953		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
954		if (freezing(current))
955			refrigerator();
956
957		do {
958			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
959					TASK_INTERRUPTIBLE);
960			if (!gfs2_ail_flush_reqd(sdp) &&
961			    !gfs2_jrnl_flush_reqd(sdp) &&
962			    !kthread_should_stop())
963				t = schedule_timeout(t);
964		} while(t && !gfs2_ail_flush_reqd(sdp) &&
965			!gfs2_jrnl_flush_reqd(sdp) &&
966			!kthread_should_stop());
967		finish_wait(&sdp->sd_logd_waitq, &wait);
968	}
969
970	return 0;
971}
972
fs/gfs2/log.c (Linux v5.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/slab.h>
   9#include <linux/spinlock.h>
  10#include <linux/completion.h>
  11#include <linux/buffer_head.h>
  12#include <linux/gfs2_ondisk.h>
  13#include <linux/crc32.h>
  14#include <linux/crc32c.h>
  15#include <linux/delay.h>
  16#include <linux/kthread.h>
  17#include <linux/freezer.h>
  18#include <linux/bio.h>
  19#include <linux/blkdev.h>
  20#include <linux/writeback.h>
  21#include <linux/list_sort.h>
  22
  23#include "gfs2.h"
  24#include "incore.h"
  25#include "bmap.h"
  26#include "glock.h"
  27#include "log.h"
  28#include "lops.h"
  29#include "meta_io.h"
  30#include "util.h"
  31#include "dir.h"
  32#include "trace_gfs2.h"
  33
  34/**
  35 * gfs2_struct2blk - compute number of log descriptor blocks needed
  36 * @sdp: the filesystem
  37 * @nstruct: the number of structures
  38 * @ssize: the size of the structures
  39 *
  40 * Compute the number of log descriptor blocks needed to hold a certain number
  41 * of structures of a certain size.
  42 *
  43 * Returns: the number of blocks needed (minimum is always 1)
  44 */
  45
  46unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
  47			     unsigned int ssize)
  48{
  49	unsigned int blks;
  50	unsigned int first, second;
  51
  52	blks = 1;
  53	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
  54
  55	if (nstruct > first) {
  56		second = (sdp->sd_sb.sb_bsize -
  57			  sizeof(struct gfs2_meta_header)) / ssize;
  58		blks += DIV_ROUND_UP(nstruct - first, second);
  59	}
  60
  61	return blks;
  62}
  63
  64/**
  65 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
  67 * @bd: The gfs2_bufdata to remove
  68 *
  69 * The ail lock _must_ be held when calling this function
  70 *
  71 */
  72
  73static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
  74{
  75	bd->bd_tr = NULL;
  76	list_del_init(&bd->bd_ail_st_list);
  77	list_del_init(&bd->bd_ail_gl_list);
  78	atomic_dec(&bd->bd_gl->gl_ail_count);
  79	brelse(bd->bd_bh);
  80}
  81
  82/**
  83 * gfs2_ail1_start_one - Start I/O on a part of the AIL
  84 * @sdp: the filesystem
  85 * @wbc: The writeback control structure
  86 * @tr: The transaction to write back
 * @withdraw: Set to true when a fatal I/O error is seen
  87 *
  88 */
  89
  90static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
  91			       struct writeback_control *wbc,
  92			       struct gfs2_trans *tr,
  93			       bool *withdraw)
  94__releases(&sdp->sd_ail_lock)
  95__acquires(&sdp->sd_ail_lock)
  96{
  97	struct gfs2_glock *gl = NULL;
  98	struct address_space *mapping;
  99	struct gfs2_bufdata *bd, *s;
 100	struct buffer_head *bh;
 101
 102	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
 103		bh = bd->bd_bh;
 104
 105		gfs2_assert(sdp, bd->bd_tr == tr);
 106
 107		if (!buffer_busy(bh)) {
 108			if (!buffer_uptodate(bh) &&
 109			    !test_and_set_bit(SDF_AIL1_IO_ERROR,
 110					      &sdp->sd_flags)) {
 111				gfs2_io_error_bh(sdp, bh);
 112				*withdraw = true;
 113			}
 114			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 115			continue;
 116		}
 117
 118		if (!buffer_dirty(bh))
 119			continue;
 120		if (gl == bd->bd_gl)
 121			continue;
 122		gl = bd->bd_gl;
 123		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
 124		mapping = bh->b_page->mapping;
 125		if (!mapping)
 126			continue;
 127		spin_unlock(&sdp->sd_ail_lock);
 128		generic_writepages(mapping, wbc);
 129		spin_lock(&sdp->sd_ail_lock);
 130		if (wbc->nr_to_write <= 0)
 131			break;
 132		return 1;
 133	}
 134
 135	return 0;
 136}
 137
 138
 139/**
 140 * gfs2_ail1_flush - start writeback of some ail1 entries 
 141 * @sdp: The super block
 142 * @wbc: The writeback control structure
 143 *
 144 * Writes back some ail1 entries, according to the limits in the
 145 * writeback control structure
 146 */
 147
 148void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 149{
 150	struct list_head *head = &sdp->sd_ail1_list;
 151	struct gfs2_trans *tr;
 152	struct blk_plug plug;
 153	bool withdraw = false;
 154
 155	trace_gfs2_ail_flush(sdp, wbc, 1);
 156	blk_start_plug(&plug);
 157	spin_lock(&sdp->sd_ail_lock);
 158restart:
 159	list_for_each_entry_reverse(tr, head, tr_list) {
 160		if (wbc->nr_to_write <= 0)
 161			break;
 162		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
 163			goto restart;
 164	}
 165	spin_unlock(&sdp->sd_ail_lock);
 166	blk_finish_plug(&plug);
 167	if (withdraw)
 168		gfs2_lm_withdraw(sdp, NULL);
 169	trace_gfs2_ail_flush(sdp, wbc, 0);
 170}
 171
 172/**
 173 * gfs2_ail1_start - start writeback of all ail1 entries
 174 * @sdp: The superblock
 175 */
 176
 177static void gfs2_ail1_start(struct gfs2_sbd *sdp)
 178{
 179	struct writeback_control wbc = {
 180		.sync_mode = WB_SYNC_NONE,
 181		.nr_to_write = LONG_MAX,
 182		.range_start = 0,
 183		.range_end = LLONG_MAX,
 184	};
 185
 186	return gfs2_ail1_flush(sdp, &wbc);
 187}
 188
 189/**
 190 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 191 * @sdp: the filesystem
 192 * @tr: the transaction
 * @withdraw: Set to true when a fatal I/O error is seen
 193 *
 194 */
 195
 196static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 197				bool *withdraw)
 198{
 199	struct gfs2_bufdata *bd, *s;
 200	struct buffer_head *bh;
 201
 202	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
 203					 bd_ail_st_list) {
 204		bh = bd->bd_bh;
 205		gfs2_assert(sdp, bd->bd_tr == tr);
 206		if (buffer_busy(bh))
 207			continue;
 208		if (!buffer_uptodate(bh) &&
 209		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
 210			gfs2_io_error_bh(sdp, bh);
 211			*withdraw = true;
 212		}
 213		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 214	}
 215}
 216
 217/**
 218 * gfs2_ail1_empty - Try to empty the ail1 lists
 219 * @sdp: The superblock
 220 *
 221 * Tries to empty the ail1 lists, starting with the oldest first
 222 */
 223
 224static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
 225{
 226	struct gfs2_trans *tr, *s;
 227	int oldest_tr = 1;
 228	int ret;
 229	bool withdraw = false;
 230
 231	spin_lock(&sdp->sd_ail_lock);
 232	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
 233		gfs2_ail1_empty_one(sdp, tr, &withdraw);
 234		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
 235			list_move(&tr->tr_list, &sdp->sd_ail2_list);
 236		else
 237			oldest_tr = 0;
 238	}
 239	ret = list_empty(&sdp->sd_ail1_list);
 240	spin_unlock(&sdp->sd_ail_lock);
 241
 242	if (withdraw)
 243		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");
 244
 245	return ret;
 246}
 247
 248static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
 249{
 250	struct gfs2_trans *tr;
 251	struct gfs2_bufdata *bd;
 252	struct buffer_head *bh;
 253
 254	spin_lock(&sdp->sd_ail_lock);
 255	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
 256		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
 257			bh = bd->bd_bh;
 258			if (!buffer_locked(bh))
 259				continue;
 260			get_bh(bh);
 261			spin_unlock(&sdp->sd_ail_lock);
 262			wait_on_buffer(bh);
 263			brelse(bh);
 264			return;
 265		}
 266	}
 267	spin_unlock(&sdp->sd_ail_lock);
 268}
 269
 270/**
 271 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
 272 * @sdp: the filesystem
 273 * @tr: the transaction
 274 *
 275 */
 276
 277static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 278{
 279	struct list_head *head = &tr->tr_ail2_list;
 280	struct gfs2_bufdata *bd;
 281
 282	while (!list_empty(head)) {
 283		bd = list_entry(head->prev, struct gfs2_bufdata,
 284				bd_ail_st_list);
 285		gfs2_assert(sdp, bd->bd_tr == tr);
 286		gfs2_remove_from_ail(bd);
 287	}
 288}
 289
 290static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 291{
 292	struct gfs2_trans *tr, *safe;
 293	unsigned int old_tail = sdp->sd_log_tail;
 294	int wrap = (new_tail < old_tail);
 295	int a, b, rm;
 296
 297	spin_lock(&sdp->sd_ail_lock);
 298
 299	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
 300		a = (old_tail <= tr->tr_first);
 301		b = (tr->tr_first < new_tail);
 302		rm = (wrap) ? (a || b) : (a && b);
 303		if (!rm)
 304			continue;
 305
 306		gfs2_ail2_empty_one(sdp, tr);
 307		list_del(&tr->tr_list);
 308		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
 309		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
 310		kfree(tr);
 311	}
 312
 313	spin_unlock(&sdp->sd_ail_lock);
 314}
 315
 316/**
 317 * gfs2_log_release - Release a given number of log blocks
 318 * @sdp: The GFS2 superblock
 319 * @blks: The number of blocks
 320 *
 321 */
 322
 323void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 324{
 325
 326	atomic_add(blks, &sdp->sd_log_blks_free);
 327	trace_gfs2_log_blocks(sdp, blks);
 328	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 329				  sdp->sd_jdesc->jd_blocks);
 330	up_read(&sdp->sd_log_flush_lock);
 331}
 332
 333/**
 334 * gfs2_log_reserve - Make a log reservation
 335 * @sdp: The GFS2 superblock
 336 * @blks: The number of blocks to reserve
 337 *
 338 * Note that we never give out the last few blocks of the journal. That's
 339 * due to the fact that there is a small number of header blocks
 340 * associated with each log flush. The exact number can't be known until
 341 * flush time, so we ensure that we have just enough free blocks at all
 342 * times to avoid running out during a log flush.
 343 *
 344 * We no longer flush the log here, instead we wake up logd to do that
 345 * for us. To avoid the thundering herd and to ensure that we deal fairly
 346 * with queued waiters, we use an exclusive wait. This means that when we
 347 * get woken with enough journal space to get our reservation, we need to
 348 * wake the next waiter on the list.
 349 *
 350 * Returns: errno
 351 */
 352
 353int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 354{
 355	int ret = 0;
 356	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
 357	unsigned wanted = blks + reserved_blks;
 358	DEFINE_WAIT(wait);
 359	int did_wait = 0;
 360	unsigned int free_blocks;
 361
 362	if (gfs2_assert_warn(sdp, blks) ||
 363	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
 364		return -EINVAL;
 365	atomic_add(blks, &sdp->sd_log_blks_needed);
 366retry:
 367	free_blocks = atomic_read(&sdp->sd_log_blks_free);
 368	if (unlikely(free_blocks <= wanted)) {
 369		do {
 370			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
 371					TASK_UNINTERRUPTIBLE);
 372			wake_up(&sdp->sd_logd_waitq);
 373			did_wait = 1;
 374			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
 375				io_schedule();
 376			free_blocks = atomic_read(&sdp->sd_log_blks_free);
 377		} while(free_blocks <= wanted);
 378		finish_wait(&sdp->sd_log_waitq, &wait);
 379	}
 380	atomic_inc(&sdp->sd_reserving_log);
 381	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
 382				free_blocks - blks) != free_blocks) {
 383		if (atomic_dec_and_test(&sdp->sd_reserving_log))
 384			wake_up(&sdp->sd_reserving_log_wait);
 385		goto retry;
 386	}
 387	atomic_sub(blks, &sdp->sd_log_blks_needed);
 388	trace_gfs2_log_blocks(sdp, -blks);
 389
 390	/*
 391	 * If we waited, then so might others, wake them up _after_ we get
 392	 * our share of the log.
 393	 */
 394	if (unlikely(did_wait))
 395		wake_up(&sdp->sd_log_waitq);
 396
 397	down_read(&sdp->sd_log_flush_lock);
 398	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
 399		gfs2_log_release(sdp, blks);
 400		ret = -EROFS;
 401	}
 402	if (atomic_dec_and_test(&sdp->sd_reserving_log))
 403		wake_up(&sdp->sd_reserving_log_wait);
 404	return ret;
 405}
 406
 407/**
 408 * log_distance - Compute distance between two journal blocks
 409 * @sdp: The GFS2 superblock
 410 * @newer: The most recent journal block of the pair
 411 * @older: The older journal block of the pair
 412 *
 413 *   Compute the distance (in the journal direction) between two
 414 *   blocks in the journal
 415 *
 416 * Returns: the distance in blocks
 417 */
 418
 419static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
 420					unsigned int older)
 421{
 422	int dist;
 423
 424	dist = newer - older;
 425	if (dist < 0)
 426		dist += sdp->sd_jdesc->jd_blocks;
 427
 428	return dist;
 429}
 430
 431/**
 432 * calc_reserved - Calculate the number of blocks to reserve when
 433 *                 refunding a transaction's unused buffers.
 434 * @sdp: The GFS2 superblock
 435 *
 436 * This is complex.  We need to reserve room for all our currently used
 437 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and 
 438 * all our journaled data buffers for journaled files (e.g. files in the 
 439 * meta_fs like rindex, or files for which chattr +j was done.)
 440 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 441 * will count it as free space (sd_log_blks_free) and corruption will follow.
 442 *
 443 * We can have metadata bufs and jdata bufs in the same journal.  So each
 444 * type gets its own log header, for which we need to reserve a block.
 445 * In fact, each type has the potential for needing more than one header 
 446 * in cases where we have more buffers than will fit on a journal page.
 447 * Metadata journal entries take up half the space of journaled buffer entries.
 448 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 449 * databuf_limit (251) before they cause a wrap around.
 450 *
 451 * Also, we need to reserve blocks for revoke journal entries and one for an
 452 * overall header for the lot.
 453 *
 454 * Returns: the number of blocks reserved
 455 */
 456static unsigned int calc_reserved(struct gfs2_sbd *sdp)
 457{
 458	unsigned int reserved = 0;
 459	unsigned int mbuf;
 460	unsigned int dbuf;
 461	struct gfs2_trans *tr = sdp->sd_log_tr;
 462
 463	if (tr) {
 464		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
 465		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
 466		reserved = mbuf + dbuf;
 467		/* Account for header blocks */
 468		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
 469		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
 470	}
 471
 472	if (sdp->sd_log_commited_revoke > 0)
 473		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
 474					  sizeof(u64));
 475	/* One for the overall header */
 476	if (reserved)
 477		reserved++;
 478	return reserved;
 479}
 480
 481static unsigned int current_tail(struct gfs2_sbd *sdp)
 482{
 483	struct gfs2_trans *tr;
 484	unsigned int tail;
 485
 486	spin_lock(&sdp->sd_ail_lock);
 487
 488	if (list_empty(&sdp->sd_ail1_list)) {
 489		tail = sdp->sd_log_head;
 490	} else {
 491		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
 492				tr_list);
 493		tail = tr->tr_first;
 494	}
 495
 496	spin_unlock(&sdp->sd_ail_lock);
 497
 498	return tail;
 499}
 500
 501static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 502{
 503	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
 504
 505	ail2_empty(sdp, new_tail);
 506
 507	atomic_add(dist, &sdp->sd_log_blks_free);
 508	trace_gfs2_log_blocks(sdp, dist);
 509	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 510			     sdp->sd_jdesc->jd_blocks);
 511
 512	sdp->sd_log_tail = new_tail;
 513}
 514
 515
 516static void log_flush_wait(struct gfs2_sbd *sdp)
 517{
 518	DEFINE_WAIT(wait);
 519
 520	if (atomic_read(&sdp->sd_log_in_flight)) {
 521		do {
 522			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
 523					TASK_UNINTERRUPTIBLE);
 524			if (atomic_read(&sdp->sd_log_in_flight))
 525				io_schedule();
 526		} while(atomic_read(&sdp->sd_log_in_flight));
 527		finish_wait(&sdp->sd_log_flush_wait, &wait);
 528	}
 529}
 530
 531static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
 532{
 533	struct gfs2_inode *ipa, *ipb;
 534
 535	ipa = list_entry(a, struct gfs2_inode, i_ordered);
 536	ipb = list_entry(b, struct gfs2_inode, i_ordered);
 537
 538	if (ipa->i_no_addr < ipb->i_no_addr)
 539		return -1;
 540	if (ipa->i_no_addr > ipb->i_no_addr)
 541		return 1;
 542	return 0;
 543}
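Sorting the ordered-write list by i_no_addr means the filemap_fdatawrite() calls that follow tend to issue I/O in ascending disk order. The same three-way comparison in self-contained form (demo types, not the kernel's):

#include <stdlib.h>

struct demo_inode {
	unsigned long long no_addr; /* stands in for i_no_addr */
};

static int demo_cmp(const void *a, const void *b)
{
	const struct demo_inode *ia = a, *ib = b;

	return (ia->no_addr > ib->no_addr) - (ia->no_addr < ib->no_addr);
}

/* Usage: qsort(inodes, n, sizeof(*inodes), demo_cmp); */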
 544
 545static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 546{
 547	struct gfs2_inode *ip;
 548	LIST_HEAD(written);
 549
 550	spin_lock(&sdp->sd_ordered_lock);
 551	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
 552	while (!list_empty(&sdp->sd_log_ordered)) {
 553		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
 554		if (ip->i_inode.i_mapping->nrpages == 0) {
 555			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
 556			list_del(&ip->i_ordered);
 557			continue;
 558		}
 559		list_move(&ip->i_ordered, &written);
 560		spin_unlock(&sdp->sd_ordered_lock);
 561		filemap_fdatawrite(ip->i_inode.i_mapping);
 562		spin_lock(&sdp->sd_ordered_lock);
 563	}
 564	list_splice(&written, &sdp->sd_log_ordered);
 565	spin_unlock(&sdp->sd_ordered_lock);
 566}
 567
 568static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
 569{
 570	struct gfs2_inode *ip;
 571
 572	spin_lock(&sdp->sd_ordered_lock);
 573	while (!list_empty(&sdp->sd_log_ordered)) {
 574		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
 575		list_del(&ip->i_ordered);
 576		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
 577		if (ip->i_inode.i_mapping->nrpages == 0)
 578			continue;
 579		spin_unlock(&sdp->sd_ordered_lock);
 580		filemap_fdatawait(ip->i_inode.i_mapping);
 581		spin_lock(&sdp->sd_ordered_lock);
 582	}
 583	spin_unlock(&sdp->sd_ordered_lock);
 584}
 585
 586void gfs2_ordered_del_inode(struct gfs2_inode *ip)
 587{
 588	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 589
 590	spin_lock(&sdp->sd_ordered_lock);
 591	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
 592		list_del(&ip->i_ordered);
 593	spin_unlock(&sdp->sd_ordered_lock);
 594}
 595
 596void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 597{
 598	struct buffer_head *bh = bd->bd_bh;
 599	struct gfs2_glock *gl = bd->bd_gl;
 600
 601	bh->b_private = NULL;
 602	bd->bd_blkno = bh->b_blocknr;
 603	gfs2_remove_from_ail(bd); /* drops ref on bh */
 604	bd->bd_bh = NULL;
 605	sdp->sd_log_num_revoke++;
 606	if (atomic_inc_return(&gl->gl_revokes) == 1)
 607		gfs2_glock_hold(gl);
 608	set_bit(GLF_LFLUSH, &gl->gl_flags);
 609	list_add(&bd->bd_list, &sdp->sd_log_revokes);
 610}
 611
 612void gfs2_write_revokes(struct gfs2_sbd *sdp)
 613{
 614	struct gfs2_trans *tr;
 615	struct gfs2_bufdata *bd, *tmp;
 616	int have_revokes = 0;
 617	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 618
 619	gfs2_ail1_empty(sdp);
 620	spin_lock(&sdp->sd_ail_lock);
 621	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
 622		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
 623			if (list_empty(&bd->bd_list)) {
 624				have_revokes = 1;
 625				goto done;
 626			}
 627		}
 628	}
 629done:
 630	spin_unlock(&sdp->sd_ail_lock);
 631	if (have_revokes == 0)
 632		return;
 633	while (sdp->sd_log_num_revoke > max_revokes)
 634		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
 635	max_revokes -= sdp->sd_log_num_revoke;
 636	if (!sdp->sd_log_num_revoke) {
 637		atomic_dec(&sdp->sd_log_blks_free);
 638		/* If no blocks have been reserved, we need to also
 639		 * reserve a block for the header */
 640		if (!sdp->sd_log_blks_reserved)
 641			atomic_dec(&sdp->sd_log_blks_free);
 642	}
 643	gfs2_log_lock(sdp);
 644	spin_lock(&sdp->sd_ail_lock);
 645	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
 646		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
 647			if (max_revokes == 0)
 648				goto out_of_blocks;
 649			if (!list_empty(&bd->bd_list))
 650				continue;
 651			gfs2_add_revoke(sdp, bd);
 652			max_revokes--;
 653		}
 654	}
 655out_of_blocks:
 656	spin_unlock(&sdp->sd_ail_lock);
 657	gfs2_log_unlock(sdp);
 658
 659	if (!sdp->sd_log_num_revoke) {
 660		atomic_inc(&sdp->sd_log_blks_free);
 661		if (!sdp->sd_log_blks_reserved)
 662			atomic_inc(&sdp->sd_log_blks_free);
 663	}
 664}
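The max_revokes arithmetic grows the revoke budget one journal block at a time until it covers the revokes already queued; whatever headroom remains can then be filled from the AIL. A sketch assuming a 4096-byte block and the same hypothetical 72/24-byte descriptor and meta-header sizes used earlier:

#include <stdio.h>

int main(void)
{
	unsigned int bsize = 4096, num_revoke = 1200;
	unsigned int max_revokes = (bsize - 72) / 8; /* 503 fit in block one */

	while (num_revoke > max_revokes)
		max_revokes += (bsize - 24) / 8;     /* +509 per extra block */
	max_revokes -= num_revoke;

	printf("%u\n", max_revokes); /* 321 more revokes fit without growing */
	return 0;
}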
 665
 666/**
 667 * gfs2_write_log_header - Write a journal log header buffer at lblock
 668 * @sdp: The GFS2 superblock
 669 * @jd: journal descriptor of the journal to which we are writing
 670 * @seq: sequence number
 671 * @tail: tail of the log
 672 * @lblock: value for lh_blkno (block number relative to start of journal)
 673 * @flags: log header flags GFS2_LOG_HEAD_*
 674 * @op_flags: flags to pass to the bio
 675 *
 676 * Builds and checksums the header, writes it, and waits for the log I/O.
 677 */
 678
 679void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 680			   u64 seq, u32 tail, u32 lblock, u32 flags,
 681			   int op_flags)
 682{
 683	struct gfs2_log_header *lh;
 684	u32 hash, crc;
 685	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 686	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 687	struct timespec64 tv;
 688	struct super_block *sb = sdp->sd_vfs;
 689	u64 dblock;
 690
 691	lh = page_address(page);
 692	clear_page(lh);
 693
 694	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 695	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
 696	lh->lh_header.__pad0 = cpu_to_be64(0);
 697	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
 698	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
 699	lh->lh_sequence = cpu_to_be64(seq);
 700	lh->lh_flags = cpu_to_be32(flags);
 701	lh->lh_tail = cpu_to_be32(tail);
 702	lh->lh_blkno = cpu_to_be32(lblock);
 703	hash = ~crc32(~0, lh, LH_V1_SIZE);
 704	lh->lh_hash = cpu_to_be32(hash);
 705
 706	ktime_get_coarse_real_ts64(&tv);
 707	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
 708	lh->lh_sec = cpu_to_be64(tv.tv_sec);
 709	if (!list_empty(&jd->extent_list))
 710		dblock = gfs2_log_bmap(sdp);
 711	else {
 712		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
 713		if (gfs2_assert_withdraw(sdp, ret == 0))
 714			return;
 715	}
 716	lh->lh_addr = cpu_to_be64(dblock);
 717	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
 718
 719	/* We may only write local statfs, quota, etc., when writing to our
 720	   own journal. The values are left 0 when recovering a journal
 721	   different from our own. */
 722	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
 723		lh->lh_statfs_addr =
 724			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
 725		lh->lh_quota_addr =
 726			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
 727
 728		spin_lock(&sdp->sd_statfs_spin);
 729		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
 730		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
 731		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
 732		spin_unlock(&sdp->sd_statfs_spin);
 733	}
 734
 735	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
 736
 737	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
 738		     sb->s_blocksize - LH_V1_SIZE - 4);
 739	lh->lh_crc = cpu_to_be32(crc);
 740
 741	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
 742	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
 743	log_flush_wait(sdp);
 744}
 745
 746/**
 747 * log_write_header - Get and initialize a journal header buffer
 748 * @sdp: The GFS2 superblock
 749 * @flags: The log header flags, including log header origin
 750 *
 751 * Writes a header block at the current flush head and pulls the log tail.
 752 */
 753
 754static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 755{
 756	unsigned int tail;
 757	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
 758	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 759
 760	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
 761	tail = current_tail(sdp);
 762
 763	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 764		gfs2_ordered_wait(sdp);
 765		log_flush_wait(sdp);
 766		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 767	}
 768	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
 769	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
 770			      sdp->sd_log_flush_head, flags, op_flags);
 771
 772	if (sdp->sd_log_tail != tail)
 773		log_pull_tail(sdp, tail);
 774}
 775
 776/**
 777 * gfs2_log_flush - flush incore transaction(s)
 778 * @sdp: the filesystem
 779 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 780 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 781 *
 782 */
 783
 784void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 785{
 786	struct gfs2_trans *tr;
 787	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 788
 789	down_write(&sdp->sd_log_flush_lock);
 790
 791	/* Log might have been flushed while we waited for the flush lock */
 792	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
 793		up_write(&sdp->sd_log_flush_lock);
 794		return;
 795	}
 796	trace_gfs2_log_flush(sdp, 1, flags);
 797
 798	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
 799		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 800
 801	sdp->sd_log_flush_head = sdp->sd_log_head;
 802	tr = sdp->sd_log_tr;
 803	if (tr) {
 804		sdp->sd_log_tr = NULL;
 805		INIT_LIST_HEAD(&tr->tr_ail1_list);
 806		INIT_LIST_HEAD(&tr->tr_ail2_list);
 807		tr->tr_first = sdp->sd_log_flush_head;
 808		if (unlikely (state == SFS_FROZEN))
 809			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
 810	}
 811
 812	if (unlikely(state == SFS_FROZEN))
 813		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 814	gfs2_assert_withdraw(sdp,
 815			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
 816
 817	gfs2_ordered_write(sdp);
 818	lops_before_commit(sdp, tr);
 819	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
 820
 821	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
 822		log_flush_wait(sdp);
 823		log_write_header(sdp, flags);
 824	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
 825		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
 826		trace_gfs2_log_blocks(sdp, -1);
 827		log_write_header(sdp, flags);
 828	}
 829	lops_after_commit(sdp, tr);
 830
 831	gfs2_log_lock(sdp);
 832	sdp->sd_log_head = sdp->sd_log_flush_head;
 833	sdp->sd_log_blks_reserved = 0;
 834	sdp->sd_log_commited_revoke = 0;
 835
 836	spin_lock(&sdp->sd_ail_lock);
 837	if (tr && !list_empty(&tr->tr_ail1_list)) {
 838		list_add(&tr->tr_list, &sdp->sd_ail1_list);
 839		tr = NULL;
 840	}
 841	spin_unlock(&sdp->sd_ail_lock);
 842	gfs2_log_unlock(sdp);
 843
 844	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
 845		if (!sdp->sd_log_idle) {
 846			for (;;) {
 847				gfs2_ail1_start(sdp);
 848				gfs2_ail1_wait(sdp);
 849				if (gfs2_ail1_empty(sdp))
 850					break;
 851			}
 852			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
 853			trace_gfs2_log_blocks(sdp, -1);
 854			log_write_header(sdp, flags);
 855			sdp->sd_log_head = sdp->sd_log_flush_head;
 856		}
 857		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 858			     GFS2_LOG_HEAD_FLUSH_FREEZE))
 859			gfs2_log_shutdown(sdp);
 860		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
 861			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 862	}
 863
 864	trace_gfs2_log_flush(sdp, 0, flags);
 865	up_write(&sdp->sd_log_flush_lock);
 866
 867	kfree(tr);
 868}
 869
 870/**
 871 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 872 * @old: Original transaction to be expanded
 873 * @new: New transaction to be merged
 874 */
 875
 876static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
 877{
 878	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
 879
 880	old->tr_num_buf_new	+= new->tr_num_buf_new;
 881	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
 882	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
 883	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
 884	old->tr_num_revoke	+= new->tr_num_revoke;
 885
 886	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
 887	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
 888}
 889
 890static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 891{
 892	unsigned int reserved;
 893	unsigned int unused;
 894	unsigned int maxres;
 895
 896	gfs2_log_lock(sdp);
 897
 898	if (sdp->sd_log_tr) {
 899		gfs2_merge_trans(sdp->sd_log_tr, tr);
 900	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
 901		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
 902		sdp->sd_log_tr = tr;
 903		set_bit(TR_ATTACHED, &tr->tr_flags);
 904	}
 905
 906	sdp->sd_log_commited_revoke += tr->tr_num_revoke;
 907	reserved = calc_reserved(sdp);
 908	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
 909	gfs2_assert_withdraw(sdp, maxres >= reserved);
 910	unused = maxres - reserved;
 911	atomic_add(unused, &sdp->sd_log_blks_free);
 912	trace_gfs2_log_blocks(sdp, unused);
 913	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 914			     sdp->sd_jdesc->jd_blocks);
 915	sdp->sd_log_blks_reserved = reserved;
 916
 917	gfs2_log_unlock(sdp);
 918}
 919
 920/**
 921 * gfs2_log_commit - Commit a transaction to the log
 922 * @sdp: the filesystem
 923 * @tr: the transaction
 924 *
 925 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
 926 * or the total number of used blocks (pinned blocks plus AIL blocks)
 927 * is greater than thresh2.
 928 *
 929 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 930 * journal size.
 931 *
 933 */
 934
 935void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 936{
 937	log_refund(sdp, tr);
 938
 939	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
 940	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
 941	    atomic_read(&sdp->sd_log_thresh2)))
 942		wake_up(&sdp->sd_logd_waitq);
 943}
 944
 945/**
 946 * gfs2_log_shutdown - write a shutdown header into a journal
 947 * @sdp: the filesystem
 948 *
 949 */
 950
 951void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 952{
 953	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
 954	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 955	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 956
 957	sdp->sd_log_flush_head = sdp->sd_log_head;
 958
 959	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
 960
 961	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
 962	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
 963
 964	sdp->sd_log_head = sdp->sd_log_flush_head;
 965	sdp->sd_log_tail = sdp->sd_log_head;
 966}
 967
 968static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
 969{
 970	return (atomic_read(&sdp->sd_log_pinned) +
 971		atomic_read(&sdp->sd_log_blks_needed) >=
 972		atomic_read(&sdp->sd_log_thresh1));
 973}
 974
 975static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
 976{
 977	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
 978
 979	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
 980		return 1;
 981
 982	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
 983		atomic_read(&sdp->sd_log_thresh2);
 984}
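Relative to the v3.1 listing above, both predicates now also count sd_log_blks_needed, the blocks that sleepers in gfs2_log_reserve are waiting for, so logd starts flushing before the journal actually runs dry. Restated as plain functions (names are illustrative):

static int jrnl_flush_reqd(unsigned int pinned, unsigned int needed,
                           unsigned int thresh1)
{
	return pinned + needed >= thresh1;
}

static int ail_flush_reqd(unsigned int jd_blocks, unsigned int blks_free,
                          unsigned int needed, unsigned int thresh2)
{
	return (jd_blocks - blks_free) + needed >= thresh2;
}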
 985
 986/**
 987 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 988 * @sdp: Pointer to GFS2 superblock
 989 *
 990 * Also, periodically check to make sure that we're using the most recent
 991 * journal index.
 992 */
 993
 994int gfs2_logd(void *data)
 995{
 996	struct gfs2_sbd *sdp = data;
 997	unsigned long t = 1;
 998	DEFINE_WAIT(wait);
 999	bool did_flush;
1000
1001	while (!kthread_should_stop()) {
1002
1003		/* Check for errors writing to the journal */
1004		if (sdp->sd_log_error) {
1005			gfs2_lm_withdraw(sdp,
1006					 "GFS2: fsid=%s: error %d: "
1007					 "withdrawing the file system to "
1008					 "prevent further damage.\n",
1009					 sdp->sd_fsname, sdp->sd_log_error);
1010		}
1011
1012		did_flush = false;
1013		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
1014			gfs2_ail1_empty(sdp);
1015			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1016				       GFS2_LFC_LOGD_JFLUSH_REQD);
1017			did_flush = true;
1018		}
1019
1020		if (gfs2_ail_flush_reqd(sdp)) {
1021			gfs2_ail1_start(sdp);
1022			gfs2_ail1_wait(sdp);
1023			gfs2_ail1_empty(sdp);
1024			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1025				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
1026			did_flush = true;
1027		}
1028
1029		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
1030			wake_up(&sdp->sd_log_waitq);
1031
1032		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
1033
1034		try_to_freeze();
1035
1036		do {
1037			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
1038					TASK_INTERRUPTIBLE);
1039			if (!gfs2_ail_flush_reqd(sdp) &&
1040			    !gfs2_jrnl_flush_reqd(sdp) &&
1041			    !kthread_should_stop())
1042				t = schedule_timeout(t);
1043		} while(t && !gfs2_ail_flush_reqd(sdp) &&
1044			!gfs2_jrnl_flush_reqd(sdp) &&
1045			!kthread_should_stop());
1046		finish_wait(&sdp->sd_logd_waitq, &wait);
1047	}
1048
1049	return 0;
1050}
1051