v4.6
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/gfs2_ondisk.h>
 16#include <linux/crc32.h>
 
 17#include <linux/delay.h>
 18#include <linux/kthread.h>
 19#include <linux/freezer.h>
 20#include <linux/bio.h>
 21#include <linux/blkdev.h>
 22#include <linux/writeback.h>
 23#include <linux/list_sort.h>
 24
 25#include "gfs2.h"
 26#include "incore.h"
 27#include "bmap.h"
 28#include "glock.h"
 29#include "log.h"
 30#include "lops.h"
 31#include "meta_io.h"
 32#include "util.h"
 33#include "dir.h"
 34#include "trace_gfs2.h"
 35
 36/**
 37 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 38 * @sdp: the filesystem
 39 * @nstruct: the number of structures
 40 * @ssize: the size of the structures
 41 *
 42 * Compute the number of log descriptor blocks needed to hold a certain number
 43 * of structures of a certain size.
 44 *
 45 * Returns: the number of blocks needed (minimum is always 1)
 46 */
 47
 48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 49			     unsigned int ssize)
 50{
 51	unsigned int blks;
 52	unsigned int first, second;
 53
 
 54	blks = 1;
 55	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
 56
 57	if (nstruct > first) {
 58		second = (sdp->sd_sb.sb_bsize -
 59			  sizeof(struct gfs2_meta_header)) / ssize;
 60		blks += DIV_ROUND_UP(nstruct - first, second);
 61	}
 62
 63	return blks;
 64}
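/*
 * Editorial worked example, not part of the kernel source (block and struct
 * sizes are assumed here purely for illustration): with a 4096-byte block,
 * a 72-byte log descriptor header, a 24-byte meta header and
 * ssize = sizeof(u64) = 8, the first block holds (4096 - 72) / 8 = 503
 * entries and each following block holds (4096 - 24) / 8 = 509, so
 * nstruct = 1200 needs 1 + DIV_ROUND_UP(1200 - 503, 509) = 3 blocks.
 */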
 65
 66/**
 67 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 69 * @bd: The gfs2_bufdata to remove
 70 *
 71 * The ail lock _must_ be held when calling this function
 72 *
 73 */
 74
 75void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
 76{
 77	bd->bd_tr = NULL;
 78	list_del_init(&bd->bd_ail_st_list);
 79	list_del_init(&bd->bd_ail_gl_list);
 80	atomic_dec(&bd->bd_gl->gl_ail_count);
 81	brelse(bd->bd_bh);
 82}
 83
 84/**
 85 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 86 * @sdp: the filesystem
 87 * @wbc: The writeback control structure
 88 * @tr: The transaction to start I/O on
 89 *
 90 */
 91
 92static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
 93			       struct writeback_control *wbc,
 94			       struct gfs2_trans *tr)
 95__releases(&sdp->sd_ail_lock)
 96__acquires(&sdp->sd_ail_lock)
 97{
 98	struct gfs2_glock *gl = NULL;
 99	struct address_space *mapping;
100	struct gfs2_bufdata *bd, *s;
101	struct buffer_head *bh;
 
102
103	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
104		bh = bd->bd_bh;
105
106		gfs2_assert(sdp, bd->bd_tr == tr);
107
108		if (!buffer_busy(bh)) {
109			if (!buffer_uptodate(bh))
110				gfs2_io_error_bh(sdp, bh);
111			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
112			continue;
113		}
114
115		if (!buffer_dirty(bh))
116			continue;
117		if (gl == bd->bd_gl)
118			continue;
119		gl = bd->bd_gl;
120		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
121		mapping = bh->b_page->mapping;
122		if (!mapping)
123			continue;
124		spin_unlock(&sdp->sd_ail_lock);
125		generic_writepages(mapping, wbc);
126		spin_lock(&sdp->sd_ail_lock);
127		if (wbc->nr_to_write <= 0)
128			break;
129		return 1;
130	}
131
132	return 0;
133}
134
135
136/**
137 * gfs2_ail1_flush - start writeback of some ail1 entries 
138 * @sdp: The super block
139 * @wbc: The writeback control structure
140 *
141 * Writes back some ail1 entries, according to the limits in the
142 * writeback control structure
143 */
144
145void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
146{
147	struct list_head *head = &sdp->sd_ail1_list;
148	struct gfs2_trans *tr;
149	struct blk_plug plug;
 
 
150
151	trace_gfs2_ail_flush(sdp, wbc, 1);
152	blk_start_plug(&plug);
153	spin_lock(&sdp->sd_ail_lock);
154restart:
155	list_for_each_entry_reverse(tr, head, tr_list) {
156		if (wbc->nr_to_write <= 0)
157			break;
158		if (gfs2_ail1_start_one(sdp, wbc, tr))
159			goto restart;
160	}
 
161	spin_unlock(&sdp->sd_ail_lock);
162	blk_finish_plug(&plug);
163	trace_gfs2_ail_flush(sdp, wbc, 0);
164}
165
166/**
167 * gfs2_ail1_start - start writeback of all ail1 entries
168 * @sdp: The superblock
169 */
170
171static void gfs2_ail1_start(struct gfs2_sbd *sdp)
172{
173	struct writeback_control wbc = {
174		.sync_mode = WB_SYNC_NONE,
175		.nr_to_write = LONG_MAX,
176		.range_start = 0,
177		.range_end = LLONG_MAX,
178	};
179
180	return gfs2_ail1_flush(sdp, &wbc);
181}
182
 183/**
 184 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 185 * @sdp: the filesystem
 186 * @tr: the transaction
 187 *
 188 */
189
190static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 
191{
192	struct gfs2_bufdata *bd, *s;
193	struct buffer_head *bh;
 
194
195	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
196					 bd_ail_st_list) {
197		bh = bd->bd_bh;
198		gfs2_assert(sdp, bd->bd_tr == tr);
199		if (buffer_busy(bh))
200			continue;
201		if (!buffer_uptodate(bh))
202			gfs2_io_error_bh(sdp, bh);
203		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
204	}
205
206}
207
 208/**
 209 * gfs2_ail1_empty - Try to empty the ail1 lists
 210 * @sdp: The superblock
 211 *
 212 * Tries to empty the ail1 lists, starting with the oldest first
 213 */
214
215static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
216{
217	struct gfs2_trans *tr, *s;
218	int oldest_tr = 1;
219	int ret;
220
221	spin_lock(&sdp->sd_ail_lock);
222	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
223		gfs2_ail1_empty_one(sdp, tr);
224		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
225			list_move(&tr->tr_list, &sdp->sd_ail2_list);
226		else
227			oldest_tr = 0;
228	}
229	ret = list_empty(&sdp->sd_ail1_list);
 
230	spin_unlock(&sdp->sd_ail_lock);
231
232	return ret;
233}
234
235static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
236{
237	struct gfs2_trans *tr;
238	struct gfs2_bufdata *bd;
239	struct buffer_head *bh;
240
241	spin_lock(&sdp->sd_ail_lock);
242	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
243		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
244			bh = bd->bd_bh;
245			if (!buffer_locked(bh))
246				continue;
247			get_bh(bh);
248			spin_unlock(&sdp->sd_ail_lock);
249			wait_on_buffer(bh);
250			brelse(bh);
251			return;
252		}
253	}
254	spin_unlock(&sdp->sd_ail_lock);
255}
256
257/**
 258 * gfs2_ail2_empty_one - Empty a transaction's ail2 list
 259 * @sdp: the filesystem
 260 * @tr: the transaction
261 *
262 */
263
264static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
265{
266	struct list_head *head = &tr->tr_ail2_list;
267	struct gfs2_bufdata *bd;
268
269	while (!list_empty(head)) {
270		bd = list_entry(head->prev, struct gfs2_bufdata,
271				bd_ail_st_list);
272		gfs2_assert(sdp, bd->bd_tr == tr);
273		gfs2_remove_from_ail(bd);
274	}
275}
276
277static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
278{
279	struct gfs2_trans *tr, *safe;
280	unsigned int old_tail = sdp->sd_log_tail;
281	int wrap = (new_tail < old_tail);
282	int a, b, rm;
283
284	spin_lock(&sdp->sd_ail_lock);
285
286	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
287		a = (old_tail <= tr->tr_first);
288		b = (tr->tr_first < new_tail);
289		rm = (wrap) ? (a || b) : (a && b);
290		if (!rm)
291			continue;
292
293		gfs2_ail2_empty_one(sdp, tr);
294		list_del(&tr->tr_list);
295		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
296		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
297		kfree(tr);
298	}
299
300	spin_unlock(&sdp->sd_ail_lock);
301}
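/*
 * Editorial note: the a/b/rm test above is a circular-range check.  For
 * example, in a journal of 100 blocks with old_tail = 90 and new_tail = 10,
 * wrap is true and a transaction is removed when tr_first >= 90 *or*
 * tr_first < 10; without wrap (say old_tail = 10, new_tail = 90) it is
 * removed only when 10 <= tr_first < 90.
 */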
302
303/**
304 * gfs2_log_release - Release a given number of log blocks
305 * @sdp: The GFS2 superblock
306 * @blks: The number of blocks
307 *
308 */
309
310void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
311{
312
313	atomic_add(blks, &sdp->sd_log_blks_free);
314	trace_gfs2_log_blocks(sdp, blks);
315	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
316				  sdp->sd_jdesc->jd_blocks);
317	up_read(&sdp->sd_log_flush_lock);
 
318}
319
320/**
321 * gfs2_log_reserve - Make a log reservation
322 * @sdp: The GFS2 superblock
323 * @blks: The number of blocks to reserve
 
324 *
 325 * Note that we never give out the last few blocks of the journal. That's
326 * due to the fact that there is a small number of header blocks
327 * associated with each log flush. The exact number can't be known until
328 * flush time, so we ensure that we have just enough free blocks at all
329 * times to avoid running out during a log flush.
330 *
331 * We no longer flush the log here, instead we wake up logd to do that
332 * for us. To avoid the thundering herd and to ensure that we deal fairly
333 * with queued waiters, we use an exclusive wait. This means that when we
334 * get woken with enough journal space to get our reservation, we need to
335 * wake the next waiter on the list.
336 *
337 * Returns: errno
338 */
339
340int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 
341{
342	int ret = 0;
343	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
344	unsigned wanted = blks + reserved_blks;
345	DEFINE_WAIT(wait);
346	int did_wait = 0;
347	unsigned int free_blocks;
348
349	if (gfs2_assert_warn(sdp, blks) ||
350	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
351		return -EINVAL;
352retry:
353	free_blocks = atomic_read(&sdp->sd_log_blks_free);
354	if (unlikely(free_blocks <= wanted)) {
355		do {
356			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
357					TASK_UNINTERRUPTIBLE);
358			wake_up(&sdp->sd_logd_waitq);
359			did_wait = 1;
360			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
361				io_schedule();
362			free_blocks = atomic_read(&sdp->sd_log_blks_free);
363		} while(free_blocks <= wanted);
364		finish_wait(&sdp->sd_log_waitq, &wait);
365	}
366	atomic_inc(&sdp->sd_reserving_log);
367	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
368				free_blocks - blks) != free_blocks) {
369		if (atomic_dec_and_test(&sdp->sd_reserving_log))
370			wake_up(&sdp->sd_reserving_log_wait);
371		goto retry;
372	}
373	trace_gfs2_log_blocks(sdp, -blks);
374
375	/*
376	 * If we waited, then so might others, wake them up _after_ we get
377	 * our share of the log.
378	 */
379	if (unlikely(did_wait))
380		wake_up(&sdp->sd_log_waitq);
381
382	down_read(&sdp->sd_log_flush_lock);
383	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
384		gfs2_log_release(sdp, blks);
385		ret = -EROFS;
386	}
387	if (atomic_dec_and_test(&sdp->sd_reserving_log))
388		wake_up(&sdp->sd_reserving_log_wait);
389	return ret;
390}
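/*
 * Editorial example (block size assumed for illustration): with 4096-byte
 * blocks, reserved_blks = 7 * (4096 / 4096) = 7, so a caller asking for
 * blks = 10 sleeps on sd_log_waitq until more than wanted = 10 + 7 = 17
 * blocks are free, then races with other reservers via atomic_cmpxchg()
 * to take exactly 10 of them.
 */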
391
392/**
393 * log_distance - Compute distance between two journal blocks
394 * @sdp: The GFS2 superblock
395 * @newer: The most recent journal block of the pair
396 * @older: The older journal block of the pair
397 *
398 *   Compute the distance (in the journal direction) between two
399 *   blocks in the journal
400 *
401 * Returns: the distance in blocks
402 */
403
404static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
405					unsigned int older)
406{
407	int dist;
408
409	dist = newer - older;
410	if (dist < 0)
411		dist += sdp->sd_jdesc->jd_blocks;
412
413	return dist;
414}
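/*
 * Editorial example: in a journal of jd_blocks = 1000, the distance from
 * older = 990 to newer = 5 is 5 - 990 = -985, corrected to -985 + 1000 = 15
 * blocks, i.e. the write position wrapped past the end of the journal.
 */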
415
416/**
417 * calc_reserved - Calculate the number of blocks to reserve when
418 *                 refunding a transaction's unused buffers.
419 * @sdp: The GFS2 superblock
420 *
421 * This is complex.  We need to reserve room for all our currently used
422 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and 
423 * all our journaled data buffers for journaled files (e.g. files in the 
424 * meta_fs like rindex, or files for which chattr +j was done.)
425 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
426 * will count it as free space (sd_log_blks_free) and corruption will follow.
427 *
428 * We can have metadata bufs and jdata bufs in the same journal.  So each
429 * type gets its own log header, for which we need to reserve a block.
430 * In fact, each type has the potential for needing more than one header 
431 * in cases where we have more buffers than will fit on a journal page.
432 * Metadata journal entries take up half the space of journaled buffer entries.
433 * Thus, metadata entries have buf_limit (502) and journaled buffers have
434 * databuf_limit (251) before they cause a wrap around.
435 *
436 * Also, we need to reserve blocks for revoke journal entries and one for an
437 * overall header for the lot.
438 *
439 * Returns: the number of blocks reserved
440 */
441static unsigned int calc_reserved(struct gfs2_sbd *sdp)
442{
443	unsigned int reserved = 0;
444	unsigned int mbuf;
445	unsigned int dbuf;
446	struct gfs2_trans *tr = sdp->sd_log_tr;
447
448	if (tr) {
449		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
450		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
451		reserved = mbuf + dbuf;
452		/* Account for header blocks */
453		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
454		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
455	}
456
457	if (sdp->sd_log_commited_revoke > 0)
458		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
459					  sizeof(u64));
460	/* One for the overall header */
461	if (reserved)
462		reserved++;
463	return reserved;
464}
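/*
 * Editorial example using the 4K-block limits quoted in the comment above
 * (buf_limit = 502, databuf_limit = 251): for a transaction with mbuf = 600
 * and dbuf = 100, reserved = 700 + DIV_ROUND_UP(600, 502) +
 * DIV_ROUND_UP(100, 251) = 700 + 2 + 1 = 703, plus the revoke blocks from
 * gfs2_struct2blk() and one final block for the overall header.
 */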
465
466static unsigned int current_tail(struct gfs2_sbd *sdp)
467{
468	struct gfs2_trans *tr;
469	unsigned int tail;
470
471	spin_lock(&sdp->sd_ail_lock);
472
473	if (list_empty(&sdp->sd_ail1_list)) {
474		tail = sdp->sd_log_head;
475	} else {
476		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
477				tr_list);
478		tail = tr->tr_first;
479	}
480
481	spin_unlock(&sdp->sd_ail_lock);
482
483	return tail;
484}
485
486static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
487{
488	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
 
489
490	ail2_empty(sdp, new_tail);
491
492	atomic_add(dist, &sdp->sd_log_blks_free);
493	trace_gfs2_log_blocks(sdp, dist);
494	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
495			     sdp->sd_jdesc->jd_blocks);
496
497	sdp->sd_log_tail = new_tail;
498}
499
500
501static void log_flush_wait(struct gfs2_sbd *sdp)
502{
503	DEFINE_WAIT(wait);
504
505	if (atomic_read(&sdp->sd_log_in_flight)) {
506		do {
507			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
508					TASK_UNINTERRUPTIBLE);
509			if (atomic_read(&sdp->sd_log_in_flight))
510				io_schedule();
511		} while(atomic_read(&sdp->sd_log_in_flight));
512		finish_wait(&sdp->sd_log_flush_wait, &wait);
513	}
514}
515
516static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
517{
518	struct gfs2_inode *ipa, *ipb;
519
520	ipa = list_entry(a, struct gfs2_inode, i_ordered);
521	ipb = list_entry(b, struct gfs2_inode, i_ordered);
522
523	if (ipa->i_no_addr < ipb->i_no_addr)
524		return -1;
525	if (ipa->i_no_addr > ipb->i_no_addr)
526		return 1;
527	return 0;
528}
529
530static void gfs2_ordered_write(struct gfs2_sbd *sdp)
531{
532	struct gfs2_inode *ip;
533	LIST_HEAD(written);
534
535	spin_lock(&sdp->sd_ordered_lock);
536	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
537	while (!list_empty(&sdp->sd_log_le_ordered)) {
538		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
539		list_move(&ip->i_ordered, &written);
540		if (ip->i_inode.i_mapping->nrpages == 0)
541			continue;
 
 
542		spin_unlock(&sdp->sd_ordered_lock);
543		filemap_fdatawrite(ip->i_inode.i_mapping);
544		spin_lock(&sdp->sd_ordered_lock);
545	}
546	list_splice(&written, &sdp->sd_log_le_ordered);
547	spin_unlock(&sdp->sd_ordered_lock);
548}
549
550static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
551{
552	struct gfs2_inode *ip;
553
554	spin_lock(&sdp->sd_ordered_lock);
555	while (!list_empty(&sdp->sd_log_le_ordered)) {
556		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
557		list_del(&ip->i_ordered);
558		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
559		if (ip->i_inode.i_mapping->nrpages == 0)
560			continue;
561		spin_unlock(&sdp->sd_ordered_lock);
562		filemap_fdatawait(ip->i_inode.i_mapping);
563		spin_lock(&sdp->sd_ordered_lock);
564	}
565	spin_unlock(&sdp->sd_ordered_lock);
566}
567
568void gfs2_ordered_del_inode(struct gfs2_inode *ip)
569{
570	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
571
572	spin_lock(&sdp->sd_ordered_lock);
573	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
574		list_del(&ip->i_ordered);
575	spin_unlock(&sdp->sd_ordered_lock);
576}
577
578void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
579{
580	struct buffer_head *bh = bd->bd_bh;
581	struct gfs2_glock *gl = bd->bd_gl;
582
583	bh->b_private = NULL;
584	bd->bd_blkno = bh->b_blocknr;
585	gfs2_remove_from_ail(bd); /* drops ref on bh */
586	bd->bd_bh = NULL;
587	bd->bd_ops = &gfs2_revoke_lops;
588	sdp->sd_log_num_revoke++;
589	atomic_inc(&gl->gl_revokes);
590	set_bit(GLF_LFLUSH, &gl->gl_flags);
591	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
592}
593
594void gfs2_write_revokes(struct gfs2_sbd *sdp)
595{
596	struct gfs2_trans *tr;
597	struct gfs2_bufdata *bd, *tmp;
598	int have_revokes = 0;
599	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
600
601	gfs2_ail1_empty(sdp);
602	spin_lock(&sdp->sd_ail_lock);
603	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
604		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
605			if (list_empty(&bd->bd_list)) {
606				have_revokes = 1;
607				goto done;
608			}
609		}
610	}
611done:
612	spin_unlock(&sdp->sd_ail_lock);
613	if (have_revokes == 0)
614		return;
615	while (sdp->sd_log_num_revoke > max_revokes)
616		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
617	max_revokes -= sdp->sd_log_num_revoke;
618	if (!sdp->sd_log_num_revoke) {
619		atomic_dec(&sdp->sd_log_blks_free);
620		/* If no blocks have been reserved, we need to also
621		 * reserve a block for the header */
622		if (!sdp->sd_log_blks_reserved)
623			atomic_dec(&sdp->sd_log_blks_free);
624	}
625	gfs2_log_lock(sdp);
626	spin_lock(&sdp->sd_ail_lock);
627	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
628		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
629			if (max_revokes == 0)
630				goto out_of_blocks;
631			if (!list_empty(&bd->bd_list))
632				continue;
633			gfs2_add_revoke(sdp, bd);
634			max_revokes--;
635		}
636	}
637out_of_blocks:
638	spin_unlock(&sdp->sd_ail_lock);
639	gfs2_log_unlock(sdp);
640
641	if (!sdp->sd_log_num_revoke) {
642		atomic_inc(&sdp->sd_log_blks_free);
643		if (!sdp->sd_log_blks_reserved)
644			atomic_inc(&sdp->sd_log_blks_free);
645	}
646}
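/*
 * Editorial example (struct sizes assumed, as in the example after
 * gfs2_struct2blk above): with 4096-byte blocks the initial max_revokes is
 * roughly (4096 - 72) / 8 = 503.  If sd_log_num_revoke is already 600, one
 * continuation block's worth ((4096 - 24) / 8 = 509) is added, giving 1012,
 * and the 600 pending revokes are then subtracted, leaving about 412 slots
 * that this function may fill from already-written AIL buffers.
 */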
647
648/**
649 * log_write_header - Get and initialize a journal header buffer
650 * @sdp: The GFS2 superblock
651 *
652 * Returns: the initialized log buffer descriptor
653 */
654
655static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 
 
656{
657	struct gfs2_log_header *lh;
658	unsigned int tail;
659	u32 hash;
660	int rw = WRITE_FLUSH_FUA | REQ_META;
661	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
662	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
663	lh = page_address(page);
664	clear_page(lh);
665
666	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
 
667
668	tail = current_tail(sdp);
 
 
669
670	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
671	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
672	lh->lh_header.__pad0 = cpu_to_be64(0);
673	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
674	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
675	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
676	lh->lh_flags = cpu_to_be32(flags);
677	lh->lh_tail = cpu_to_be32(tail);
678	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
679	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
680	lh->lh_hash = cpu_to_be32(hash);
681
682	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
683		gfs2_ordered_wait(sdp);
684		log_flush_wait(sdp);
685		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
686	}
687
688	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
689	gfs2_log_write_page(sdp, page);
690	gfs2_log_flush_bio(sdp, rw);
 
691	log_flush_wait(sdp);
692
693	if (sdp->sd_log_tail != tail)
694		log_pull_tail(sdp, tail);
695}
696
697/**
698 * gfs2_log_flush - flush incore transaction(s)
699 * @sdp: the filesystem
 700 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 701 * @type: The type of log flush being performed
 702 */
703
704void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
705		    enum gfs2_flush_type type)
706{
707	struct gfs2_trans *tr;
708	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
709
710	down_write(&sdp->sd_log_flush_lock);
711
712	/* Log might have been flushed while we waited for the flush lock */
713	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
714		up_write(&sdp->sd_log_flush_lock);
715		return;
716	}
717	trace_gfs2_log_flush(sdp, 1);
718
719	if (type == SHUTDOWN_FLUSH)
720		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
721
722	sdp->sd_log_flush_head = sdp->sd_log_head;
723	sdp->sd_log_flush_wrapped = 0;
724	tr = sdp->sd_log_tr;
725	if (tr) {
726		sdp->sd_log_tr = NULL;
727		INIT_LIST_HEAD(&tr->tr_ail1_list);
728		INIT_LIST_HEAD(&tr->tr_ail2_list);
729		tr->tr_first = sdp->sd_log_flush_head;
730		if (unlikely (state == SFS_FROZEN))
731			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
732	}
733
734	if (unlikely(state == SFS_FROZEN))
735		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
736	gfs2_assert_withdraw(sdp,
737			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
 
 
738
739	gfs2_ordered_write(sdp);
 
 
740	lops_before_commit(sdp, tr);
741	gfs2_log_flush_bio(sdp, WRITE);
742
743	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
744		log_flush_wait(sdp);
745		log_write_header(sdp, 0);
746	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
747		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
748		trace_gfs2_log_blocks(sdp, -1);
749		log_write_header(sdp, 0);
750	}
 
 
751	lops_after_commit(sdp, tr);
752
753	gfs2_log_lock(sdp);
754	sdp->sd_log_head = sdp->sd_log_flush_head;
755	sdp->sd_log_blks_reserved = 0;
756	sdp->sd_log_commited_revoke = 0;
757
758	spin_lock(&sdp->sd_ail_lock);
759	if (tr && !list_empty(&tr->tr_ail1_list)) {
760		list_add(&tr->tr_list, &sdp->sd_ail1_list);
761		tr = NULL;
762	}
763	spin_unlock(&sdp->sd_ail_lock);
764	gfs2_log_unlock(sdp);
765
766	if (type != NORMAL_FLUSH) {
767		if (!sdp->sd_log_idle) {
768			for (;;) {
769				gfs2_ail1_start(sdp);
770				gfs2_ail1_wait(sdp);
771				if (gfs2_ail1_empty(sdp))
772					break;
773			}
774			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
775			trace_gfs2_log_blocks(sdp, -1);
776			sdp->sd_log_flush_wrapped = 0;
777			log_write_header(sdp, 0);
778			sdp->sd_log_head = sdp->sd_log_flush_head;
779		}
780		if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
 
781			gfs2_log_shutdown(sdp);
782		if (type == FREEZE_FLUSH)
783			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
784	}
785
786	trace_gfs2_log_flush(sdp, 0);
787	up_write(&sdp->sd_log_flush_lock);
788
789	kfree(tr);
790}
791
792/**
793 * gfs2_merge_trans - Merge a new transaction into a cached transaction
794 * @old: Original transaction to be expanded
795 * @new: New transaction to be merged
796 */
797
798static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
799{
800	WARN_ON_ONCE(old->tr_attached != 1);
 
 
801
802	old->tr_num_buf_new	+= new->tr_num_buf_new;
803	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
804	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
805	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
 
806	old->tr_num_revoke	+= new->tr_num_revoke;
807	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;
808
809	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
810	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
811}
812
813static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
814{
815	unsigned int reserved;
816	unsigned int unused;
817	unsigned int maxres;
818
819	gfs2_log_lock(sdp);
820
821	if (sdp->sd_log_tr) {
822		gfs2_merge_trans(sdp->sd_log_tr, tr);
823	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
824		gfs2_assert_withdraw(sdp, tr->tr_alloced);
825		sdp->sd_log_tr = tr;
826		tr->tr_attached = 1;
827	}
828
829	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
830	reserved = calc_reserved(sdp);
831	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
832	gfs2_assert_withdraw(sdp, maxres >= reserved);
833	unused = maxres - reserved;
834	atomic_add(unused, &sdp->sd_log_blks_free);
835	trace_gfs2_log_blocks(sdp, unused);
836	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
837			     sdp->sd_jdesc->jd_blocks);
838	sdp->sd_log_blks_reserved = reserved;
839
840	gfs2_log_unlock(sdp);
841}
842
843/**
844 * gfs2_log_commit - Commit a transaction to the log
845 * @sdp: the filesystem
846 * @tr: the transaction
847 *
848 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
849 * or the total number of used blocks (pinned blocks plus AIL blocks)
850 * is greater than thresh2.
851 *
852 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
853 * journal size.
854 *
 855 *
856 */
857
858void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
859{
860	log_refund(sdp, tr);
861
862	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
863	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
864	    atomic_read(&sdp->sd_log_thresh2)))
865		wake_up(&sdp->sd_logd_waitq);
866}
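/*
 * Editorial example: for a journal of jd_blocks = 32768 (assumed for
 * illustration), the mount-time defaults described above give
 * thresh1 ~= 10922 and thresh2 ~= 21845, so logd is woken once more than
 * ~10922 blocks are pinned or more than ~21845 journal blocks are in use.
 */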
867
868/**
869 * gfs2_log_shutdown - write a shutdown header into a journal
870 * @sdp: the filesystem
871 *
872 */
873
874void gfs2_log_shutdown(struct gfs2_sbd *sdp)
875{
876	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
877	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
878	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
879
880	sdp->sd_log_flush_head = sdp->sd_log_head;
881	sdp->sd_log_flush_wrapped = 0;
882
883	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
884
885	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
886	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
887
888	sdp->sd_log_head = sdp->sd_log_flush_head;
889	sdp->sd_log_tail = sdp->sd_log_head;
890}
891
892static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
893{
894	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
895}
896
897static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
898{
899	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
900	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
901}
902
903/**
904 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
905 * @sdp: Pointer to GFS2 superblock
906 *
907 * Also, periodically check to make sure that we're using the most recent
908 * journal index.
909 */
910
911int gfs2_logd(void *data)
912{
913	struct gfs2_sbd *sdp = data;
914	unsigned long t = 1;
915	DEFINE_WAIT(wait);
916
 
917	while (!kthread_should_stop()) {
918
919		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
920			gfs2_ail1_empty(sdp);
921			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
 
922		}
923
924		if (gfs2_ail_flush_reqd(sdp)) {
 
 
925			gfs2_ail1_start(sdp);
926			gfs2_ail1_wait(sdp);
927			gfs2_ail1_empty(sdp);
928			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
 
929		}
930
931		if (!gfs2_ail_flush_reqd(sdp))
932			wake_up(&sdp->sd_log_waitq);
933
934		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
935
936		try_to_freeze();
937
938		do {
939			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
940					TASK_INTERRUPTIBLE);
941			if (!gfs2_ail_flush_reqd(sdp) &&
942			    !gfs2_jrnl_flush_reqd(sdp) &&
943			    !kthread_should_stop())
944				t = schedule_timeout(t);
945		} while(t && !gfs2_ail_flush_reqd(sdp) &&
946			!gfs2_jrnl_flush_reqd(sdp) &&
947			!kthread_should_stop());
948		finish_wait(&sdp->sd_logd_waitq, &wait);
949	}
950
951	return 0;
952}
953
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/slab.h>
   9#include <linux/spinlock.h>
  10#include <linux/completion.h>
  11#include <linux/buffer_head.h>
  12#include <linux/gfs2_ondisk.h>
  13#include <linux/crc32.h>
  14#include <linux/crc32c.h>
  15#include <linux/delay.h>
  16#include <linux/kthread.h>
  17#include <linux/freezer.h>
  18#include <linux/bio.h>
  19#include <linux/blkdev.h>
  20#include <linux/writeback.h>
  21#include <linux/list_sort.h>
  22
  23#include "gfs2.h"
  24#include "incore.h"
  25#include "bmap.h"
  26#include "glock.h"
  27#include "log.h"
  28#include "lops.h"
  29#include "meta_io.h"
  30#include "util.h"
  31#include "dir.h"
  32#include "trace_gfs2.h"
  33#include "trans.h"
  34
  35static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
  36
  37/**
  38 * gfs2_struct2blk - compute the number of log descriptor blocks needed
  39 * @sdp: the filesystem
  40 * @nstruct: the number of structures
  41 *
  42 * Compute the number of log descriptor blocks needed to hold a certain number
  43 * of structures of a certain size.
  44 *
  45 * Returns: the number of blocks needed (minimum is always 1)
  46 */
  47
  48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
 
  49{
  50	unsigned int blks;
  51	unsigned int first, second;
  52
  53	/* The initial struct gfs2_log_descriptor block */
  54	blks = 1;
  55	first = sdp->sd_ldptrs;
  56
  57	if (nstruct > first) {
  58		/* Subsequent struct gfs2_meta_header blocks */
  59		second = sdp->sd_inptrs;
  60		blks += DIV_ROUND_UP(nstruct - first, second);
  61	}
  62
  63	return blks;
  64}
  65
  66/**
  67 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 
  68 * @bd: The gfs2_bufdata to remove
  69 *
  70 * The ail lock _must_ be held when calling this function
  71 *
  72 */
  73
  74void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
  75{
  76	bd->bd_tr = NULL;
  77	list_del_init(&bd->bd_ail_st_list);
  78	list_del_init(&bd->bd_ail_gl_list);
  79	atomic_dec(&bd->bd_gl->gl_ail_count);
  80	brelse(bd->bd_bh);
  81}
  82
  83/**
  84 * gfs2_ail1_start_one - Start I/O on a transaction
  85 * @sdp: The superblock
  86 * @wbc: The writeback control structure
  87 * @tr: The transaction to start I/O on
  88 * @plug: The block plug currently active
  89 */
  90
  91static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
  92			       struct writeback_control *wbc,
  93			       struct gfs2_trans *tr, struct blk_plug *plug)
  94__releases(&sdp->sd_ail_lock)
  95__acquires(&sdp->sd_ail_lock)
  96{
  97	struct gfs2_glock *gl = NULL;
  98	struct address_space *mapping;
  99	struct gfs2_bufdata *bd, *s;
 100	struct buffer_head *bh;
 101	int ret = 0;
 102
 103	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
 104		bh = bd->bd_bh;
 105
 106		gfs2_assert(sdp, bd->bd_tr == tr);
 107
 108		if (!buffer_busy(bh)) {
 109			if (buffer_uptodate(bh)) {
 110				list_move(&bd->bd_ail_st_list,
 111					  &tr->tr_ail2_list);
 112				continue;
 113			}
 114			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
 115				gfs2_io_error_bh(sdp, bh);
 116				gfs2_withdraw_delayed(sdp);
 117			}
 118		}
 119
 120		if (gfs2_withdrawing_or_withdrawn(sdp)) {
 121			gfs2_remove_from_ail(bd);
 122			continue;
 123		}
 124		if (!buffer_dirty(bh))
 125			continue;
 126		if (gl == bd->bd_gl)
 127			continue;
 128		gl = bd->bd_gl;
 129		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
 130		mapping = bh->b_folio->mapping;
 131		if (!mapping)
 132			continue;
 133		spin_unlock(&sdp->sd_ail_lock);
 134		ret = mapping->a_ops->writepages(mapping, wbc);
 135		if (need_resched()) {
 136			blk_finish_plug(plug);
 137			cond_resched();
 138			blk_start_plug(plug);
 139		}
 140		spin_lock(&sdp->sd_ail_lock);
 141		if (ret == -ENODATA) /* if a jdata write into a new hole */
 142			ret = 0; /* ignore it */
 143		mapping_set_error(mapping, ret);
 144		if (ret || wbc->nr_to_write <= 0)
 145			break;
 146		return -EBUSY;
 147	}
 148
 149	return ret;
 150}
 151
 152static void dump_ail_list(struct gfs2_sbd *sdp)
 153{
 154	struct gfs2_trans *tr;
 155	struct gfs2_bufdata *bd;
 156	struct buffer_head *bh;
 157
 158	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
 159		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
 160					    bd_ail_st_list) {
 161			bh = bd->bd_bh;
 162			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
 163			       (unsigned long long)bd->bd_blkno, bh);
 164			if (!bh) {
 165				fs_err(sdp, "\n");
 166				continue;
 167			}
 168			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
 169			       "map:%d new:%d ar:%d aw:%d delay:%d "
 170			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
 171			       (unsigned long long)bh->b_blocknr,
 172			       buffer_uptodate(bh), buffer_dirty(bh),
 173			       buffer_locked(bh), buffer_req(bh),
 174			       buffer_mapped(bh), buffer_new(bh),
 175			       buffer_async_read(bh), buffer_async_write(bh),
 176			       buffer_delay(bh), buffer_write_io_error(bh),
 177			       buffer_unwritten(bh),
 178			       buffer_defer_completion(bh),
 179			       buffer_pinned(bh), buffer_escaped(bh));
 180		}
 181	}
 182}
 183
 184/**
 185 * gfs2_ail1_flush - start writeback of some ail1 entries 
 186 * @sdp: The super block
 187 * @wbc: The writeback control structure
 188 *
 189 * Writes back some ail1 entries, according to the limits in the
 190 * writeback control structure
 191 */
 192
 193void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 194{
 195	struct list_head *head = &sdp->sd_ail1_list;
 196	struct gfs2_trans *tr;
 197	struct blk_plug plug;
 198	int ret;
 199	unsigned long flush_start = jiffies;
 200
 201	trace_gfs2_ail_flush(sdp, wbc, 1);
 202	blk_start_plug(&plug);
 203	spin_lock(&sdp->sd_ail_lock);
 204restart:
 205	ret = 0;
 206	if (time_after(jiffies, flush_start + (HZ * 600))) {
 207		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
 208		       __func__, current->journal_info ? 1 : 0);
 209		dump_ail_list(sdp);
 210		goto out;
 211	}
 212	list_for_each_entry_reverse(tr, head, tr_list) {
 213		if (wbc->nr_to_write <= 0)
 214			break;
 215		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
 216		if (ret) {
 217			if (ret == -EBUSY)
 218				goto restart;
 219			break;
 220		}
 221	}
 222out:
 223	spin_unlock(&sdp->sd_ail_lock);
 224	blk_finish_plug(&plug);
 225	if (ret) {
 226		gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
 227		gfs2_withdraw(sdp);
 228	}
 229	trace_gfs2_ail_flush(sdp, wbc, 0);
 230}
 231
 232/**
 233 * gfs2_ail1_start - start writeback of all ail1 entries
 234 * @sdp: The superblock
 235 */
 236
 237static void gfs2_ail1_start(struct gfs2_sbd *sdp)
 238{
 239	struct writeback_control wbc = {
 240		.sync_mode = WB_SYNC_NONE,
 241		.nr_to_write = LONG_MAX,
 242		.range_start = 0,
 243		.range_end = LLONG_MAX,
 244	};
 245
 246	return gfs2_ail1_flush(sdp, &wbc);
 247}
 248
 249static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
 250{
 251	unsigned int new_flush_tail = sdp->sd_log_head;
 252	struct gfs2_trans *tr;
 253
 254	if (!list_empty(&sdp->sd_ail1_list)) {
 255		tr = list_last_entry(&sdp->sd_ail1_list,
 256				     struct gfs2_trans, tr_list);
 257		new_flush_tail = tr->tr_first;
 258	}
 259	sdp->sd_log_flush_tail = new_flush_tail;
 260}
 261
 262static void gfs2_log_update_head(struct gfs2_sbd *sdp)
 263{
 264	unsigned int new_head = sdp->sd_log_flush_head;
 265
 266	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
 267		sdp->sd_log_flush_tail = new_head;
 268	sdp->sd_log_head = new_head;
 269}
 270
 271/*
 272 * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
 273 */
 274
 275static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 276			      struct list_head *head)
 277{
 278	struct gfs2_bufdata *bd;
 279
 280	while (!list_empty(head)) {
 281		bd = list_first_entry(head, struct gfs2_bufdata,
 282				      bd_ail_st_list);
 283		gfs2_assert(sdp, bd->bd_tr == tr);
 284		gfs2_remove_from_ail(bd);
 285	}
 286}
 287
 288/**
 289 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 290 * @sdp: the filesystem
 291 * @tr: the transaction
 292 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 293 *
 294 * returns: the transaction's count of remaining active items
 295 */
 296
 297static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 298				int *max_revokes)
 299{
 300	struct gfs2_bufdata *bd, *s;
 301	struct buffer_head *bh;
 302	int active_count = 0;
 303
 304	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
 305					 bd_ail_st_list) {
 306		bh = bd->bd_bh;
 307		gfs2_assert(sdp, bd->bd_tr == tr);
 308		/*
 309		 * If another process flagged an io error, e.g. writing to the
 310		 * journal, error all other bhs and move them off the ail1 to
 311		 * prevent a tight loop when unmount tries to flush ail1,
 312		 * regardless of whether they're still busy. If no outside
 313		 * errors were found and the buffer is busy, move to the next.
 314		 * If the ail buffer is not busy and caught an error, flag it
 315		 * for others.
 316		 */
 317		if (!sdp->sd_log_error && buffer_busy(bh)) {
 318			active_count++;
 319			continue;
 320		}
 321		if (!buffer_uptodate(bh) &&
 322		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
 323			gfs2_io_error_bh(sdp, bh);
 324			gfs2_withdraw_delayed(sdp);
 325		}
 326		/*
 327		 * If we have space for revokes and the bd is no longer on any
 328		 * buf list, we can just add a revoke for it immediately and
 329		 * avoid having to put it on the ail2 list, where it would need
 330		 * to be revoked later.
 331		 */
 332		if (*max_revokes && list_empty(&bd->bd_list)) {
 333			gfs2_add_revoke(sdp, bd);
 334			(*max_revokes)--;
 335			continue;
 336		}
 337		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 338	}
 339	return active_count;
 340}
 341
 342/**
 343 * gfs2_ail1_empty - Try to empty the ail1 lists
 344 * @sdp: The superblock
 345 * @max_revokes: If non-zero, add revokes where appropriate
 346 *
 347 * Tries to empty the ail1 lists, starting with the oldest first.
 348 * Returns %true if the ail1 list is now empty.
 349 */
 350
 351static bool gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
 352{
 353	struct gfs2_trans *tr, *s;
 354	int oldest_tr = 1;
 355	bool empty;
 356
 357	spin_lock(&sdp->sd_ail_lock);
 358	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
 359		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
 360			list_move(&tr->tr_list, &sdp->sd_ail2_list);
 361		else
 362			oldest_tr = 0;
 363	}
 364	gfs2_log_update_flush_tail(sdp);
 365	empty = list_empty(&sdp->sd_ail1_list);
 366	spin_unlock(&sdp->sd_ail_lock);
 367
 368	return empty;
 369}
 370
 371static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
 372{
 373	struct gfs2_trans *tr;
 374	struct gfs2_bufdata *bd;
 375	struct buffer_head *bh;
 376
 377	spin_lock(&sdp->sd_ail_lock);
 378	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
 379		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
 380			bh = bd->bd_bh;
 381			if (!buffer_locked(bh))
 382				continue;
 383			get_bh(bh);
 384			spin_unlock(&sdp->sd_ail_lock);
 385			wait_on_buffer(bh);
 386			brelse(bh);
 387			return;
 388		}
 389	}
 390	spin_unlock(&sdp->sd_ail_lock);
 391}
 392
 393static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 394{
 395	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
 396	list_del(&tr->tr_list);
 397	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
 398	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
 399	gfs2_trans_free(sdp, tr);
 400}
 401
 402static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 403{
 404	struct list_head *ail2_list = &sdp->sd_ail2_list;
 405	unsigned int old_tail = sdp->sd_log_tail;
 406	struct gfs2_trans *tr, *safe;
 
 407
 408	spin_lock(&sdp->sd_ail_lock);
 409	if (old_tail <= new_tail) {
 410		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
 411			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
 412				__ail2_empty(sdp, tr);
 413		}
 414	} else {
 415		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
 416			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
 417				__ail2_empty(sdp, tr);
 418		}
 419	}
 420	spin_unlock(&sdp->sd_ail_lock);
 421}
 422
 423/**
 424 * gfs2_log_is_empty - Check if the log is empty
 425 * @sdp: The GFS2 superblock
 426 */
 
 
 427
 428bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
 429	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
 430}
 431
 432static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
 433{
 434	unsigned int available;
 435
 436	available = atomic_read(&sdp->sd_log_revokes_available);
 437	while (available >= revokes) {
 438		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
 439				       &available, available - revokes))
 440			return true;
 441	}
 442	return false;
 443}
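/*
 * Editorial example: with sd_log_revokes_available = 100 and revokes = 30,
 * atomic_try_cmpxchg() attempts to move the counter from 100 to 70; if
 * another task changed it first, "available" is reloaded with the current
 * value and the loop retries until it either succeeds or finds fewer than
 * 30 revokes available and returns false.
 */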
 444
 445/**
 446 * gfs2_log_release_revokes - Release a given number of revokes
 447 * @sdp: The GFS2 superblock
 448 * @revokes: The number of revokes to release
 449 *
 450 * sdp->sd_log_flush_lock must be held.
 451 */
 452void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
 453{
 454	if (revokes)
 455		atomic_add(revokes, &sdp->sd_log_revokes_available);
 456}
 457
 458/**
 459 * gfs2_log_release - Release a given number of log blocks
 460 * @sdp: The GFS2 superblock
 461 * @blks: The number of blocks
 462 *
 463 */
 464
 465void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 466{
 
 467	atomic_add(blks, &sdp->sd_log_blks_free);
 468	trace_gfs2_log_blocks(sdp, blks);
 469	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 470				  sdp->sd_jdesc->jd_blocks);
 471	if (atomic_read(&sdp->sd_log_blks_needed))
 472		wake_up(&sdp->sd_log_waitq);
 473}
 474
 475/**
 476 * __gfs2_log_try_reserve - Try to make a log reservation
 477 * @sdp: The GFS2 superblock
 478 * @blks: The number of blocks to reserve
 479 * @taboo_blks: The number of blocks to leave free
 480 *
 481 * Try to do the same as __gfs2_log_reserve(), but fail if no more log
 482 * space is immediately available.
 483 */
 484static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
 485				   unsigned int taboo_blks)
 486{
 487	unsigned wanted = blks + taboo_blks;
 488	unsigned int free_blocks;
 489
 490	free_blocks = atomic_read(&sdp->sd_log_blks_free);
 491	while (free_blocks >= wanted) {
 492		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
 493				       free_blocks - blks)) {
 494			trace_gfs2_log_blocks(sdp, -blks);
 495			return true;
 496		}
 497	}
 498	return false;
 499}
 500
 501/**
 502 * __gfs2_log_reserve - Make a log reservation
 503 * @sdp: The GFS2 superblock
 504 * @blks: The number of blocks to reserve
 505 * @taboo_blks: The number of blocks to leave free
 506 *
 507 * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
 508 * for all other processes.  This ensures that when the log is almost full,
 509 * logd will still be able to call gfs2_log_flush one more time  without
 510 * blocking, which will advance the tail and make some more log space
 511 * available.
 512 *
 513 * We no longer flush the log here, instead we wake up logd to do that
 514 * for us. To avoid the thundering herd and to ensure that we deal fairly
 515 * with queued waiters, we use an exclusive wait. This means that when we
 516 * get woken with enough journal space to get our reservation, we need to
 517 * wake the next waiter on the list.
 
 
 518 */
 519
 520static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
 521			       unsigned int taboo_blks)
 522{
 523	unsigned wanted = blks + taboo_blks;
 524	unsigned int free_blocks;
 525
 526	atomic_add(blks, &sdp->sd_log_blks_needed);
 527	for (;;) {
 528		if (current != sdp->sd_logd_process)
 529			wake_up(&sdp->sd_logd_waitq);
 530		io_wait_event(sdp->sd_log_waitq,
 531			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
 532			 free_blocks >= wanted));
 533		do {
 534			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
 535					       &free_blocks,
 536					       free_blocks - blks))
 537				goto reserved;
 538		} while (free_blocks >= wanted);
 539	}
 
 540
 541reserved:
 542	trace_gfs2_log_blocks(sdp, -blks);
 543	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
 544		wake_up(&sdp->sd_log_waitq);
 545}
 546
 547/**
 548 * gfs2_log_try_reserve - Try to make a log reservation
 549 * @sdp: The GFS2 superblock
 550 * @tr: The transaction
 551 * @extra_revokes: The number of additional revokes reserved (output)
 552 *
 553 * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
 554 * held for correct revoke accounting.
 555 */
 556
 557bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 558			  unsigned int *extra_revokes)
 559{
 560	unsigned int blks = tr->tr_reserved;
 561	unsigned int revokes = tr->tr_revokes;
 562	unsigned int revoke_blks = 0;
 563
 564	*extra_revokes = 0;
 565	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
 566		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
 567		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
 568		blks += revoke_blks;
 569	}
 570	if (!blks)
 571		return true;
 572	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
 573		return true;
 574	if (!revoke_blks)
 575		gfs2_log_release_revokes(sdp, revokes);
 576	return false;
 577}
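/*
 * Editorial example (sd_inptrs assumed to be 509 for a 4K block size):
 * a transaction with tr_revokes = 1000 that cannot take its revokes from
 * sd_log_revokes_available needs revoke_blks = DIV_ROUND_UP(1000, 509) = 2
 * extra journal blocks and reports *extra_revokes = 2 * 509 - 1000 = 18
 * spare revoke slots back to the caller.
 */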
 578
 579/**
 580 * gfs2_log_reserve - Make a log reservation
 581 * @sdp: The GFS2 superblock
 582 * @tr: The transaction
 583 * @extra_revokes: The number of additional revokes reserved (output)
 584 *
 585 * sdp->sd_log_flush_lock must not be held.
 586 */
 587
 588void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
 589		      unsigned int *extra_revokes)
 590{
 591	unsigned int blks = tr->tr_reserved;
 592	unsigned int revokes = tr->tr_revokes;
 593	unsigned int revoke_blks;
 594
 595	*extra_revokes = 0;
 596	if (revokes) {
 597		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
 598		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
 599		blks += revoke_blks;
 600	}
 601	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
 
 
 602}
 603
 604/**
 605 * log_distance - Compute distance between two journal blocks
 606 * @sdp: The GFS2 superblock
 607 * @newer: The most recent journal block of the pair
 608 * @older: The older journal block of the pair
 609 *
 610 *   Compute the distance (in the journal direction) between two
 611 *   blocks in the journal
 612 *
 613 * Returns: the distance in blocks
 614 */
 615
 616static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
 617					unsigned int older)
 618{
 619	int dist;
 620
 621	dist = newer - older;
 622	if (dist < 0)
 623		dist += sdp->sd_jdesc->jd_blocks;
 624
 625	return dist;
 626}
 627
 628/**
 629 * calc_reserved - Calculate the number of blocks to keep reserved
 
 630 * @sdp: The GFS2 superblock
 631 *
 632 * This is complex.  We need to reserve room for all our currently used
 633 * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
 634 * all our journaled data blocks for journaled files (e.g. files in the
 635 * meta_fs like rindex, or files for which chattr +j was done.)
 636 * If we don't reserve enough space, corruption will follow.
 
 637 *
 638 * We can have metadata blocks and jdata blocks in the same journal.  Each
 639 * type gets its own log descriptor, for which we need to reserve a block.
 640 * In fact, each type has the potential for needing more than one log descriptor
 641 * in cases where we have more blocks than will fit in a log descriptor.
 642 * Metadata journal entries take up half the space of journaled buffer entries.
 
 
 643 *
 644 * Also, we need to reserve blocks for revoke journal entries and one for an
 645 * overall header for the lot.
 646 *
 647 * Returns: the number of blocks reserved
 648 */
 649static unsigned int calc_reserved(struct gfs2_sbd *sdp)
 650{
 651	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
 652	unsigned int blocks;
 
 653	struct gfs2_trans *tr = sdp->sd_log_tr;
 654
 655	if (tr) {
 656		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
 657		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
 658		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
 659		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
 
 
 660	}
 661	return reserved;
 662}
 663
 664static void log_pull_tail(struct gfs2_sbd *sdp)
 665{
 666	unsigned int new_tail = sdp->sd_log_flush_tail;
 667	unsigned int dist;
 668
 669	if (new_tail == sdp->sd_log_tail)
 670		return;
 671	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
 672	ail2_empty(sdp, new_tail);
 673	gfs2_log_release(sdp, dist);
 674	sdp->sd_log_tail = new_tail;
 675}
 676
 677
 678void log_flush_wait(struct gfs2_sbd *sdp)
 679{
 680	DEFINE_WAIT(wait);
 681
 682	if (atomic_read(&sdp->sd_log_in_flight)) {
 683		do {
 684			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
 685					TASK_UNINTERRUPTIBLE);
 686			if (atomic_read(&sdp->sd_log_in_flight))
 687				io_schedule();
 688		} while(atomic_read(&sdp->sd_log_in_flight));
 689		finish_wait(&sdp->sd_log_flush_wait, &wait);
 690	}
 691}
 692
 693static int ip_cmp(void *priv, const struct list_head *a, const struct list_head *b)
 694{
 695	struct gfs2_inode *ipa, *ipb;
 696
 697	ipa = list_entry(a, struct gfs2_inode, i_ordered);
 698	ipb = list_entry(b, struct gfs2_inode, i_ordered);
 699
 700	if (ipa->i_no_addr < ipb->i_no_addr)
 701		return -1;
 702	if (ipa->i_no_addr > ipb->i_no_addr)
 703		return 1;
 704	return 0;
 705}
 706
 707static void __ordered_del_inode(struct gfs2_inode *ip)
 708{
 709	if (!list_empty(&ip->i_ordered))
 710		list_del_init(&ip->i_ordered);
 711}
 712
 713static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 714{
 715	struct gfs2_inode *ip;
 716	LIST_HEAD(written);
 717
 718	spin_lock(&sdp->sd_ordered_lock);
 719	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
 720	while (!list_empty(&sdp->sd_log_ordered)) {
 721		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
 722		if (ip->i_inode.i_mapping->nrpages == 0) {
 723			__ordered_del_inode(ip);
 724			continue;
 725		}
 726		list_move(&ip->i_ordered, &written);
 727		spin_unlock(&sdp->sd_ordered_lock);
 728		filemap_fdatawrite(ip->i_inode.i_mapping);
 729		spin_lock(&sdp->sd_ordered_lock);
 730	}
 731	list_splice(&written, &sdp->sd_log_ordered);
 732	spin_unlock(&sdp->sd_ordered_lock);
 733}
 734
 735static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
 736{
 737	struct gfs2_inode *ip;
 738
 739	spin_lock(&sdp->sd_ordered_lock);
 740	while (!list_empty(&sdp->sd_log_ordered)) {
 741		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
 742		__ordered_del_inode(ip);
 
 743		if (ip->i_inode.i_mapping->nrpages == 0)
 744			continue;
 745		spin_unlock(&sdp->sd_ordered_lock);
 746		filemap_fdatawait(ip->i_inode.i_mapping);
 747		spin_lock(&sdp->sd_ordered_lock);
 748	}
 749	spin_unlock(&sdp->sd_ordered_lock);
 750}
 751
 752void gfs2_ordered_del_inode(struct gfs2_inode *ip)
 753{
 754	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 755
 756	spin_lock(&sdp->sd_ordered_lock);
 757	__ordered_del_inode(ip);
 
 758	spin_unlock(&sdp->sd_ordered_lock);
 759}
 760
 761void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 762{
 763	struct buffer_head *bh = bd->bd_bh;
 764	struct gfs2_glock *gl = bd->bd_gl;
 765
 766	sdp->sd_log_num_revoke++;
 767	if (atomic_inc_return(&gl->gl_revokes) == 1)
 768		gfs2_glock_hold(gl);
 769	bh->b_private = NULL;
 770	bd->bd_blkno = bh->b_blocknr;
 771	gfs2_remove_from_ail(bd); /* drops ref on bh */
 772	bd->bd_bh = NULL;
 773	set_bit(GLF_LFLUSH, &gl->gl_flags);
 774	list_add(&bd->bd_list, &sdp->sd_log_revokes);
 775}
 776
 777void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 778{
 779	if (atomic_dec_return(&gl->gl_revokes) == 0) {
 780		clear_bit(GLF_LFLUSH, &gl->gl_flags);
 781		gfs2_glock_put_async(gl);
 782	}
 783}
 784
 785/**
 786 * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
 787 * @sdp: The GFS2 superblock
 788 *
 789 * Our usual strategy is to defer writing revokes as much as we can in the hope
 790 * that we'll eventually overwrite the journal, which will make those revokes
 791 * go away.  This changes when we flush the log: at that point, there will
 792 * likely be some left-over space in the last revoke block of that transaction.
 793 * We can fill that space with additional revokes for blocks that have already
 794 * been written back.  This will basically come at no cost now, and will save
 795 * us from having to keep track of those blocks on the AIL2 list later.
 796 */
 797void gfs2_flush_revokes(struct gfs2_sbd *sdp)
 798{
 799	/* number of revokes we still have room for */
 800	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
 801
 802	gfs2_log_lock(sdp);
 803	gfs2_ail1_empty(sdp, max_revokes);
 804	gfs2_log_unlock(sdp);
 805
 806	if (gfs2_withdrawing(sdp))
 807		gfs2_withdraw(sdp);
 808}
 809
 810/**
 811 * gfs2_write_log_header - Write a journal log header buffer at lblock
 812 * @sdp: The GFS2 superblock
 813 * @jd: journal descriptor of the journal to which we are writing
 814 * @seq: sequence number
 815 * @tail: tail of the log
 816 * @lblock: value for lh_blkno (block number relative to start of journal)
 817 * @flags: log header flags GFS2_LOG_HEAD_*
 818 * @op_flags: flags to pass to the bio
 819 *
 820 * Returns: the initialized log buffer descriptor
 821 */
 822
 823void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 824			   u64 seq, u32 tail, u32 lblock, u32 flags,
 825			   blk_opf_t op_flags)
 826{
 827	struct gfs2_log_header *lh;
 828	u32 hash, crc;
 829	struct page *page;
 830	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 831	struct timespec64 tv;
 832	struct super_block *sb = sdp->sd_vfs;
 833	u64 dblock;
 
 834
 835	if (gfs2_withdrawing_or_withdrawn(sdp))
 836		return;
 837
 838	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 839	lh = page_address(page);
 840	clear_page(lh);
 841
 842	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 843	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
 844	lh->lh_header.__pad0 = cpu_to_be64(0);
 845	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
 846	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
 847	lh->lh_sequence = cpu_to_be64(seq);
 848	lh->lh_flags = cpu_to_be32(flags);
 849	lh->lh_tail = cpu_to_be32(tail);
 850	lh->lh_blkno = cpu_to_be32(lblock);
 851	hash = ~crc32(~0, lh, LH_V1_SIZE);
 852	lh->lh_hash = cpu_to_be32(hash);
 853
 854	ktime_get_coarse_real_ts64(&tv);
 855	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
 856	lh->lh_sec = cpu_to_be64(tv.tv_sec);
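	/*
	 * Translate the journal-relative block number into an on-disk
	 * address, using the journal's cached extent list when available.
	 */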
 857	if (!list_empty(&jd->extent_list))
 858		dblock = gfs2_log_bmap(jd, lblock);
 859	else {
 860		unsigned int extlen;
 861		int ret;
 862
 863		extlen = 1;
 864		ret = gfs2_get_extent(jd->jd_inode, lblock, &dblock, &extlen);
 865		if (gfs2_assert_withdraw(sdp, ret == 0))
 866			return;
 867	}
 868	lh->lh_addr = cpu_to_be64(dblock);
 869	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
 870
 871	/* We may only write local statfs, quota, etc., when writing to our
 872	   own journal. The values are left 0 when recovering a journal
 873	   different from our own. */
 874	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
 875		lh->lh_statfs_addr =
 876			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
 877		lh->lh_quota_addr =
 878			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
 879
 880		spin_lock(&sdp->sd_statfs_spin);
 881		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
 882		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
 883		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
 884		spin_unlock(&sdp->sd_statfs_spin);
 885	}
 886
 887	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
 888
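	/*
	 * lh_crc is a crc32c over the remainder of the block; the "+ 4"
	 * skips the lh_crc field itself.
	 */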
 889	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
 890		     sb->s_blocksize - LH_V1_SIZE - 4);
 891	lh->lh_crc = cpu_to_be32(crc);
 892
 893	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
 894	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
 895}
 896
 897/**
 898 * log_write_header - Write out a journal log header at the current flush head
 899 * @sdp: The GFS2 superblock
 900 * @flags: The log header flags, including log header origin
 903 */
 904
 905static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 906{
 907	blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
 908	struct super_block *sb = sdp->sd_vfs;
 909
 910	gfs2_assert_withdraw(sdp, sb->s_writers.frozen != SB_FREEZE_COMPLETE);
 911
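	/*
	 * Without barriers, REQ_PREFLUSH / REQ_FUA cannot be used to order
	 * the header behind the writes that precede it, so wait for those
	 * writes explicitly instead.
	 */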
 912	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 913		gfs2_ordered_wait(sdp);
 914		log_flush_wait(sdp);
 915		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 916	}
 917	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
 918	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
 919			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
 920			      flags, op_flags);
 921	gfs2_log_incr_head(sdp);
 922	log_flush_wait(sdp);
 923	log_pull_tail(sdp);
 924	gfs2_log_update_head(sdp);
 925}
 926
 927/**
 928 * gfs2_ail_drain - drain the ail lists after a withdraw
 929 * @sdp: Pointer to GFS2 superblock
 930 */
 931void gfs2_ail_drain(struct gfs2_sbd *sdp)
 932{
 933	struct gfs2_trans *tr;
 934
 935	spin_lock(&sdp->sd_ail_lock);
 936	/*
 937	 * For transactions on the sd_ail1_list we need to drain both the
 938	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
 939	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
 940	 * before revokes are sent for that block. Items on the sd_ail2_list
 941 * should have already gotten beyond that point, so only their ail2 lists need to be emptied.
 942	 */
 943	while (!list_empty(&sdp->sd_ail1_list)) {
 944		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
 945				      tr_list);
 946		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
 947		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
 948		list_del(&tr->tr_list);
 949		gfs2_trans_free(sdp, tr);
 950	}
 951	while (!list_empty(&sdp->sd_ail2_list)) {
 952		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
 953				      tr_list);
 954		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
 955		list_del(&tr->tr_list);
 956		gfs2_trans_free(sdp, tr);
 957	}
 958	gfs2_drain_revokes(sdp);
 959	spin_unlock(&sdp->sd_ail_lock);
 960}
 961
 962/**
 963 * empty_ail1_list - try to start IO and empty the ail1 list
 964 * @sdp: Pointer to GFS2 superblock
 965 */
 966static void empty_ail1_list(struct gfs2_sbd *sdp)
 967{
 968	unsigned long start = jiffies;
 969	bool empty = false;
 970
 971	while (!empty) {
 972		if (time_after(jiffies, start + (HZ * 600))) {
 973			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
 974			       __func__, current->journal_info ? 1 : 0);
 975			dump_ail_list(sdp);
 976			return;
 977		}
 978		gfs2_ail1_start(sdp);
 979		gfs2_ail1_wait(sdp);
 980		empty = gfs2_ail1_empty(sdp, 0);
 981
 982		if (gfs2_withdrawing_or_withdrawn(sdp))
 983			break;
 984	}
 985
 986	if (gfs2_withdrawing(sdp))
 987		gfs2_withdraw(sdp);
 988}
 989
 990/**
 991 * trans_drain - drain the buf and databuf queue for a failed transaction
 992 * @tr: the transaction to drain
 993 *
 994 * When this is called, we're taking an error exit for a log write that failed,
 995 * but since we bypassed the after_commit functions, we need to remove the
 996 * items from the buf and databuf queue.
 997 */
 998static void trans_drain(struct gfs2_trans *tr)
 999{
1000	struct gfs2_bufdata *bd;
1001	struct list_head *head;
1002
1003	if (!tr)
1004		return;
1005
1006	head = &tr->tr_buf;
1007	while (!list_empty(head)) {
1008		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1009		list_del_init(&bd->bd_list);
1010		if (!list_empty(&bd->bd_ail_st_list))
1011			gfs2_remove_from_ail(bd);
1012		kmem_cache_free(gfs2_bufdata_cachep, bd);
1013	}
1014	head = &tr->tr_databuf;
1015	while (!list_empty(head)) {
1016		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1017		list_del_init(&bd->bd_list);
1018		if (!list_empty(&bd->bd_ail_st_list))
1019			gfs2_remove_from_ail(bd);
1020		kmem_cache_free(gfs2_bufdata_cachep, bd);
1021	}
1022}
1023
1024/**
1025 * gfs2_log_flush - flush incore transaction(s)
1026 * @sdp: The filesystem
1027 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
1028 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
1029 *
1030 */
1031
1032void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
1033{
1034	struct gfs2_trans *tr = NULL;
1035	unsigned int reserved_blocks = 0, used_blocks = 0;
1036	bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
1037	unsigned int first_log_head;
1038	unsigned int reserved_revokes = 0;
1039
1040	down_write(&sdp->sd_log_flush_lock);
1041	trace_gfs2_log_flush(sdp, 1, flags);
1042
1043repeat:
1044	/*
1045	 * Do this check while holding the log_flush_lock to prevent new
1046	 * buffers from being added to the ail via gfs2_pin()
1047	 */
1048	if (gfs2_withdrawing_or_withdrawn(sdp) ||
1049	    !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1050		goto out;
1051
1052	/* Log might have been flushed while we waited for the flush lock */
1053	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
1054		goto out;
1055
1056	first_log_head = sdp->sd_log_head;
1057	sdp->sd_log_flush_head = first_log_head;
1058
1059	tr = sdp->sd_log_tr;
1060	if (tr || sdp->sd_log_num_revoke) {
1061		if (reserved_blocks)
1062			gfs2_log_release(sdp, reserved_blocks);
1063		reserved_blocks = sdp->sd_log_blks_reserved;
1064		reserved_revokes = sdp->sd_log_num_revoke;
1065		if (tr) {
1066			sdp->sd_log_tr = NULL;
1067			tr->tr_first = first_log_head;
1068			if (unlikely(frozen)) {
1069				if (gfs2_assert_withdraw_delayed(sdp,
1070				       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
1071					goto out_withdraw;
1072			}
1073		}
1074	} else if (!reserved_blocks) {
1075		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
1076
1077		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
1078		if (current == sdp->sd_logd_process)
1079			taboo_blocks = 0;
1080
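		/*
		 * Reserving journal blocks may have to wait for space to be
		 * freed, which must not happen under the log flush lock:
		 * drop the lock, reserve, and retry from the top.
		 */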
1081		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
1082			up_write(&sdp->sd_log_flush_lock);
1083			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
1084			down_write(&sdp->sd_log_flush_lock);
1085			goto repeat;
1086		}
1087		BUG_ON(sdp->sd_log_num_revoke);
1088	}
1089
1090	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
1091		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
1092
1093	if (unlikely(frozen))
1094		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
1095			goto out_withdraw;
1096
1097	gfs2_ordered_write(sdp);
1098	if (gfs2_withdrawing_or_withdrawn(sdp))
1099		goto out_withdraw;
1100	lops_before_commit(sdp, tr);
1101	if (gfs2_withdrawing_or_withdrawn(sdp))
1102		goto out_withdraw;
1103	if (sdp->sd_jdesc)
1104		gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
1105	if (gfs2_withdrawing_or_withdrawn(sdp))
1106		goto out_withdraw;
1107
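	/*
	 * Write a log header if this flush wrote anything into the journal,
	 * or if only the tail moved and that still needs to be recorded on
	 * disk.
	 */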
1108	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
1109		log_write_header(sdp, flags);
1110	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
1111		log_write_header(sdp, flags);
1112	}
1113	if (gfs2_withdrawing_or_withdrawn(sdp))
1114		goto out_withdraw;
1115	lops_after_commit(sdp, tr);
1116
1117	gfs2_log_lock(sdp);
1118	sdp->sd_log_blks_reserved = 0;
1119
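	/*
	 * If the transaction still has buffers waiting for writeback on its
	 * ail1 list, hand it over to the AIL; otherwise it is freed before
	 * we return.
	 */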
1120	spin_lock(&sdp->sd_ail_lock);
1121	if (tr && !list_empty(&tr->tr_ail1_list)) {
1122		list_add(&tr->tr_list, &sdp->sd_ail1_list);
1123		tr = NULL;
1124	}
1125	spin_unlock(&sdp->sd_ail_lock);
1126	gfs2_log_unlock(sdp);
1127
1128	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
1129		if (!sdp->sd_log_idle) {
1130			empty_ail1_list(sdp);
1131			if (gfs2_withdrawing_or_withdrawn(sdp))
1132				goto out_withdraw;
1133			log_write_header(sdp, flags);
1134		}
1135		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
1136			     GFS2_LOG_HEAD_FLUSH_FREEZE))
1137			gfs2_log_shutdown(sdp);
1138	}
1139
1140out_end:
1141	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
1142	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
1143	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
1144	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
1145	if (reserved_revokes > sdp->sd_ldptrs)
1146		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
1147out:
1148	if (used_blocks != reserved_blocks) {
1149		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
1150		gfs2_log_release(sdp, reserved_blocks - used_blocks);
1151	}
1152	up_write(&sdp->sd_log_flush_lock);
1153	gfs2_trans_free(sdp, tr);
1154	if (gfs2_withdrawing(sdp))
1155		gfs2_withdraw(sdp);
1156	trace_gfs2_log_flush(sdp, 0, flags);
1157	return;
1158
1159out_withdraw:
1160	trans_drain(tr);
1161	/*
1162	 * If the tr_list is empty, we're withdrawing during a log
1163	 * flush that targets a transaction, but the transaction was
1164	 * never queued onto any of the ail lists. Here we add it to
1165	 * ail1 just so that ail_drain() will find and free it.
1166	 */
1167	spin_lock(&sdp->sd_ail_lock);
1168	if (tr && list_empty(&tr->tr_list))
1169		list_add(&tr->tr_list, &sdp->sd_ail1_list);
1170	spin_unlock(&sdp->sd_ail_lock);
1171	tr = NULL;
1172	goto out_end;
1173}
1174
1175/**
1176 * gfs2_merge_trans - Merge a new transaction into a cached transaction
1177 * @sdp: the filesystem
1178 * @new: New transaction to be merged
1179 */
1180
1181static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
1182{
1183	struct gfs2_trans *old = sdp->sd_log_tr;
1184
1185	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
1186
1187	old->tr_num_buf_new	+= new->tr_num_buf_new;
1188	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
1189	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
1190	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
1191	old->tr_revokes		+= new->tr_revokes;
1192	old->tr_num_revoke	+= new->tr_num_revoke;
1193
1194	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
1195	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
1196
1197	spin_lock(&sdp->sd_ail_lock);
1198	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
1199	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
1200	spin_unlock(&sdp->sd_ail_lock);
1201}
1202
1203static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1204{
1205	unsigned int reserved;
1206	unsigned int unused;
1207	unsigned int maxres;
1208
1209	gfs2_log_lock(sdp);
1210
1211	if (sdp->sd_log_tr) {
1212		gfs2_merge_trans(sdp, tr);
1213	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
1214		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
1215		sdp->sd_log_tr = tr;
1216		set_bit(TR_ATTACHED, &tr->tr_flags);
1217	}
1218
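	/*
	 * Recompute how many blocks the log needs reserved and release any
	 * part of the current reservation that is no longer needed.
	 */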
1219	reserved = calc_reserved(sdp);
1220	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
1221	gfs2_assert_withdraw(sdp, maxres >= reserved);
1222	unused = maxres - reserved;
1223	if (unused)
1224		gfs2_log_release(sdp, unused);
1225	sdp->sd_log_blks_reserved = reserved;
1226
1227	gfs2_log_unlock(sdp);
1228}
1229
1230static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
1231{
1232	return atomic_read(&sdp->sd_log_pinned) +
1233	       atomic_read(&sdp->sd_log_blks_needed) >=
1234	       atomic_read(&sdp->sd_log_thresh1);
1235}
1236
1237static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
1238{
1239	return sdp->sd_jdesc->jd_blocks -
1240	       atomic_read(&sdp->sd_log_blks_free) +
1241	       atomic_read(&sdp->sd_log_blks_needed) >=
1242	       atomic_read(&sdp->sd_log_thresh2);
1243}
1244
1245/**
1246 * gfs2_log_commit - Commit a transaction to the log
1247 * @sdp: the filesystem
1248 * @tr: the transaction
1249 *
1250 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
1251 * or the total number of used blocks (pinned blocks plus AIL blocks)
1252 * is greater than thresh2.
1253 *
1254 * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
1255 * journal size.
1258 */
1259
1260void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1261{
1262	log_refund(sdp, tr);
1263
1264	if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
1265		wake_up(&sdp->sd_logd_waitq);
1266}
1267
1268/**
1269 * gfs2_log_shutdown - write a shutdown header into a journal
1270 * @sdp: the filesystem
1271 *
1272 */
1273
1274static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
1275{
1276	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
1277	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
1278	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
1279
1280	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
1281	log_pull_tail(sdp);
1282
1283	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
1284	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
1285}
1286
1287/**
1288 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
1289 * @data: Pointer to GFS2 superblock
1290 *
1291 * Also, periodically check to make sure that we're using the most recent
1292 * journal index.
1293 */
1294
1295int gfs2_logd(void *data)
1296{
1297	struct gfs2_sbd *sdp = data;
1298	unsigned long t = 1;
1299
1300	set_freezable();
1301	while (!kthread_should_stop()) {
1302		if (gfs2_withdrawing_or_withdrawn(sdp))
1303			break;
1304
1305		/* Check for errors writing to the journal */
1306		if (sdp->sd_log_error) {
1307			gfs2_lm(sdp,
1308				"GFS2: fsid=%s: error %d: "
1309				"withdrawing the file system to "
1310				"prevent further damage.\n",
1311				sdp->sd_fsname, sdp->sd_log_error);
1312			gfs2_withdraw(sdp);
1313			break;
1314		}
1315
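		/*
		 * Flush the log when enough blocks are pinned (thresh1) or
		 * when the periodic timeout has expired (t == 0).
		 */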
1316		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
1317			gfs2_ail1_empty(sdp, 0);
1318			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1319						  GFS2_LFC_LOGD_JFLUSH_REQD);
1320		}
1321
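		/*
		 * Start writeback of the AIL and flush the log when the
		 * journal is filling up (thresh2) or an AIL flush has been
		 * explicitly requested.
		 */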
1322		if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
1323		    gfs2_ail_flush_reqd(sdp)) {
1324			clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
1325			gfs2_ail1_start(sdp);
1326			gfs2_ail1_wait(sdp);
1327			gfs2_ail1_empty(sdp, 0);
1328			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1329						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
1330		}
1331
1332		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
1333
1334		t = wait_event_freezable_timeout(sdp->sd_logd_waitq,
1335				test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
1336				gfs2_ail_flush_reqd(sdp) ||
1337				gfs2_jrnl_flush_reqd(sdp) ||
1338				sdp->sd_log_error ||
1339				gfs2_withdrawing_or_withdrawn(sdp) ||
1340				kthread_should_stop(),
1341				t);
1342	}
1343
1344	if (gfs2_withdrawing(sdp))
1345		gfs2_withdraw(sdp);
1346
1347	return 0;
1348}
1349