v4.17
 
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/mempool.h>
 16#include <linux/gfs2_ondisk.h>
 17#include <linux/bio.h>
 18#include <linux/fs.h>
 19#include <linux/list_sort.h>
 20
 21#include "dir.h"
 22#include "gfs2.h"
 23#include "incore.h"
 24#include "inode.h"
 25#include "glock.h"
 26#include "log.h"
 27#include "lops.h"
 28#include "meta_io.h"
 29#include "recovery.h"
 30#include "rgrp.h"
 31#include "trans.h"
 32#include "util.h"
 33#include "trace_gfs2.h"
 34
 35/**
 36 * gfs2_pin - Pin a buffer in memory
 37 * @sdp: The superblock
 38 * @bh: The buffer to be pinned
 39 *
 40 * The log lock must be held when calling this function
 41 */
 42void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
 43{
 44	struct gfs2_bufdata *bd;
 45
 46	BUG_ON(!current->journal_info);
 47
 48	clear_buffer_dirty(bh);
 49	if (test_set_buffer_pinned(bh))
 50		gfs2_assert_withdraw(sdp, 0);
 51	if (!buffer_uptodate(bh))
 52		gfs2_io_error_bh(sdp, bh);
 53	bd = bh->b_private;
 54	/* If this buffer is in the AIL and it has already been written
 55	 * to in-place disk block, remove it from the AIL.
 56	 */
 57	spin_lock(&sdp->sd_ail_lock);
 58	if (bd->bd_tr)
 59		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
 60	spin_unlock(&sdp->sd_ail_lock);
 61	get_bh(bh);
 62	atomic_inc(&sdp->sd_log_pinned);
 63	trace_gfs2_pin(bd, 1);
 64}
 65
 66static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
 67{
 68	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
 69}
 70
 71static void maybe_release_space(struct gfs2_bufdata *bd)
 72{
 73	struct gfs2_glock *gl = bd->bd_gl;
 74	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 75	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 76	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 77	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 78
 79	if (bi->bi_clone == NULL)
 80		return;
 81	if (sdp->sd_args.ar_discard)
 82		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
 83	memcpy(bi->bi_clone + bi->bi_offset,
 84	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
 85	clear_bit(GBF_FULL, &bi->bi_flags);
 86	rgd->rd_free_clone = rgd->rd_free;
 87	rgd->rd_extfail_pt = rgd->rd_free;
 88}
 89
 90/**
 91 * gfs2_unpin - Unpin a buffer
 92 * @sdp: the filesystem the buffer belongs to
 93 * @bh: The buffer to unpin
  94 * @tr: The system transaction being flushed
  95 *
 96 *
 97 */
 98
 99static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
100		       struct gfs2_trans *tr)
101{
102	struct gfs2_bufdata *bd = bh->b_private;
103
104	BUG_ON(!buffer_uptodate(bh));
105	BUG_ON(!buffer_pinned(bh));
106
107	lock_buffer(bh);
108	mark_buffer_dirty(bh);
109	clear_buffer_pinned(bh);
110
111	if (buffer_is_rgrp(bd))
112		maybe_release_space(bd);
113
114	spin_lock(&sdp->sd_ail_lock);
115	if (bd->bd_tr) {
116		list_del(&bd->bd_ail_st_list);
117		brelse(bh);
118	} else {
119		struct gfs2_glock *gl = bd->bd_gl;
120		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
121		atomic_inc(&gl->gl_ail_count);
122	}
123	bd->bd_tr = tr;
124	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
125	spin_unlock(&sdp->sd_ail_lock);
126
127	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
128	trace_gfs2_pin(bd, 0);
129	unlock_buffer(bh);
130	atomic_dec(&sdp->sd_log_pinned);
131}
132
133static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
134{
135	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
136	       (sdp->sd_log_flush_head != sdp->sd_log_head));
137
138	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
139		sdp->sd_log_flush_head = 0;
140}
141
142u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
143{
144	unsigned int lbn = sdp->sd_log_flush_head;
145	struct gfs2_journal_extent *je;
146	u64 block;
147
148	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
149		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
150			block = je->dblock + lbn - je->lblock;
151			gfs2_log_incr_head(sdp);
152			return block;
153		}
154	}
155
156	return -1;
157}
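/*
 * Illustrative worked example of the extent walk above (the extent values
 * are invented for illustration, not taken from real data):
 *
 *   Example extent:  je->lblock = 0, je->blocks = 8192, je->dblock = 65536
 *   Flush head:      sd_log_flush_head = 100
 *
 *   100 lies in [0, 8192), so the function returns 65536 + 100 - 0 = 65636
 *   and gfs2_log_incr_head() advances the flush head, wrapping back to 0
 *   once it reaches jd_blocks.
 */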
158
159/**
160 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
161 * @sdp: The superblock
162 * @bvec: The bio_vec
163 * @error: The i/o status
164 *
165 * This finds the relevant buffers and unlocks them and sets the
166 * error flag according to the status of the i/o request. This is
167 * used when the log is writing data which has an in-place version
168 * that is pinned in the pagecache.
169 */
170
171static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
172				  blk_status_t error)
173{
174	struct buffer_head *bh, *next;
175	struct page *page = bvec->bv_page;
176	unsigned size;
177
178	bh = page_buffers(page);
179	size = bvec->bv_len;
180	while (bh_offset(bh) < bvec->bv_offset)
181		bh = bh->b_this_page;
182	do {
183		if (error)
184			mark_buffer_write_io_error(bh);
185		unlock_buffer(bh);
186		next = bh->b_this_page;
187		size -= bh->b_size;
188		brelse(bh);
189		bh = next;
190	} while(bh && size);
191}
192
193/**
194 * gfs2_end_log_write - end of i/o to the log
195 * @bio: The bio
196 * @error: Status of i/o request
197 *
198 * Each bio_vec contains either data from the pagecache or data
199 * relating to the log itself. Here we iterate over the bio_vec
200 * array, processing both kinds of data.
201 *
202 */
203
204static void gfs2_end_log_write(struct bio *bio)
205{
206	struct gfs2_sbd *sdp = bio->bi_private;
207	struct bio_vec *bvec;
208	struct page *page;
209	int i;
210
211	if (bio->bi_status) {
212		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
213		       bio->bi_status, sdp->sd_jdesc->jd_jid);
214		wake_up(&sdp->sd_logd_waitq);
215	}
216
217	bio_for_each_segment_all(bvec, bio, i) {
218		page = bvec->bv_page;
219		if (page_has_buffers(page))
220			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
221		else
222			mempool_free(page, gfs2_page_pool);
223	}
224
225	bio_put(bio);
226	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
227		wake_up(&sdp->sd_log_flush_wait);
228}
229
230/**
231 * gfs2_log_flush_bio - Submit any pending log bio
232 * @sdp: The superblock
233 * @op: REQ_OP
234 * @op_flags: req_flag_bits
235 *
236 * Submit any pending part-built or full bio to the block device. If
237 * there is no pending bio, then this is a no-op.
238 */
239
240void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
241{
242	if (sdp->sd_log_bio) {
243		atomic_inc(&sdp->sd_log_in_flight);
244		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
245		submit_bio(sdp->sd_log_bio);
246		sdp->sd_log_bio = NULL;
247	}
248}
249
250/**
251 * gfs2_log_alloc_bio - Allocate a new bio for log writing
252 * @sdp: The superblock
253 * @blkno: The next device block number we want to write to
254 *
255 * This should never be called when there is a cached bio in the
256 * super block. When it returns, there will be a cached bio in the
257 * super block which will have as many bio_vecs as the device is
258 * happy to handle.
259 *
260 * Returns: Newly allocated bio
261 */
262
263static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
264{
265	struct super_block *sb = sdp->sd_vfs;
266	struct bio *bio;
267
268	BUG_ON(sdp->sd_log_bio);
269
270	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
271	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
272	bio_set_dev(bio, sb->s_bdev);
273	bio->bi_end_io = gfs2_end_log_write;
274	bio->bi_private = sdp;
275
276	sdp->sd_log_bio = bio;
277
278	return bio;
279}
280
281/**
282 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
283 * @sdp: The superblock
284 * @blkno: The device block number we want to write to
285 *
286 * If there is a cached bio, then if the next block number is sequential
287 * with the previous one, return it, otherwise flush the bio to the
288 * device. If there is not a cached bio, or we just flushed it, then
289 * allocate a new one.
290 *
291 * Returns: The bio to use for log writes
292 */
293
294static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
295{
296	struct bio *bio = sdp->sd_log_bio;
297	u64 nblk;
298
299	if (bio) {
300		nblk = bio_end_sector(bio);
301		nblk >>= sdp->sd_fsb2bb_shift;
302		if (blkno == nblk)
303			return bio;
304		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
305	}
306
307	return gfs2_log_alloc_bio(sdp, blkno);
308}
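/*
 * Worked example of the sector arithmetic used above, assuming 4096-byte
 * filesystem blocks on a 512-byte-sector device (sd_fsb2bb_shift == 3):
 *
 *   gfs2_log_alloc_bio() sets bi_sector = blkno * (4096 >> 9) = blkno * 8,
 *   so fs block 7 starts at sector 56.  After that one block is added,
 *   bio_end_sector() is 64, and 64 >> 3 = 8, so a write to fs block 8 is
 *   sequential and reuses the cached bio; any other block number flushes
 *   the cached bio and allocates a new one.
 */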
309
310/**
311 * gfs2_log_write - write to log
312 * @sdp: the filesystem
313 * @page: the page to write
314 * @size: the size of the data to write
315 * @offset: the offset within the page 
316 * @blkno: block number of the log entry
317 *
318 * Try and add the page segment to the current bio. If that fails,
319 * submit the current bio to the device and create a new one, and
320 * then add the page segment to that.
321 */
322
323void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
324		    unsigned size, unsigned offset, u64 blkno)
325{
326	struct bio *bio;
327	int ret;
328
329	bio = gfs2_log_get_bio(sdp, blkno);
330	ret = bio_add_page(bio, page, size, offset);
331	if (ret == 0) {
332		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
333		bio = gfs2_log_alloc_bio(sdp, blkno);
334		ret = bio_add_page(bio, page, size, offset);
335		WARN_ON(ret == 0);
336	}
337}
338
339/**
340 * gfs2_log_write_bh - write a buffer's content to the log
341 * @sdp: The super block
342 * @bh: The buffer pointing to the in-place location
343 * 
344 * This writes the content of the buffer to the next available location
345 * in the log. The buffer will be unlocked once the i/o to the log has
346 * completed.
347 */
348
349static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
350{
351	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
352		       gfs2_log_bmap(sdp));
353}
354
355/**
356 * gfs2_log_write_page - write one block stored in a page, into the log
357 * @sdp: The superblock
358 * @page: The struct page
359 *
360 * This writes the first block-sized part of the page into the log. Note
361 * that the page must have been allocated from the gfs2_page_pool mempool
362 * and that after this has been called, ownership has been transferred and
363 * the page may be freed at any time.
364 */
365
366void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
367{
368	struct super_block *sb = sdp->sd_vfs;
369	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
370		       gfs2_log_bmap(sdp));
371}
372
373static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
374				      u32 ld_length, u32 ld_data1)
375{
376	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
377	struct gfs2_log_descriptor *ld = page_address(page);
378	clear_page(ld);
379	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
380	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
381	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
382	ld->ld_type = cpu_to_be32(ld_type);
383	ld->ld_length = cpu_to_be32(ld_length);
384	ld->ld_data1 = cpu_to_be32(ld_data1);
385	ld->ld_data2 = 0;
386	return page;
387}
388
389static void gfs2_check_magic(struct buffer_head *bh)
390{
391	void *kaddr;
392	__be32 *ptr;
393
394	clear_buffer_escaped(bh);
395	kaddr = kmap_atomic(bh->b_page);
396	ptr = kaddr + bh_offset(bh);
397	if (*ptr == cpu_to_be32(GFS2_MAGIC))
398		set_buffer_escaped(bh);
399	kunmap_atomic(kaddr);
400}
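/*
 * Summary of the escaping round trip implemented in this file: a journaled
 * data block whose first four bytes happen to equal GFS2_MAGIC is marked
 * "escaped" here.  gfs2_before_commit() then records a 1 in the descriptor
 * entry for that block and writes a copy with the first four bytes zeroed,
 * so journal replay cannot mistake user data for a metadata header.  On
 * replay, databuf_lo_scan_elements() sees the escape flag and restores
 * GFS2_MAGIC in the in-place block.
 */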
401
402static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
403{
404	struct gfs2_bufdata *bda, *bdb;
405
406	bda = list_entry(a, struct gfs2_bufdata, bd_list);
407	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
408
409	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
410		return -1;
411	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
412		return 1;
413	return 0;
414}
415
416static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
417				unsigned int total, struct list_head *blist,
418				bool is_databuf)
419{
420	struct gfs2_log_descriptor *ld;
421	struct gfs2_bufdata *bd1 = NULL, *bd2;
422	struct page *page;
423	unsigned int num;
424	unsigned n;
425	__be64 *ptr;
426
427	gfs2_log_lock(sdp);
428	list_sort(NULL, blist, blocknr_cmp);
429	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
430	while(total) {
431		num = total;
432		if (total > limit)
433			num = limit;
434		gfs2_log_unlock(sdp);
435		page = gfs2_get_log_desc(sdp,
436					 is_databuf ? GFS2_LOG_DESC_JDATA :
437					 GFS2_LOG_DESC_METADATA, num + 1, num);
438		ld = page_address(page);
439		gfs2_log_lock(sdp);
440		ptr = (__be64 *)(ld + 1);
441
442		n = 0;
443		list_for_each_entry_continue(bd1, blist, bd_list) {
444			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
445			if (is_databuf) {
446				gfs2_check_magic(bd1->bd_bh);
447				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
448			}
449			if (++n >= num)
450				break;
451		}
452
453		gfs2_log_unlock(sdp);
454		gfs2_log_write_page(sdp, page);
455		gfs2_log_lock(sdp);
456
457		n = 0;
458		list_for_each_entry_continue(bd2, blist, bd_list) {
459			get_bh(bd2->bd_bh);
460			gfs2_log_unlock(sdp);
461			lock_buffer(bd2->bd_bh);
462
463			if (buffer_escaped(bd2->bd_bh)) {
464				void *kaddr;
465				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
466				ptr = page_address(page);
467				kaddr = kmap_atomic(bd2->bd_bh->b_page);
468				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
469				       bd2->bd_bh->b_size);
470				kunmap_atomic(kaddr);
471				*(__be32 *)ptr = 0;
472				clear_buffer_escaped(bd2->bd_bh);
473				unlock_buffer(bd2->bd_bh);
474				brelse(bd2->bd_bh);
475				gfs2_log_write_page(sdp, page);
476			} else {
477				gfs2_log_write_bh(sdp, bd2->bd_bh);
478			}
479			gfs2_log_lock(sdp);
480			if (++n >= num)
481				break;
482		}
483
484		BUG_ON(total < num);
485		total -= num;
486	}
487	gfs2_log_unlock(sdp);
488}
489
490static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
491{
492	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
493	unsigned int nbuf;
494	if (tr == NULL)
495		return;
496	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
497	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
498}
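/*
 * Worked example for the "503 for 4k blocks" note above, assuming the
 * on-disk layout in gfs2_ondisk.h where struct gfs2_log_descriptor is
 * 72 bytes:
 *
 *   buf_limit = (4096 - 72) / sizeof(__be64) = 4024 / 8 = 503
 *
 * i.e. one descriptor block indexes at most 503 journaled metadata blocks,
 * which is why gfs2_before_commit() splits the list into chunks of "limit".
 */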
499
500static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
501{
502	struct list_head *head;
503	struct gfs2_bufdata *bd;
504
505	if (tr == NULL)
506		return;
507
508	head = &tr->tr_buf;
509	while (!list_empty(head)) {
510		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
511		list_del_init(&bd->bd_list);
512		gfs2_unpin(sdp, bd->bd_bh, tr);
513	}
514}
515
516static void buf_lo_before_scan(struct gfs2_jdesc *jd,
517			       struct gfs2_log_header_host *head, int pass)
518{
519	if (pass != 0)
520		return;
521
522	jd->jd_found_blocks = 0;
523	jd->jd_replayed_blocks = 0;
524}
525
526static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
527				struct gfs2_log_descriptor *ld, __be64 *ptr,
528				int pass)
529{
530	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
531	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
532	struct gfs2_glock *gl = ip->i_gl;
533	unsigned int blks = be32_to_cpu(ld->ld_data1);
534	struct buffer_head *bh_log, *bh_ip;
535	u64 blkno;
536	int error = 0;
537
538	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
539		return 0;
540
541	gfs2_replay_incr_blk(jd, &start);
542
543	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
544		blkno = be64_to_cpu(*ptr++);
545
546		jd->jd_found_blocks++;
547
548		if (gfs2_revoke_check(jd, blkno, start))
549			continue;
550
551		error = gfs2_replay_read_block(jd, start, &bh_log);
552		if (error)
553			return error;
554
555		bh_ip = gfs2_meta_new(gl, blkno);
556		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
557
558		if (gfs2_meta_check(sdp, bh_ip))
559			error = -EIO;
560		else
561			mark_buffer_dirty(bh_ip);
562
563		brelse(bh_log);
564		brelse(bh_ip);
565
566		if (error)
567			break;
568
569		jd->jd_replayed_blocks++;
570	}
571
572	return error;
573}
574
575/**
576 * gfs2_meta_sync - Sync all buffers associated with a glock
577 * @gl: The glock
578 *
579 */
580
581static void gfs2_meta_sync(struct gfs2_glock *gl)
582{
583	struct address_space *mapping = gfs2_glock2aspace(gl);
584	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
585	int error;
586
587	if (mapping == NULL)
588		mapping = &sdp->sd_aspace;
589
590	filemap_fdatawrite(mapping);
591	error = filemap_fdatawait(mapping);
592
593	if (error)
594		gfs2_io_error(gl->gl_name.ln_sbd);
595}
596
597static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
598{
599	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
600	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
601
602	if (error) {
603		gfs2_meta_sync(ip->i_gl);
604		return;
605	}
606	if (pass != 1)
607		return;
608
609	gfs2_meta_sync(ip->i_gl);
610
611	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
612	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
613}
614
615static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
616{
617	struct gfs2_meta_header *mh;
618	unsigned int offset;
619	struct list_head *head = &sdp->sd_log_le_revoke;
620	struct gfs2_bufdata *bd;
621	struct page *page;
622	unsigned int length;
623
624	gfs2_write_revokes(sdp);
625	if (!sdp->sd_log_num_revoke)
626		return;
627
628	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
629	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
630	offset = sizeof(struct gfs2_log_descriptor);
631
632	list_for_each_entry(bd, head, bd_list) {
633		sdp->sd_log_num_revoke--;
634
635		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
636
637			gfs2_log_write_page(sdp, page);
638			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
639			mh = page_address(page);
640			clear_page(mh);
641			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
642			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
643			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
644			offset = sizeof(struct gfs2_meta_header);
645		}
646
647		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
648		offset += sizeof(u64);
649	}
650	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
651
652	gfs2_log_write_page(sdp, page);
653}
654
655static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
656{
657	struct list_head *head = &sdp->sd_log_le_revoke;
658	struct gfs2_bufdata *bd;
659	struct gfs2_glock *gl;
660
661	while (!list_empty(head)) {
662		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
663		list_del_init(&bd->bd_list);
664		gl = bd->bd_gl;
665		atomic_dec(&gl->gl_revokes);
666		clear_bit(GLF_LFLUSH, &gl->gl_flags);
667		kmem_cache_free(gfs2_bufdata_cachep, bd);
668	}
669}
670
671static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
672				  struct gfs2_log_header_host *head, int pass)
673{
674	if (pass != 0)
675		return;
676
677	jd->jd_found_revokes = 0;
678	jd->jd_replay_tail = head->lh_tail;
679}
680
681static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
682				   struct gfs2_log_descriptor *ld, __be64 *ptr,
683				   int pass)
684{
685	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
686	unsigned int blks = be32_to_cpu(ld->ld_length);
687	unsigned int revokes = be32_to_cpu(ld->ld_data1);
688	struct buffer_head *bh;
689	unsigned int offset;
690	u64 blkno;
691	int first = 1;
692	int error;
693
694	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
695		return 0;
696
697	offset = sizeof(struct gfs2_log_descriptor);
698
699	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
700		error = gfs2_replay_read_block(jd, start, &bh);
701		if (error)
702			return error;
703
704		if (!first)
705			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
706
707		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
708			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
709
710			error = gfs2_revoke_add(jd, blkno, start);
711			if (error < 0) {
712				brelse(bh);
713				return error;
714			}
715			else if (error)
716				jd->jd_found_revokes++;
717
718			if (!--revokes)
719				break;
720			offset += sizeof(u64);
721		}
722
723		brelse(bh);
724		offset = sizeof(struct gfs2_meta_header);
725		first = 0;
726	}
727
728	return 0;
729}
730
731static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
732{
733	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
734
735	if (error) {
736		gfs2_revoke_clean(jd);
737		return;
738	}
739	if (pass != 1)
740		return;
741
742	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
743	        jd->jd_jid, jd->jd_found_revokes);
744
745	gfs2_revoke_clean(jd);
746}
747
748/**
749 * databuf_lo_before_commit - Scan the data buffers, writing as we go
750 *
751 */
752
753static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
754{
755	unsigned int limit = databuf_limit(sdp);
756	unsigned int nbuf;
757	if (tr == NULL)
758		return;
759	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
760	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
761}
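/*
 * Companion example to the buf_limit calculation earlier, assuming the same
 * descriptor size: each journaled data block consumes two __be64 descriptor
 * entries (block number plus escape flag, matching the two stores in
 * gfs2_before_commit()), so for 4096-byte blocks
 *
 *   databuf_limit = (4096 - 72) / (2 * sizeof(__be64)) = 4024 / 16 = 251
 */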
762
763static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
764				    struct gfs2_log_descriptor *ld,
765				    __be64 *ptr, int pass)
766{
767	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
768	struct gfs2_glock *gl = ip->i_gl;
769	unsigned int blks = be32_to_cpu(ld->ld_data1);
770	struct buffer_head *bh_log, *bh_ip;
771	u64 blkno;
772	u64 esc;
773	int error = 0;
774
775	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
776		return 0;
777
778	gfs2_replay_incr_blk(jd, &start);
779	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
780		blkno = be64_to_cpu(*ptr++);
781		esc = be64_to_cpu(*ptr++);
782
783		jd->jd_found_blocks++;
784
785		if (gfs2_revoke_check(jd, blkno, start))
786			continue;
787
788		error = gfs2_replay_read_block(jd, start, &bh_log);
789		if (error)
790			return error;
791
792		bh_ip = gfs2_meta_new(gl, blkno);
793		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
794
795		/* Unescape */
796		if (esc) {
797			__be32 *eptr = (__be32 *)bh_ip->b_data;
798			*eptr = cpu_to_be32(GFS2_MAGIC);
799		}
800		mark_buffer_dirty(bh_ip);
801
802		brelse(bh_log);
803		brelse(bh_ip);
804
805		jd->jd_replayed_blocks++;
806	}
807
808	return error;
809}
810
811/* FIXME: sort out accounting for log blocks etc. */
812
813static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
814{
815	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
816	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
817
818	if (error) {
819		gfs2_meta_sync(ip->i_gl);
820		return;
821	}
822	if (pass != 1)
823		return;
824
825	/* data sync? */
826	gfs2_meta_sync(ip->i_gl);
827
828	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
829		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
830}
831
832static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
833{
834	struct list_head *head;
835	struct gfs2_bufdata *bd;
836
837	if (tr == NULL)
838		return;
839
840	head = &tr->tr_databuf;
841	while (!list_empty(head)) {
842		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
843		list_del_init(&bd->bd_list);
844		gfs2_unpin(sdp, bd->bd_bh, tr);
845	}
846}
847
848
849const struct gfs2_log_operations gfs2_buf_lops = {
850	.lo_before_commit = buf_lo_before_commit,
851	.lo_after_commit = buf_lo_after_commit,
852	.lo_before_scan = buf_lo_before_scan,
853	.lo_scan_elements = buf_lo_scan_elements,
854	.lo_after_scan = buf_lo_after_scan,
855	.lo_name = "buf",
856};
857
858const struct gfs2_log_operations gfs2_revoke_lops = {
859	.lo_before_commit = revoke_lo_before_commit,
860	.lo_after_commit = revoke_lo_after_commit,
861	.lo_before_scan = revoke_lo_before_scan,
862	.lo_scan_elements = revoke_lo_scan_elements,
863	.lo_after_scan = revoke_lo_after_scan,
864	.lo_name = "revoke",
865};
866
867const struct gfs2_log_operations gfs2_databuf_lops = {
868	.lo_before_commit = databuf_lo_before_commit,
869	.lo_after_commit = databuf_lo_after_commit,
870	.lo_scan_elements = databuf_lo_scan_elements,
871	.lo_after_scan = databuf_lo_after_scan,
872	.lo_name = "databuf",
873};
874
875const struct gfs2_log_operations *gfs2_log_ops[] = {
876	&gfs2_databuf_lops,
877	&gfs2_buf_lops,
878	&gfs2_revoke_lops,
879	NULL,
880};
881
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/slab.h>
   9#include <linux/spinlock.h>
  10#include <linux/completion.h>
  11#include <linux/buffer_head.h>
  12#include <linux/mempool.h>
  13#include <linux/gfs2_ondisk.h>
  14#include <linux/bio.h>
  15#include <linux/fs.h>
  16#include <linux/list_sort.h>
  17#include <linux/blkdev.h>
  18
  19#include "bmap.h"
  20#include "dir.h"
  21#include "gfs2.h"
  22#include "incore.h"
  23#include "inode.h"
  24#include "glock.h"
  25#include "glops.h"
  26#include "log.h"
  27#include "lops.h"
  28#include "meta_io.h"
  29#include "recovery.h"
  30#include "rgrp.h"
  31#include "trans.h"
  32#include "util.h"
  33#include "trace_gfs2.h"
  34
  35/**
  36 * gfs2_pin - Pin a buffer in memory
  37 * @sdp: The superblock
  38 * @bh: The buffer to be pinned
  39 *
  40 * The log lock must be held when calling this function
  41 */
  42void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
  43{
  44	struct gfs2_bufdata *bd;
  45
  46	BUG_ON(!current->journal_info);
  47
  48	clear_buffer_dirty(bh);
  49	if (test_set_buffer_pinned(bh))
  50		gfs2_assert_withdraw(sdp, 0);
  51	if (!buffer_uptodate(bh))
  52		gfs2_io_error_bh_wd(sdp, bh);
  53	bd = bh->b_private;
  54	/* If this buffer is in the AIL and it has already been written
  55	 * to in-place disk block, remove it from the AIL.
  56	 */
  57	spin_lock(&sdp->sd_ail_lock);
  58	if (bd->bd_tr)
  59		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
  60	spin_unlock(&sdp->sd_ail_lock);
  61	get_bh(bh);
  62	atomic_inc(&sdp->sd_log_pinned);
  63	trace_gfs2_pin(bd, 1);
  64}
  65
  66static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
  67{
  68	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
  69}
  70
  71static void maybe_release_space(struct gfs2_bufdata *bd)
  72{
  73	struct gfs2_glock *gl = bd->bd_gl;
  74	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
  75	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
  76	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
  77	struct gfs2_bitmap *bi = rgd->rd_bits + index;
  78
  79	rgrp_lock_local(rgd);
  80	if (bi->bi_clone == NULL)
  81		goto out;
  82	if (sdp->sd_args.ar_discard)
  83		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
  84	memcpy(bi->bi_clone + bi->bi_offset,
  85	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
  86	clear_bit(GBF_FULL, &bi->bi_flags);
  87	rgd->rd_free_clone = rgd->rd_free;
  88	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
  89	rgd->rd_extfail_pt = rgd->rd_free;
  90
  91out:
  92	rgrp_unlock_local(rgd);
  93}
  94
  95/**
  96 * gfs2_unpin - Unpin a buffer
  97 * @sdp: the filesystem the buffer belongs to
  98 * @bh: The buffer to unpin
  99 * @tr: The system transaction being flushed
 100 */
 101
 102static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 103		       struct gfs2_trans *tr)
 104{
 105	struct gfs2_bufdata *bd = bh->b_private;
 106
 107	BUG_ON(!buffer_uptodate(bh));
 108	BUG_ON(!buffer_pinned(bh));
 109
 110	lock_buffer(bh);
 111	mark_buffer_dirty(bh);
 112	clear_buffer_pinned(bh);
 113
 114	if (buffer_is_rgrp(bd))
 115		maybe_release_space(bd);
 116
 117	spin_lock(&sdp->sd_ail_lock);
 118	if (bd->bd_tr) {
 119		list_del(&bd->bd_ail_st_list);
 120		brelse(bh);
 121	} else {
 122		struct gfs2_glock *gl = bd->bd_gl;
 123		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
 124		atomic_inc(&gl->gl_ail_count);
 125	}
 126	bd->bd_tr = tr;
 127	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
 128	spin_unlock(&sdp->sd_ail_lock);
 129
 130	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
 131	trace_gfs2_pin(bd, 0);
 132	unlock_buffer(bh);
 133	atomic_dec(&sdp->sd_log_pinned);
 134}
 135
 136void gfs2_log_incr_head(struct gfs2_sbd *sdp)
 137{
 138	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
 139	       (sdp->sd_log_flush_head != sdp->sd_log_head));
 140
 141	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
 142		sdp->sd_log_flush_head = 0;
 143}
 144
 145u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
 146{
 147	struct gfs2_journal_extent *je;
 148
 149	list_for_each_entry(je, &jd->extent_list, list) {
 150		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
 151			return je->dblock + lblock - je->lblock;
 152	}
 153
 154	return -1;
 155}
 156
 157/**
 158 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 159 * @sdp: The superblock
 160 * @bvec: The bio_vec
 161 * @error: The i/o status
 162 *
 163 * This finds the relevant buffers and unlocks them and sets the
 164 * error flag according to the status of the i/o request. This is
 165 * used when the log is writing data which has an in-place version
 166 * that is pinned in the pagecache.
 167 */
 168
 169static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
 170				  struct bio_vec *bvec,
 171				  blk_status_t error)
 172{
 173	struct buffer_head *bh, *next;
 174	struct page *page = bvec->bv_page;
 175	unsigned size;
 176
 177	bh = page_buffers(page);
 178	size = bvec->bv_len;
 179	while (bh_offset(bh) < bvec->bv_offset)
 180		bh = bh->b_this_page;
 181	do {
 182		if (error)
 183			mark_buffer_write_io_error(bh);
 184		unlock_buffer(bh);
 185		next = bh->b_this_page;
 186		size -= bh->b_size;
 187		brelse(bh);
 188		bh = next;
 189	} while(bh && size);
 190}
 191
 192/**
 193 * gfs2_end_log_write - end of i/o to the log
 194 * @bio: The bio
 195 *
 196 * Each bio_vec contains either data from the pagecache or data
 197 * relating to the log itself. Here we iterate over the bio_vec
 198 * array, processing both kinds of data.
 199 *
 200 */
 201
 202static void gfs2_end_log_write(struct bio *bio)
 203{
 204	struct gfs2_sbd *sdp = bio->bi_private;
 205	struct bio_vec *bvec;
 206	struct page *page;
 207	struct bvec_iter_all iter_all;
 208
 209	if (bio->bi_status) {
 210		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
 211			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
 212			       bio->bi_status, sdp->sd_jdesc->jd_jid);
 213		gfs2_withdraw_delayed(sdp);
 214		/* prevent more writes to the journal */
 215		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 216		wake_up(&sdp->sd_logd_waitq);
 217	}
 218
 219	bio_for_each_segment_all(bvec, bio, iter_all) {
 220		page = bvec->bv_page;
 221		if (page_has_buffers(page))
 222			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
 223		else
 224			mempool_free(page, gfs2_page_pool);
 225	}
 226
 227	bio_put(bio);
 228	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
 229		wake_up(&sdp->sd_log_flush_wait);
 230}
 231
 232/**
 233 * gfs2_log_submit_bio - Submit any pending log bio
 234 * @biop: Address of the bio pointer
 235 * @opf: REQ_OP | op_flags
 236 *
 237 * Submit any pending part-built or full bio to the block device. If
 238 * there is no pending bio, then this is a no-op.
 239 */
 240
 241void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
 242{
 243	struct bio *bio = *biop;
 244	if (bio) {
 245		struct gfs2_sbd *sdp = bio->bi_private;
 246		atomic_inc(&sdp->sd_log_in_flight);
 247		bio->bi_opf = opf;
 248		submit_bio(bio);
 249		*biop = NULL;
 250	}
 251}
 252
 253/**
 254 * gfs2_log_alloc_bio - Allocate a bio
 255 * @sdp: The super block
 256 * @blkno: The device block number we want to write to
 257 * @end_io: The bi_end_io callback
 258 *
 259 * Allocate a new bio, initialize it with the given parameters and return it.
 260 *
 261 * Returns: The newly allocated bio
 262 */
 263
 264static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
 265				      bio_end_io_t *end_io)
 266{
 267	struct super_block *sb = sdp->sd_vfs;
 268	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
 269
 270	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
 271	bio->bi_end_io = end_io;
 272	bio->bi_private = sdp;
 273
 274	return bio;
 275}
 276
 277/**
 278 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 279 * @sdp: The super block
 280 * @blkno: The device block number we want to write to
 281 * @biop: The bio to get or allocate
 282 * @op: REQ_OP
 283 * @end_io: The bi_end_io callback
 284 * @flush: Always flush the current bio and allocate a new one?
 285 *
 286 * If there is a cached bio, then if the next block number is sequential
 287 * with the previous one, return it, otherwise flush the bio to the
 288 * device. If there is no cached bio, or we just flushed it, then
 289 * allocate a new one.
 290 *
 291 * Returns: The bio to use for log writes
 292 */
 293
 294static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
 295				    struct bio **biop, enum req_op op,
 296				    bio_end_io_t *end_io, bool flush)
 297{
 298	struct bio *bio = *biop;
 299
 300	if (bio) {
 301		u64 nblk;
 302
 303		nblk = bio_end_sector(bio);
 304		nblk >>= sdp->sd_fsb2bb_shift;
 305		if (blkno == nblk && !flush)
 306			return bio;
 307		gfs2_log_submit_bio(biop, op);
 308	}
 309
 310	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
 311	return *biop;
 312}
 313
 314/**
 315 * gfs2_log_write - write to log
 316 * @sdp: the filesystem
 317 * @jd: The journal descriptor
 318 * @page: the page to write
 319 * @size: the size of the data to write
 320 * @offset: the offset within the page 
 321 * @blkno: block number of the log entry
 322 *
 323 * Try and add the page segment to the current bio. If that fails,
 324 * submit the current bio to the device and create a new one, and
 325 * then add the page segment to that.
 326 */
 327
 328void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 329		    struct page *page, unsigned size, unsigned offset,
 330		    u64 blkno)
 331{
 332	struct bio *bio;
 333	int ret;
 334
 335	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
 336			       gfs2_end_log_write, false);
 337	ret = bio_add_page(bio, page, size, offset);
 338	if (ret == 0) {
 339		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
 340				       REQ_OP_WRITE, gfs2_end_log_write, true);
 341		ret = bio_add_page(bio, page, size, offset);
 342		WARN_ON(ret == 0);
 343	}
 344}
 345
 346/**
 347 * gfs2_log_write_bh - write a buffer's content to the log
 348 * @sdp: The super block
 349 * @bh: The buffer pointing to the in-place location
 350 * 
 351 * This writes the content of the buffer to the next available location
 352 * in the log. The buffer will be unlocked once the i/o to the log has
 353 * completed.
 354 */
 355
 356static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 357{
 358	u64 dblock;
 359
 360	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 361	gfs2_log_incr_head(sdp);
 362	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
 363		       bh_offset(bh), dblock);
 364}
 365
 366/**
 367 * gfs2_log_write_page - write one block stored in a page, into the log
 368 * @sdp: The superblock
 369 * @page: The struct page
 370 *
 371 * This writes the first block-sized part of the page into the log. Note
 372 * that the page must have been allocated from the gfs2_page_pool mempool
 373 * and that after this has been called, ownership has been transferred and
 374 * the page may be freed at any time.
 375 */
 376
 377static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
 378{
 379	struct super_block *sb = sdp->sd_vfs;
 380	u64 dblock;
 381
 382	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 383	gfs2_log_incr_head(sdp);
 384	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
 385}
 386
 387/**
 388 * gfs2_end_log_read - end I/O callback for reads from the log
 389 * @bio: The bio
 390 *
 391 * Simply unlock the pages in the bio. The main thread will wait on them and
 392 * process them in order as necessary.
 393 */
 394
 395static void gfs2_end_log_read(struct bio *bio)
 396{
 397	struct page *page;
 398	struct bio_vec *bvec;
 399	struct bvec_iter_all iter_all;
 400
 401	bio_for_each_segment_all(bvec, bio, iter_all) {
 402		page = bvec->bv_page;
 403		if (bio->bi_status) {
 404			int err = blk_status_to_errno(bio->bi_status);
 405
 406			SetPageError(page);
 407			mapping_set_error(page->mapping, err);
 408		}
 409		unlock_page(page);
 410	}
 411
 412	bio_put(bio);
 413}
 414
 415/**
 416 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 417 * @jd: The journal descriptor
 418 * @head: The journal head to start from
 419 * @page: The page to look in
 420 *
 421 * Returns: 1 if found, 0 otherwise.
 422 */
 423
 424static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
 425			      struct gfs2_log_header_host *head,
 426			      struct page *page)
 427{
 428	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 429	struct gfs2_log_header_host lh;
 430	void *kaddr = kmap_atomic(page);
 431	unsigned int offset;
 432	bool ret = false;
 433
 434	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
 435		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
 436			if (lh.lh_sequence >= head->lh_sequence)
 437				*head = lh;
 438			else {
 439				ret = true;
 440				break;
 441			}
 442		}
 443	}
 444	kunmap_atomic(kaddr);
 445	return ret;
 446}
 447
 448/**
 449 * gfs2_jhead_process_page - Search/cleanup a page
 450 * @jd: The journal descriptor
 451 * @index: Index of the page to look into
 452 * @head: The journal head to start from
 453 * @done: If set, perform only cleanup, else search and set if found.
 454 *
 455 * Find the folio with 'index' in the journal's mapping. Search the folio for
 456 * the journal head if requested (cleanup == false). Release refs on the
 457 * folio so the page cache can reclaim it. We grabbed a
 458 * reference on this folio twice, first when we did a find_or_create_page()
 459 * to obtain the folio to add it to the bio and second when we do a
 460 * filemap_get_folio() here to get the folio to wait on while I/O on it is being
 461 * completed.
 462 * This function is also used to free up a folio we might've grabbed but not
 463 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 464 * submitted the I/O, but we already found the jhead so we only need to drop
 465 * our references to the folio.
 466 */
 467
 468static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
 469				    struct gfs2_log_header_host *head,
 470				    bool *done)
 471{
 472	struct folio *folio;
 473
 474	folio = filemap_get_folio(jd->jd_inode->i_mapping, index);
 475
 476	folio_wait_locked(folio);
 477	if (folio_test_error(folio))
 478		*done = true;
 479
 480	if (!*done)
 481		*done = gfs2_jhead_pg_srch(jd, head, &folio->page);
 482
 483	/* filemap_get_folio() and the earlier find_or_create_page() */
 484	folio_put_refs(folio, 2);
 485}
 486
 487static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
 488{
 489	struct bio *new;
 490
 491	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
 492	bio_clone_blkg_association(new, prev);
 493	new->bi_iter.bi_sector = bio_end_sector(prev);
 494	bio_chain(new, prev);
 495	submit_bio(prev);
 496	return new;
 497}
 498
 499/**
 500 * gfs2_find_jhead - find the head of a log
 501 * @jd: The journal descriptor
 502 * @head: The log descriptor for the head of the log is returned here
 503 * @keep_cache: If set inode pages will not be truncated
 504 *
 505 * Do a search of a journal by reading it in large chunks using bios and find
 506 * the valid log entry with the highest sequence number.  (i.e. the log head)
 507 *
 508 * Returns: 0 on success, errno otherwise
 509 */
 510int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
 511		    bool keep_cache)
 512{
 513	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 514	struct address_space *mapping = jd->jd_inode->i_mapping;
 515	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
 516	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
 517	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
 518	unsigned int shift = PAGE_SHIFT - bsize_shift;
 519	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
 520	struct gfs2_journal_extent *je;
 521	int sz, ret = 0;
 522	struct bio *bio = NULL;
 523	struct page *page = NULL;
 524	bool done = false;
 525	errseq_t since;
 526
 527	memset(head, 0, sizeof(*head));
 528	if (list_empty(&jd->extent_list))
 529		gfs2_map_journal_extents(sdp, jd);
 530
 531	since = filemap_sample_wb_err(mapping);
 532	list_for_each_entry(je, &jd->extent_list, list) {
 533		u64 dblock = je->dblock;
 534
 535		for (; block < je->lblock + je->blocks; block++, dblock++) {
 536			if (!page) {
 537				page = find_or_create_page(mapping,
 538						block >> shift, GFP_NOFS);
 539				if (!page) {
 540					ret = -ENOMEM;
 541					done = true;
 542					goto out;
 543				}
 544				off = 0;
 545			}
 546
 547			if (bio && (off || block < blocks_submitted + max_blocks)) {
 548				sector_t sector = dblock << sdp->sd_fsb2bb_shift;
 549
 550				if (bio_end_sector(bio) == sector) {
 551					sz = bio_add_page(bio, page, bsize, off);
 552					if (sz == bsize)
 553						goto block_added;
 554				}
 555				if (off) {
 556					unsigned int blocks =
 557						(PAGE_SIZE - off) >> bsize_shift;
 558
 559					bio = gfs2_chain_bio(bio, blocks);
 560					goto add_block_to_new_bio;
 561				}
 562			}
 563
 564			if (bio) {
 565				blocks_submitted = block;
 566				submit_bio(bio);
 567			}
 568
 569			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
 570			bio->bi_opf = REQ_OP_READ;
 571add_block_to_new_bio:
 572			sz = bio_add_page(bio, page, bsize, off);
 573			BUG_ON(sz != bsize);
 574block_added:
 575			off += bsize;
 576			if (off == PAGE_SIZE)
 577				page = NULL;
 578			if (blocks_submitted <= blocks_read + max_blocks) {
 579				/* Keep at least one bio in flight */
 580				continue;
 581			}
 582
 583			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
 584			blocks_read += PAGE_SIZE >> bsize_shift;
 585			if (done)
 586				goto out;  /* found */
 587		}
 588	}
 589
 590out:
 591	if (bio)
 592		submit_bio(bio);
 593	while (blocks_read < block) {
 594		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
 595		blocks_read += PAGE_SIZE >> bsize_shift;
 596	}
 597
 598	if (!ret)
 599		ret = filemap_check_wb_err(mapping, since);
 600
 601	if (!keep_cache)
 602		truncate_inode_pages(mapping, 0);
 603
 604	return ret;
 605}
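/*
 * Worked example of the read-ahead sizing in gfs2_find_jhead() above,
 * assuming 4096-byte filesystem blocks and 4096-byte pages:
 *
 *   bsize_shift = 12, shift = PAGE_SHIFT - bsize_shift = 0
 *   max_blocks  = (2 * 1024 * 1024) >> 12 = 512 blocks, i.e. 2 MiB
 *
 * so the loop keeps roughly 2 MiB of journal reads in flight before pausing
 * to process already-read pages while looking for the newest log header.
 */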
 606
 607static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
 608				      u32 ld_length, u32 ld_data1)
 609{
 610	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 611	struct gfs2_log_descriptor *ld = page_address(page);
 612	clear_page(ld);
 613	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 614	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
 615	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
 616	ld->ld_type = cpu_to_be32(ld_type);
 617	ld->ld_length = cpu_to_be32(ld_length);
 618	ld->ld_data1 = cpu_to_be32(ld_data1);
 619	ld->ld_data2 = 0;
 620	return page;
 621}
 622
 623static void gfs2_check_magic(struct buffer_head *bh)
 624{
 625	void *kaddr;
 626	__be32 *ptr;
 627
 628	clear_buffer_escaped(bh);
 629	kaddr = kmap_atomic(bh->b_page);
 630	ptr = kaddr + bh_offset(bh);
 631	if (*ptr == cpu_to_be32(GFS2_MAGIC))
 632		set_buffer_escaped(bh);
 633	kunmap_atomic(kaddr);
 634}
 635
 636static int blocknr_cmp(void *priv, const struct list_head *a,
 637		       const struct list_head *b)
 638{
 639	struct gfs2_bufdata *bda, *bdb;
 640
 641	bda = list_entry(a, struct gfs2_bufdata, bd_list);
 642	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
 643
 644	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
 645		return -1;
 646	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
 647		return 1;
 648	return 0;
 649}
 650
 651static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
 652				unsigned int total, struct list_head *blist,
 653				bool is_databuf)
 654{
 655	struct gfs2_log_descriptor *ld;
 656	struct gfs2_bufdata *bd1 = NULL, *bd2;
 657	struct page *page;
 658	unsigned int num;
 659	unsigned n;
 660	__be64 *ptr;
 661
 662	gfs2_log_lock(sdp);
 663	list_sort(NULL, blist, blocknr_cmp);
 664	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
 665	while(total) {
 666		num = total;
 667		if (total > limit)
 668			num = limit;
 669		gfs2_log_unlock(sdp);
 670		page = gfs2_get_log_desc(sdp,
 671					 is_databuf ? GFS2_LOG_DESC_JDATA :
 672					 GFS2_LOG_DESC_METADATA, num + 1, num);
 673		ld = page_address(page);
 674		gfs2_log_lock(sdp);
 675		ptr = (__be64 *)(ld + 1);
 676
 677		n = 0;
 678		list_for_each_entry_continue(bd1, blist, bd_list) {
 679			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
 680			if (is_databuf) {
 681				gfs2_check_magic(bd1->bd_bh);
 682				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
 683			}
 684			if (++n >= num)
 685				break;
 686		}
 687
 688		gfs2_log_unlock(sdp);
 689		gfs2_log_write_page(sdp, page);
 690		gfs2_log_lock(sdp);
 691
 692		n = 0;
 693		list_for_each_entry_continue(bd2, blist, bd_list) {
 694			get_bh(bd2->bd_bh);
 695			gfs2_log_unlock(sdp);
 696			lock_buffer(bd2->bd_bh);
 697
 698			if (buffer_escaped(bd2->bd_bh)) {
 699				void *kaddr;
 700				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 701				ptr = page_address(page);
 702				kaddr = kmap_atomic(bd2->bd_bh->b_page);
 703				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
 704				       bd2->bd_bh->b_size);
 705				kunmap_atomic(kaddr);
 706				*(__be32 *)ptr = 0;
 707				clear_buffer_escaped(bd2->bd_bh);
 708				unlock_buffer(bd2->bd_bh);
 709				brelse(bd2->bd_bh);
 710				gfs2_log_write_page(sdp, page);
 711			} else {
 712				gfs2_log_write_bh(sdp, bd2->bd_bh);
 713			}
 714			gfs2_log_lock(sdp);
 715			if (++n >= num)
 716				break;
 717		}
 718
 719		BUG_ON(total < num);
 720		total -= num;
 721	}
 722	gfs2_log_unlock(sdp);
 723}
 724
 725static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 726{
 727	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
 728	unsigned int nbuf;
 729	if (tr == NULL)
 730		return;
 731	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
 732	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
 733}
 734
 735static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 736{
 737	struct list_head *head;
 738	struct gfs2_bufdata *bd;
 739
 740	if (tr == NULL)
 741		return;
 742
 743	head = &tr->tr_buf;
 744	while (!list_empty(head)) {
 745		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
 746		list_del_init(&bd->bd_list);
 747		gfs2_unpin(sdp, bd->bd_bh, tr);
 748	}
 749}
 750
 751static void buf_lo_before_scan(struct gfs2_jdesc *jd,
 752			       struct gfs2_log_header_host *head, int pass)
 753{
 754	if (pass != 0)
 755		return;
 756
 757	jd->jd_found_blocks = 0;
 758	jd->jd_replayed_blocks = 0;
 759}
 760
 761#define obsolete_rgrp_replay \
 762"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
 763#define obsolete_rgrp_replay2 \
 764"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"
 765
 766static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
 767			  u64 blkno)
 768{
 769	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 770	struct gfs2_rgrpd *rgd;
 771	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;
 772
 773	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
 774	if (rgd && rgd->rd_addr == blkno &&
 775	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
 776		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
 777			jd->jd_jid, bh_log->b_blocknr);
 778		fs_info(sdp, obsolete_rgrp_replay2,
 779			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
 780			buffer_pinned(rgd->rd_bits->bi_bh),
 781			rgd->rd_igeneration,
 782			be64_to_cpu(jrgd->rg_igeneration));
 783		gfs2_dump_glock(NULL, rgd->rd_gl, true);
 784	}
 785}
 786
 787static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
 788				struct gfs2_log_descriptor *ld, __be64 *ptr,
 789				int pass)
 790{
 791	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 792	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 793	struct gfs2_glock *gl = ip->i_gl;
 794	unsigned int blks = be32_to_cpu(ld->ld_data1);
 795	struct buffer_head *bh_log, *bh_ip;
 796	u64 blkno;
 797	int error = 0;
 798
 799	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
 800		return 0;
 801
 802	gfs2_replay_incr_blk(jd, &start);
 803
 804	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
 805		blkno = be64_to_cpu(*ptr++);
 806
 807		jd->jd_found_blocks++;
 808
 809		if (gfs2_revoke_check(jd, blkno, start))
 810			continue;
 811
 812		error = gfs2_replay_read_block(jd, start, &bh_log);
 813		if (error)
 814			return error;
 815
 816		bh_ip = gfs2_meta_new(gl, blkno);
 817		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
 818
 819		if (gfs2_meta_check(sdp, bh_ip))
 820			error = -EIO;
 821		else {
 822			struct gfs2_meta_header *mh =
 823				(struct gfs2_meta_header *)bh_ip->b_data;
 824
 825			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
 826				obsolete_rgrp(jd, bh_log, blkno);
 827
 828			mark_buffer_dirty(bh_ip);
 829		}
 830		brelse(bh_log);
 831		brelse(bh_ip);
 832
 833		if (error)
 834			break;
 835
 836		jd->jd_replayed_blocks++;
 837	}
 838
 839	return error;
 840}
 841
 842static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 843{
 844	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 845	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 846
 847	if (error) {
 848		gfs2_inode_metasync(ip->i_gl);
 849		return;
 850	}
 851	if (pass != 1)
 852		return;
 853
 854	gfs2_inode_metasync(ip->i_gl);
 855
 856	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
 857	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
 858}
 859
 860static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 861{
 862	struct gfs2_meta_header *mh;
 863	unsigned int offset;
 864	struct list_head *head = &sdp->sd_log_revokes;
 865	struct gfs2_bufdata *bd;
 866	struct page *page;
 867	unsigned int length;
 868
 869	gfs2_flush_revokes(sdp);
 870	if (!sdp->sd_log_num_revoke)
 871		return;
 872
 873	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
 874	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
 875	offset = sizeof(struct gfs2_log_descriptor);
 876
 877	list_for_each_entry(bd, head, bd_list) {
 878		sdp->sd_log_num_revoke--;
 879
 880		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
 881			gfs2_log_write_page(sdp, page);
 882			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 883			mh = page_address(page);
 884			clear_page(mh);
 885			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
 886			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
 887			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
 888			offset = sizeof(struct gfs2_meta_header);
 889		}
 890
 891		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
 892		offset += sizeof(u64);
 893	}
 894	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 895
 896	gfs2_log_write_page(sdp, page);
 897}
 898
 899void gfs2_drain_revokes(struct gfs2_sbd *sdp)
 900{
 901	struct list_head *head = &sdp->sd_log_revokes;
 902	struct gfs2_bufdata *bd;
 903	struct gfs2_glock *gl;
 904
 905	while (!list_empty(head)) {
 906		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
 907		list_del_init(&bd->bd_list);
 908		gl = bd->bd_gl;
 909		gfs2_glock_remove_revoke(gl);
 910		kmem_cache_free(gfs2_bufdata_cachep, bd);
 911	}
 912}
 913
 914static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 915{
 916	gfs2_drain_revokes(sdp);
 917}
 918
 919static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
 920				  struct gfs2_log_header_host *head, int pass)
 921{
 922	if (pass != 0)
 923		return;
 924
 925	jd->jd_found_revokes = 0;
 926	jd->jd_replay_tail = head->lh_tail;
 927}
 928
 929static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
 930				   struct gfs2_log_descriptor *ld, __be64 *ptr,
 931				   int pass)
 932{
 933	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 934	unsigned int blks = be32_to_cpu(ld->ld_length);
 935	unsigned int revokes = be32_to_cpu(ld->ld_data1);
 936	struct buffer_head *bh;
 937	unsigned int offset;
 938	u64 blkno;
 939	int first = 1;
 940	int error;
 941
 942	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
 943		return 0;
 944
 945	offset = sizeof(struct gfs2_log_descriptor);
 946
 947	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
 948		error = gfs2_replay_read_block(jd, start, &bh);
 949		if (error)
 950			return error;
 951
 952		if (!first)
 953			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
 954
 955		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
 956			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
 957
 958			error = gfs2_revoke_add(jd, blkno, start);
 959			if (error < 0) {
 960				brelse(bh);
 961				return error;
 962			}
 963			else if (error)
 964				jd->jd_found_revokes++;
 965
 966			if (!--revokes)
 967				break;
 968			offset += sizeof(u64);
 969		}
 970
 971		brelse(bh);
 972		offset = sizeof(struct gfs2_meta_header);
 973		first = 0;
 974	}
 975
 976	return 0;
 977}
 978
 979static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 980{
 981	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 982
 983	if (error) {
 984		gfs2_revoke_clean(jd);
 985		return;
 986	}
 987	if (pass != 1)
 988		return;
 989
 990	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
 991	        jd->jd_jid, jd->jd_found_revokes);
 992
 993	gfs2_revoke_clean(jd);
 994}
 995
 996/**
 997 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 998 * @sdp: The filesystem
 999 * @tr: The system transaction being flushed
1000 */
1001
1002static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1003{
1004	unsigned int limit = databuf_limit(sdp);
1005	unsigned int nbuf;
1006	if (tr == NULL)
1007		return;
1008	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
1009	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
1010}
1011
1012static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
1013				    struct gfs2_log_descriptor *ld,
1014				    __be64 *ptr, int pass)
1015{
1016	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1017	struct gfs2_glock *gl = ip->i_gl;
1018	unsigned int blks = be32_to_cpu(ld->ld_data1);
1019	struct buffer_head *bh_log, *bh_ip;
1020	u64 blkno;
1021	u64 esc;
1022	int error = 0;
1023
1024	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
1025		return 0;
1026
1027	gfs2_replay_incr_blk(jd, &start);
1028	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
1029		blkno = be64_to_cpu(*ptr++);
1030		esc = be64_to_cpu(*ptr++);
1031
1032		jd->jd_found_blocks++;
1033
1034		if (gfs2_revoke_check(jd, blkno, start))
1035			continue;
1036
1037		error = gfs2_replay_read_block(jd, start, &bh_log);
1038		if (error)
1039			return error;
1040
1041		bh_ip = gfs2_meta_new(gl, blkno);
1042		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
1043
1044		/* Unescape */
1045		if (esc) {
1046			__be32 *eptr = (__be32 *)bh_ip->b_data;
1047			*eptr = cpu_to_be32(GFS2_MAGIC);
1048		}
1049		mark_buffer_dirty(bh_ip);
1050
1051		brelse(bh_log);
1052		brelse(bh_ip);
1053
1054		jd->jd_replayed_blocks++;
1055	}
1056
1057	return error;
1058}
1059
1060/* FIXME: sort out accounting for log blocks etc. */
1061
1062static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
1063{
1064	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1065	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
1066
1067	if (error) {
1068		gfs2_inode_metasync(ip->i_gl);
1069		return;
1070	}
1071	if (pass != 1)
1072		return;
1073
1074	/* data sync? */
1075	gfs2_inode_metasync(ip->i_gl);
1076
1077	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
1078		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
1079}
1080
1081static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1082{
1083	struct list_head *head;
1084	struct gfs2_bufdata *bd;
1085
1086	if (tr == NULL)
1087		return;
1088
1089	head = &tr->tr_databuf;
1090	while (!list_empty(head)) {
1091		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1092		list_del_init(&bd->bd_list);
1093		gfs2_unpin(sdp, bd->bd_bh, tr);
1094	}
1095}
1096
1097
1098static const struct gfs2_log_operations gfs2_buf_lops = {
1099	.lo_before_commit = buf_lo_before_commit,
1100	.lo_after_commit = buf_lo_after_commit,
1101	.lo_before_scan = buf_lo_before_scan,
1102	.lo_scan_elements = buf_lo_scan_elements,
1103	.lo_after_scan = buf_lo_after_scan,
1104	.lo_name = "buf",
1105};
1106
1107static const struct gfs2_log_operations gfs2_revoke_lops = {
1108	.lo_before_commit = revoke_lo_before_commit,
1109	.lo_after_commit = revoke_lo_after_commit,
1110	.lo_before_scan = revoke_lo_before_scan,
1111	.lo_scan_elements = revoke_lo_scan_elements,
1112	.lo_after_scan = revoke_lo_after_scan,
1113	.lo_name = "revoke",
1114};
1115
1116static const struct gfs2_log_operations gfs2_databuf_lops = {
1117	.lo_before_commit = databuf_lo_before_commit,
1118	.lo_after_commit = databuf_lo_after_commit,
1119	.lo_scan_elements = databuf_lo_scan_elements,
1120	.lo_after_scan = databuf_lo_after_scan,
1121	.lo_name = "databuf",
1122};
1123
1124const struct gfs2_log_operations *gfs2_log_ops[] = {
1125	&gfs2_databuf_lops,
1126	&gfs2_buf_lops,
1127	&gfs2_revoke_lops,
1128	NULL,
1129};
1130
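The gfs2_log_ops[] table above is NULL terminated so that callers can walk it and invoke the per-type hooks in order (data buffers first, then metadata buffers, then revokes). Below is a minimal sketch of such a dispatch loop, modeled on the lops_before_commit()-style helpers in lops.h; the helper name and exact form here are an assumption, shown only to illustrate how the table is consumed.

static inline void lops_before_commit(struct gfs2_sbd *sdp,
				      struct gfs2_trans *tr)
{
	int x;

	/* Walk the NULL-terminated table and call the hook where present. */
	for (x = 0; gfs2_log_ops[x]; x++)
		if (gfs2_log_ops[x]->lo_before_commit)
			gfs2_log_ops[x]->lo_before_commit(sdp, tr);
}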