/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to the in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
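
/*
 * In outline: pinning takes an extra reference on the buffer (get_bh())
 * and clears its dirty bit, so normal writeback cannot push it to its
 * in-place location while the log still needs a stable copy of it.
 * gfs2_unpin() below re-marks the buffer dirty and moves it onto the
 * AIL once the log write has completed, at which point the in-place
 * write may proceed.
 */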

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}
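
/*
 * Roughly: bi_clone is a copy of the bitmap in which recently freed
 * blocks still appear allocated, so they cannot be reused before the
 * deallocation is safely in the log. Once the rgrp buffer is unpinned
 * (i.e. its log copy has hit the disk), the clone is refreshed from
 * the real bitmap and rd_free_clone catches up with rd_free, making
 * the freed space available again (and, with the discard mount
 * option, issuing discards for it).
 */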

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL entry for the current log flush, to which the buffer is added
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
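
/*
 * Rough picture of the AIL (active items list) handling above: buffers
 * on ai_ail1_list have reached the log but have not yet been written
 * back in place; once that in-place write completes they move to
 * ai_ail2_list (cf. the list_move() in gfs2_pin()). The per-glock
 * gl_ail_list and gl_ail_count mirror this so a glock is not released
 * while buffers it protects still await writeback.
 */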

static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}
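
/*
 * Worked example (illustrative numbers): a journal extent with
 * lblock = 0, dblock = 40960 and blocks = 8192 maps logical log block
 * 100 to device block 40960 + (100 - 0) = 41060. A return value of
 * (u64)-1 means the logical block lies beyond every extent, which
 * should not happen for a well-formed journal.
 */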

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them, setting the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  int error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			set_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio, int error)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (error) {
		sdp->sd_log_error = error;
		fs_err(sdp, "Error %d writing to log\n", error);
	}

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, error);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		submit_bio(rw, sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}
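
/*
 * Typical usage, as in gfs2_log_get_bio() and gfs2_log_write() below:
 *
 *	gfs2_log_flush_bio(sdp, WRITE);
 *
 * The rw flags are passed straight through to submit_bio(), so
 * callers may presumably also use WRITE_SYNC and similar variants
 * where appropriate.
 */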

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	while (1) {
		bio = bio_alloc(GFP_NOIO, nrvecs);
		if (likely(bio))
			break;
		nrvecs = max(nrvecs/2, 1U);
	}

	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio->bi_sector + bio_sectors(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, WRITE);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}
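
/*
 * The sequential test above converts the bio's end sector into a
 * filesystem block number: sd_fsb2bb_shift is the log2 of basic
 * (512 byte) blocks per fs block, e.g. 3 for 4k filesystem blocks
 * since 4096/512 = 8 = 1 << 3.
 */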

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
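
/*
 * bio_add_page() returns the number of bytes added, or 0 if the
 * segment does not fit (bio full, or a device limit reached). A
 * freshly allocated bio should always have room for at least one
 * block-sized segment, hence the WARN_ON() on the retry path.
 */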

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
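
/*
 * A log descriptor block therefore consists of a struct
 * gfs2_log_descriptor header followed, for metadata and jdata
 * descriptors, by an array of __be64 tags filling the rest of the
 * block (see gfs2_before_commit() below).
 */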

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	tr = current->journal_info;
	tr->tr_touched = 1;
	if (!list_empty(&bd->bd_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	sdp->sd_log_num_buf++;
	list_add(&bd->bd_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
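
/*
 * "Escaping": a journaled data block whose first four bytes happen to
 * equal GFS2_MAGIC could be mistaken for metadata during log replay.
 * Such buffers are flagged here; gfs2_before_commit() then records an
 * escape flag in the descriptor tag and zeroes the first __be32 of
 * the log copy, and databuf_lo_scan_elements() restores the magic
 * number when the block is replayed.
 */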

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
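
/*
 * In outline, each iteration above makes two passes over a window of
 * up to "limit" buffers: the first fills a descriptor page with
 * __be64 block number tags (plus an escape flag per block for jdata),
 * the second writes out the buffer contents themselves. The
 * descriptor accounts for the "num + 1" blocks of log space per
 * window, and the log lock is dropped around anything that can block.
 */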

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
			   &sdp->sd_log_le_buf, 0);
}
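
/*
 * buf_limit() is believed to be (sb_bsize - sizeof(struct
 * gfs2_log_descriptor)) / sizeof(__be64): with 4k blocks and a 72
 * byte descriptor that gives (4096 - 72) / 8 = 503 tags per
 * descriptor block, matching the comment above.
 */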

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	ld = page_address(page);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
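
/*
 * Revoke data thus occupies the gfs2_struct2blk() count of blocks:
 * the first carries the GFS2_LOG_DESC_REVOKE descriptor, continuation
 * blocks carry a bare GFS2_METATYPE_LB header, and each entry is a
 * __be64 block number. For 4k blocks that works out to roughly 503
 * revokes in the first block and 509 per continuation block.
 */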

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it is
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's: the block number (as for metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (tr)
		tr->tr_touched = 1;
	if (!list_empty(&bd->bd_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
	} else {
		list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
	}
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp) / 2;

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
			   &sdp->sd_log_le_databuf, 1);
}
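
/*
 * Half the metadata limit, because each jdata block consumes two
 * __be64 tags in the descriptor (block number plus escape flag),
 * giving the "251 or so" blocks per descriptor mentioned in the
 * databuf_lo_add() comment for 4k blocks.
 */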

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};
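
/*
 * The order of this NULL-terminated array appears to be significant:
 * the lops_before_commit() and related helpers in lops.h walk it in
 * sequence, so databufs are processed before metadata buffers, then
 * rgrps and revokes.
 */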