/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to the in-place disk block, remove it from the AIL.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

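/*
 * Resource group bitmaps are cloned when blocks within them are freed, so
 * that freed space is not reallocated before the deallocation has safely
 * reached the journal. Once the rgrp buffer has been unpinned (i.e. its
 * log write has completed), the clone can be resynced with the real bitmap
 * and the freed space made available to the allocator again.
 */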
static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_rgrpd *rgd = gl->gl_object;
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == NULL)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL structure the buffer should be placed on
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

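/*
 * Advance the log flush head by one block, wrapping around at the end of
 * the journal.
 */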
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

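/*
 * Map the current log flush head (a journal-relative block number) to a
 * physical block number, using the journal's extent list. Returns -1
 * (as an unsigned value) if the block lies outside every extent, which
 * should not happen for a well-formed journal.
 */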
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
        unsigned int lbn = sdp->sd_log_flush_head;
        struct gfs2_journal_extent *je;
        u64 block;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
                if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
                        block = je->dblock + lbn - je->lblock;
                        gfs2_log_incr_head(sdp);
                        return block;
                }
        }

        return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
                                  int error)
{
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
        unsigned size;

        bh = page_buffers(page);
        size = bvec->bv_len;
        while (bh_offset(bh) < bvec->bv_offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        set_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio, int error)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct page *page;
        int i;

        if (error) {
                sdp->sd_log_error = error;
                fs_err(sdp, "Error %d writing to log\n", error);
        }

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
                submit_bio(rw, sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct super_block *sb = sdp->sd_vfs;
        unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
        struct bio *bio;

        BUG_ON(sdp->sd_log_bio);

        while (1) {
                bio = bio_alloc(GFP_NOIO, nrvecs);
                if (likely(bio))
                        break;
                nrvecs = max(nrvecs/2, 1U);
        }

        bio->bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;

        sdp->sd_log_bio = bio;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct bio *bio = sdp->sd_log_bio;
        u64 nblk;

        if (bio) {
                nblk = bio->bi_sector + bio_sectors(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
                gfs2_log_flush_bio(sdp, WRITE);
        }

        return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
                           unsigned size, unsigned offset)
{
        u64 blkno = gfs2_log_bmap(sdp);
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                gfs2_log_flush_bio(sdp, WRITE);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}

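/*
 * Add a metadata buffer to the current transaction: pin it in memory,
 * stamp the owning journal's id into the metadata header and queue the
 * buffer for writing at commit time. Buffers already queued are left
 * untouched.
 */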
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_meta_header *mh;
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        tr = current->journal_info;
        tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
        mh->__pad0 = cpu_to_be64(0);
        mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        sdp->sd_log_num_buf++;
        list_add(&bd->bd_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

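/*
 * Journaled data blocks which begin with the GFS2 magic number must be
 * "escaped" before being written to the log, so that log replay cannot
 * mistake them for metadata: the magic number is zeroed in the log copy
 * and a flag is recorded in the log descriptor, from which replay
 * restores it (see databuf_lo_scan_elements).
 */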
static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

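/*
 * Write a log descriptor block followed by its payload of metadata or
 * journaled data blocks. At most @limit blocks are described per
 * descriptor, so larger transactions are written as a sequence of
 * descriptor-plus-payload chunks.
 */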
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while(total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);

                        if (buffer_escaped(bd2->bd_bh)) {
                                void *kaddr;
                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                ptr = page_address(page);
                                kaddr = kmap_atomic(bd2->bd_bh->b_page);
                                memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
                                       bd2->bd_bh->b_size);
                                kunmap_atomic(kaddr);
                                *(__be32 *)ptr = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
                           &sdp->sd_log_le_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

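/*
 * Write out all queued revokes: a log descriptor block followed by the
 * revoked block numbers as big-endian 64bit values. When one block is
 * full, further blocks carrying a plain metadata header are used.
 */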
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        ld = page_address(page);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        } else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (tr)
                tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_databuf++;
                list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
        } else {
                list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
        }
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp) / 2;

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
                           &sdp->sd_log_le_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}


const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to the in-place disk block, remove it from the AIL.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr)
                list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

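/*
 * Resource group bitmaps are cloned when blocks within them are freed, so
 * that freed space is not reallocated before the deallocation has safely
 * reached the journal. Once the rgrp buffer has been unpinned (i.e. its
 * log write has completed), the clone can be resynced with the real bitmap
 * and the freed space made available to the allocator again.
 */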
static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == NULL)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
        rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction in whose AIL list the buffer should be placed
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_tr) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_tr = tr;
        list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
                sdp->sd_log_flush_head = 0;
}

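/*
 * Map the current log flush head (a journal-relative block number) to a
 * physical block number, using the journal's extent list. Returns -1
 * (as an unsigned value) if the block lies outside every extent, which
 * should not happen for a well-formed journal.
 */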
u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
        unsigned int lbn = sdp->sd_log_flush_head;
        struct gfs2_journal_extent *je;
        u64 block;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
                if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
                        block = je->dblock + lbn - je->lblock;
                        gfs2_log_incr_head(sdp);
                        return block;
                }
        }

        return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
                                  blk_status_t error)
{
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
        unsigned size;

        bh = page_buffers(page);
        size = bvec->bv_len;
        while (bh_offset(bh) < bvec->bv_offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        mark_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct page *page;
        int i;

        if (bio->bi_status) {
                fs_err(sdp, "Error %d writing to journal, jid=%u\n",
                       bio->bi_status, sdp->sd_jdesc->jd_jid);
                wake_up(&sdp->sd_logd_waitq);
        }

        bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
 * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
                bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
                submit_bio(sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct super_block *sb = sdp->sd_vfs;
        struct bio *bio;

        BUG_ON(sdp->sd_log_bio);

        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
        bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
        bio_set_dev(bio, sb->s_bdev);
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;

        sdp->sd_log_bio = bio;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct bio *bio = sdp->sd_log_bio;
        u64 nblk;

        if (bio) {
                nblk = bio_end_sector(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
                gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
        }

        return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
                    unsigned size, unsigned offset, u64 blkno)
{
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
                       gfs2_log_bmap(sdp));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        gfs2_log_write(sdp, page, sb->s_blocksize, 0,
                       gfs2_log_bmap(sdp));
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}

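/*
 * Journaled data blocks which begin with the GFS2 magic number must be
 * "escaped" before being written to the log, so that log replay cannot
 * mistake them for metadata: the magic number is zeroed in the log copy
 * and a flag is recorded in the log descriptor, from which replay
 * restores it (see databuf_lo_scan_elements).
 */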
static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

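/*
 * Comparison callback for list_sort(): order bufdata elements by disk
 * block number, so that the log payload is written out in (mostly)
 * ascending disk order.
 */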
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_bufdata *bda, *bdb;

        bda = list_entry(a, struct gfs2_bufdata, bd_list);
        bdb = list_entry(b, struct gfs2_bufdata, bd_list);

        if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
                return -1;
        if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
                return 1;
        return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        list_sort(NULL, blist, blocknr_cmp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while(total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);

                        if (buffer_escaped(bd2->bd_bh)) {
                                void *kaddr;
                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                ptr = page_address(page);
                                kaddr = kmap_atomic(bd2->bd_bh->b_page);
                                memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
                                       bd2->bd_bh->b_size);
                                kunmap_atomic(kaddr);
                                *(__be32 *)ptr = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
        unsigned int nbuf;

        if (tr == NULL)
                return;
        nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
        gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head;
        struct gfs2_bufdata *bd;

        if (tr == NULL)
                return;

        head = &tr->tr_buf;
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        if (pass != 0)
                return;

        jd->jd_found_blocks = 0;
        jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(jd, &start);

        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                jd->jd_found_blocks++;

                if (gfs2_revoke_check(jd, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                jd->jd_replayed_blocks++;
        }

        return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        int error;

        if (mapping == NULL)
                mapping = &sdp->sd_aspace;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

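/*
 * Write out all queued revokes: a log descriptor block followed by the
 * revoked block numbers as big-endian 64bit values. When one block is
 * full, further blocks carrying a plain metadata header are used.
 */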
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        gfs2_write_revokes(sdp);
        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        if (pass != 0)
                return;

        jd->jd_found_revokes = 0;
        jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(jd, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        } else if (error)
                                jd->jd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(jd);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, jd->jd_found_revokes);

        gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int limit = databuf_limit(sdp);
        unsigned int nbuf;

        if (tr == NULL)
                return;
        nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
        gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(jd, &start);
        for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                jd->jd_found_blocks++;

                if (gfs2_revoke_check(jd, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                jd->jd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head;
        struct gfs2_bufdata *bd;

        if (tr == NULL)
                return;

        head = &tr->tr_databuf;
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gfs2_unpin(sdp, bd->bd_bh, tr);
        }
}


const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_revoke_lops,
        NULL,
};