/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to the in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

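/*
 * maybe_release_space - make freed rgrp blocks allocatable again
 *
 * Note (added commentary): resource group bitmaps keep a "clone" copy
 * (bi_clone) holding the pre-deallocation state, so that blocks freed by a
 * not-yet-committed transaction cannot be reallocated too early. Once the
 * rgrp buffer has made it to the log and is being unpinned, it is safe to
 * copy the committed bitmap state back over the clone and let rd_free_clone
 * catch up with rd_free.
 */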
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction to whose AIL list the buffer is moved
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

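/*
 * gfs2_log_incr_head - advance the log flush head by one block
 *
 * Note (added commentary): the journal is used as a circular buffer, so the
 * flush head wraps back to block zero at the end of the journal. The BUG_ON
 * below catches the flush head running into the log tail part way through a
 * flush, which would mean overwriting journal blocks whose contents have
 * not yet been written back in place.
 */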
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

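/*
 * gfs2_log_bmap - map a journal-relative block to a device block
 *
 * Note (added commentary): the journal may be physically discontiguous, so
 * the flush head (a block offset within the journal) is translated into an
 * absolute device block number via the journal's extent list; the flush
 * head is advanced as a side effect. The -1 return should be unreachable
 * for a journal whose extent list covers every journal block.
 */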
u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
 * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
		submit_bio(sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
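		/*
		 * Note (added commentary): bio_end_sector() is the 512-byte
		 * sector just past the end of the pending bio; shifting it
		 * right by sd_fsb2bb_shift converts sectors back into
		 * filesystem blocks. If the requested block lands exactly
		 * there, the write is physically contiguous and can simply
		 * be appended to the cached bio.
		 */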
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
		       gfs2_log_bmap(sdp));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
		       gfs2_log_bmap(sdp));
}

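/*
 * gfs2_get_log_desc - allocate and initialise a log descriptor block
 *
 * Note (added commentary): each section of a transaction in the log starts
 * with a descriptor block. ld_type says what follows (metadata, journaled
 * data or revokes), ld_length is the number of journal blocks the section
 * occupies, and ld_data1 is a type-specific count (buffers or revokes).
 */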
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

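/*
 * Note (added commentary): a journaled data block that happens to begin
 * with GFS2_MAGIC could be mistaken for metadata when the journal is
 * replayed. Such blocks are "escaped": the magic number is zeroed in the
 * copy written to the log, a flag is recorded in the log descriptor, and
 * databuf_lo_scan_elements() restores the magic number on replay.
 */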
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

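/*
 * Note (added commentary): comparison helper for list_sort(). Sorting the
 * pinned buffers by their in-place block number appears intended to make
 * the subsequent in-place writeback more sequential on disk.
 */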
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

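/*
 * gfs2_before_commit - write a batch of pinned buffers into the log
 *
 * Note (added commentary): this works in chunks of at most @limit buffers.
 * For each chunk it makes two passes over the (sorted) buffer list: the
 * first pass fills a descriptor page with the in-place block numbers (plus
 * an escape flag per block for journaled data), and the second pass writes
 * the buffer contents themselves, substituting an escaped copy from the
 * page pool where needed.
 */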
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

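/*
 * Note (added commentary): journal replay is a two-pass scan; revokes are
 * collected in pass 0 and blocks are replayed in pass 1. For each block
 * number listed in a metadata descriptor, the code below skips revoked
 * blocks, reads the logged copy, copies it over the in-place buffer,
 * sanity-checks the metadata header and marks the buffer dirty for
 * writeback.
 */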
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

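/*
 * Note (added commentary): a revoke records that a block was freed after a
 * copy of it was written to the log, so any earlier logged copies of that
 * block must not be replayed. The revokes accumulated on sd_log_le_revoke
 * are packed as a descriptor block followed, when necessary, by
 * GFS2_METATYPE_LB continuation blocks, each holding as many big-endian
 * block numbers as fit.
 */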
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The transaction being committed
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

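/*
 * Note (added commentary): the order of this NULL-terminated array
 * determines the order in which each log operation's hooks run during
 * commit and journal recovery: journaled data first, then metadata, then
 * revokes.
 */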
const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};