/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

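	/*
	 * Buffers that are still dirty, pinned, or locked should no longer
	 * be on the AIL at this point; hitting one outside of fsync means
	 * the AIL has been corrupted, so report it and withdraw.
	 */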
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
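	/*
	 * Reserve one block for the log header plus however many log
	 * descriptor blocks it takes to hold tr_revokes u64 block numbers.
	 */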
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

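	/*
	 * The first log descriptor block holds fewer revokes than the
	 * continuation blocks (its gfs2_log_descriptor header is larger than
	 * a gfs2_meta_header), so grow the estimate in continuation-block
	 * increments until it covers everything on the AIL.
	 */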
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

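	/*
	 * Flush the journal first so that all journaled metadata for this
	 * glock is stable on disk before the in-place buffers and pages are
	 * written back.
	 */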
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

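	/* Keep the glock if anything beyond the first holder is queued. */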
	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
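	/* Only let atime move forwards; the in-core value may be newer. */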
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

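		/*
		 * The recovered journal head must be a clean (unmount) log
		 * header; anything else means the log was not shut down
		 * properly and the filesystem is inconsistent.
		 */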
		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

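	/*
	 * Hold an extra reference on the glock while the delete work is
	 * queued; drop it again if the work was already queued.
	 */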
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
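	/*
	 * TR_ONSTACK tells the transaction code that this struct gfs2_trans
	 * lives on our stack, so gfs2_trans_end() must not try to free it.
	 */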
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

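	/*
	 * Write back and wait on just the pages that cover this resource
	 * group's metadata (rd_length blocks starting at rd_addr), rounded
	 * out to page boundaries.
	 */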
	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

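/*
 * Fetch gl_object under gl_lockref.lock and set GIF_GLOP_PENDING so that the
 * inode is kept around until the glock operation completes;
 * gfs2_clear_glop_pending() lifts the flag again and wakes any waiters.
 */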
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
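	/* Only let atime move forwards; the in-core value may be newer. */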
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_glock *io_gl;
	int error;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	error = gfs2_inode_refresh(ip);
	if (error)
		return error;
	io_gl = ip->i_iopen_gh.gh_gl;
	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
	return 0;
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode),
		       inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()). But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
	.go_unlocked = inode_go_unlocked,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};