v4.6
 
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
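
/*
 * Editor's note: the b_state mask above tests several buffer_head flag bits
 * in one comparison instead of calling buffer_dirty(), buffer_locked() etc.
 * individually.  A minimal sketch of the same technique (illustrative only,
 * not part of the original file; it uses only the generic BH_* bits):
 *
 *	const unsigned long bad = (1UL << BH_Dirty) | (1UL << BH_Lock);
 *
 *	if (bh->b_state & bad)
 *		pr_warn("buffer %llu is dirty or locked\n",
 *			(unsigned long long)bh->b_blocknr);
 */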

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
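
/*
 * Editor's note: a worked example of the max_revokes computation above,
 * assuming a 4096-byte block and the usual on-disk header sizes
 * (sizeof(struct gfs2_log_descriptor) == 72 and
 * sizeof(struct gfs2_meta_header) == 24 -- verify against gfs2_ondisk.h
 * before relying on these numbers):
 *
 *	first log descriptor block:  (4096 - 72) / 8 = 503 revokes
 *	each continuation block:     (4096 - 24) / 8 = 509 revokes
 *
 * So for, say, 1200 queued revokes the loop rounds max_revokes up from
 * 503 to 1012 and then 1521, i.e. enough reserved space for three blocks.
 */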

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}
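
/*
 * Editor's note: rgrp_go_sync() uses the common write-then-wait idiom for
 * flushing a mapping range synchronously.  A minimal sketch of the same
 * pattern (illustrative only; "mapping", "start" and "end" stand in for
 * whatever range the caller owns):
 *
 *	filemap_fdatawrite_range(mapping, start, end);	    // start writeback
 *	err = filemap_fdatawait_range(mapping, start, end); // wait for I/O
 *	mapping_set_error(mapping, err);    // remember failure for fsync()
 */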

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}
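
/*
 * Editor's note: the guard above makes a zero link count "sticky".  A quick
 * truth table for gfs2_set_nlink() (illustrative only):
 *
 *	i_nlink (in core)   nlink (on disk)   result
 *	-----------------   ---------------   -------------------------
 *	        1                  5          set_nlink(inode, 5)
 *	        5                  0          clear_nlink(inode)
 *	        0                  3          unchanged (never re-raised)
 *	        4                  4          unchanged (no-op)
 */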

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
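
/*
 * Editor's note: every on-disk field above is stored big-endian and must be
 * converted with the be*_to_cpu() helpers before use.  A self-contained
 * sketch of the idiom (illustrative only; "disk_val" is a made-up name):
 *
 *	#include <asm/byteorder.h>
 *
 *	__be64 disk_val = cpu_to_be64(0x1122334455667788ULL);
 *	u64 host_val = be64_to_cpu(disk_val);	// safe on any endianness
 *
 * Using the __be64/__be32 types also lets sparse flag any direct
 * arithmetic on raw on-disk values.
 */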

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
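
/*
 * Editor's note: list_add(new, head) inserts "new" right after "head", so
 * the entry being added must come first (the original page had the two
 * arguments swapped, which would corrupt the superblock's list head).  A
 * minimal sketch of queueing an object on a shared list under a spinlock,
 * as done above (illustrative only; the types are hypothetical):
 *
 *	spin_lock(&queue->lock);
 *	if (list_empty(&item->node))
 *		list_add(&item->node, &queue->head);
 *	spin_unlock(&queue->lock);
 *	wake_up(&queue->waitq);
 */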

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
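
/*
 * Editor's note: gfs2_glops_list[] maps an LM_TYPE_* number straight to its
 * operations vector.  A minimal sketch of how such a dispatch table is
 * consumed (illustrative only; the lookup function is hypothetical):
 *
 *	static const struct gfs2_glock_operations *lookup_glops(unsigned type)
 *	{
 *		if (type >= ARRAY_SIZE(gfs2_glops_list))
 *			return NULL;
 *		return gfs2_glops_list[type];
 *	}
 *
 * Designated array initializers index by type, so adding a new glock type
 * only means adding one table entry.
 */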
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, 0, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}
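
/*
 * Editor's note: a worked example of the page-aligned range above, assuming
 * PAGE_SIZE == 4096, a 1024-byte block size, rd_addr == 17 and
 * rd_length == 3 (the numbers are made up for illustration):
 *
 *	start = (17 * 1024) & PAGE_MASK           = 17408 & ~4095 = 16384
 *	end   = PAGE_ALIGN((17 + 3) * 1024) - 1   = 20480 - 1     = 20479
 *
 * i.e. the byte range is widened outward to whole pages, since the metadata
 * address space is written back and invalidated in page-sized units.
 */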

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
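
/*
 * Editor's note: GIF_GLOP_PENDING acts as a bit lock: gfs2_glock2inode()
 * sets it, and gfs2_clear_glop_pending() releases it with clear_bit_unlock()
 * (which has release ordering) and wakes any waiter.  A minimal sketch of
 * the waiting side of such a bit (illustrative only; callers elsewhere in
 * gfs2 wait like this before touching the inode again):
 *
 *	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 */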

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
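
/*
 * Editor's note: inode_go_sync() reports the *first* error from a chain of
 * sync steps while still running every step.  A compact sketch of the
 * first-error-wins idiom used above (illustrative only; step_one() and
 * step_two() are hypothetical):
 *
 *	int error = 0, ret;
 *
 *	ret = step_one();
 *	if (!error)
 *		error = ret;
 *	ret = step_two();	// still runs even if step_one() failed
 *	if (!error)
 *		error = ret;
 *	return error;
 */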

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_glock *io_gl;
	int error;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	error = gfs2_inode_refresh(ip);
	if (error)
		return error;
	io_gl = ip->i_iopen_gh.gh_gl;
	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
	return 0;
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(inode->i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode),
		  inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()).  But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
	.go_unlocked = inode_go_unlocked,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};