   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/sched.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/buffer_head.h>
  16#include <linux/delay.h>
  17#include <linux/sort.h>
  18#include <linux/jhash.h>
  19#include <linux/kallsyms.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/list.h>
  22#include <linux/wait.h>
  23#include <linux/module.h>
  24#include <asm/uaccess.h>
  25#include <linux/seq_file.h>
  26#include <linux/debugfs.h>
  27#include <linux/kthread.h>
  28#include <linux/freezer.h>
  29#include <linux/workqueue.h>
  30#include <linux/jiffies.h>
  31#include <linux/rcupdate.h>
  32#include <linux/rculist_bl.h>
  33#include <linux/bit_spinlock.h>
  34#include <linux/percpu.h>
  35#include <linux/list_sort.h>
  36#include <linux/lockref.h>
  37#include <linux/rhashtable.h>
  38
  39#include "gfs2.h"
  40#include "incore.h"
  41#include "glock.h"
  42#include "glops.h"
  43#include "inode.h"
  44#include "lops.h"
  45#include "meta_io.h"
  46#include "quota.h"
  47#include "super.h"
  48#include "util.h"
  49#include "bmap.h"
  50#define CREATE_TRACE_POINTS
  51#include "trace_gfs2.h"
  52
  53struct gfs2_glock_iter {
  54	struct gfs2_sbd *sdp;		/* incore superblock           */
  55	struct rhashtable_iter hti;	/* rhashtable iterator         */
  56	struct gfs2_glock *gl;		/* current glock struct        */
  57	loff_t last_pos;		/* last position               */
  58};
  59
  60typedef void (*glock_examiner) (struct gfs2_glock * gl);
  61
  62static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  63
  64static struct dentry *gfs2_root;
  65static struct workqueue_struct *glock_workqueue;
  66struct workqueue_struct *gfs2_delete_workqueue;
  67static LIST_HEAD(lru_list);
  68static atomic_t lru_count = ATOMIC_INIT(0);
  69static DEFINE_SPINLOCK(lru_lock);
  70
  71#define GFS2_GL_HASH_SHIFT      15
  72#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
  73
  74static struct rhashtable_params ht_parms = {
  75	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
  76	.key_len = sizeof(struct lm_lockname),
  77	.key_offset = offsetof(struct gfs2_glock, gl_name),
  78	.head_offset = offsetof(struct gfs2_glock, gl_node),
  79};
  80
  81static struct rhashtable gl_hash_table;
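/*
 * Glocks are keyed in gl_hash_table by their full lm_lockname (lock
 * number, lock type and superblock), as described by ht_parms above.
 * A sketch of the lookup pattern, mirroring gfs2_glock_get() below
 * (all names as used later in this file):
 *
 *	struct lm_lockname name = { .ln_number = number,
 *				    .ln_type = glops->go_type,
 *				    .ln_sbd = sdp };
 *	struct gfs2_glock *gl;
 *
 *	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 *	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
 *		gl = NULL;
 *
 * A glock found in the table may already be dying, hence the
 * lockref_get_not_dead() check.
 */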
  82
  83void gfs2_glock_free(struct gfs2_glock *gl)
  84{
  85	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
  86
  87	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
  88		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
  89	} else {
  90		kfree(gl->gl_lksb.sb_lvbptr);
  91		kmem_cache_free(gfs2_glock_cachep, gl);
  92	}
  93	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
  94		wake_up(&sdp->sd_glock_wait);
  95}
  96
  97/**
  98 * gfs2_glock_hold() - increment reference count on glock
  99 * @gl: The glock to hold
 100 *
 101 */
 102
 103static void gfs2_glock_hold(struct gfs2_glock *gl)
 104{
 105	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 106	lockref_get(&gl->gl_lockref);
 107}
 108
 109/**
 110 * demote_ok - Check to see if it's ok to unlock a glock
 111 * @gl: the glock
 112 *
 113 * Returns: 1 if it's ok
 114 */
 115
 116static int demote_ok(const struct gfs2_glock *gl)
 117{
 118	const struct gfs2_glock_operations *glops = gl->gl_ops;
 119
 120	if (gl->gl_state == LM_ST_UNLOCKED)
 121		return 0;
 122	if (!list_empty(&gl->gl_holders))
 123		return 0;
 124	if (glops->go_demote_ok)
 125		return glops->go_demote_ok(gl);
 126	return 1;
 127}
 128
 129
 130void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 131{
 132	spin_lock(&lru_lock);
 133
 134	if (!list_empty(&gl->gl_lru))
 135		list_del_init(&gl->gl_lru);
 136	else
 137		atomic_inc(&lru_count);
 138
 139	list_add_tail(&gl->gl_lru, &lru_list);
 140	set_bit(GLF_LRU, &gl->gl_flags);
 141	spin_unlock(&lru_lock);
 142}
 143
 144static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 145{
 146	spin_lock(&lru_lock);
 147	if (!list_empty(&gl->gl_lru)) {
 148		list_del_init(&gl->gl_lru);
 149		atomic_dec(&lru_count);
 150		clear_bit(GLF_LRU, &gl->gl_flags);
 151	}
 152	spin_unlock(&lru_lock);
 153}
 154
 155/**
 156 * gfs2_glock_put() - Decrement reference count on glock
 157 * @gl: The glock to put
 158 *
 159 */
 160
 161void gfs2_glock_put(struct gfs2_glock *gl)
 162{
 163	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 164	struct address_space *mapping = gfs2_glock2aspace(gl);
 165
 166	if (lockref_put_or_lock(&gl->gl_lockref))
 167		return;
 168
 169	lockref_mark_dead(&gl->gl_lockref);
 170
 171	gfs2_glock_remove_from_lru(gl);
 172	spin_unlock(&gl->gl_lockref.lock);
 173	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 174	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 175	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 176	trace_gfs2_glock_put(gl);
 177	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 178}
 179
 180/**
 181 * may_grant - check if it's ok to grant a new lock
 182 * @gl: The glock
 183 * @gh: The lock request which we wish to grant
 184 *
 185 * Returns: true if it's ok to grant the lock
 186 */
 187
 188static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 189{
 190	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
 191	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 192	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 193		return 0;
 194	if (gl->gl_state == gh->gh_state)
 195		return 1;
 196	if (gh->gh_flags & GL_EXACT)
 197		return 0;
 198	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 199		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
 200			return 1;
 201		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
 202			return 1;
 203	}
 204	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
 205		return 1;
 206	return 0;
 207}
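/*
 * An informal summary of may_grant(), derived from the code above: EX
 * is never shared, so a request is refused if it asks for EX, or if
 * the head of the queue asked for EX, unless the request *is* the
 * head.  Otherwise it is granted if the glock is already in the
 * requested state (the only case GL_EXACT permits); if the glock is
 * EX and both the request and the queue head want SH (or both want
 * DF); or if LM_FLAG_ANY is set and the glock is locked in any state.
 */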
 208
 209static void gfs2_holder_wake(struct gfs2_holder *gh)
 210{
 211	clear_bit(HIF_WAIT, &gh->gh_iflags);
 212	smp_mb__after_atomic();
 213	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 214}
 215
 216/**
 217 * do_error - Something unexpected has happened during a lock request
 218 *
 219 */
 220
 221static inline void do_error(struct gfs2_glock *gl, const int ret)
 222{
 223	struct gfs2_holder *gh, *tmp;
 224
 225	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 226		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 227			continue;
 228		if (ret & LM_OUT_ERROR)
 229			gh->gh_error = -EIO;
 230		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 231			gh->gh_error = GLR_TRYFAILED;
 232		else
 233			continue;
 234		list_del_init(&gh->gh_list);
 235		trace_gfs2_glock_queue(gh, 0);
 236		gfs2_holder_wake(gh);
 237	}
 238}
 239
 240/**
 241 * do_promote - promote as many requests as possible on the current queue
 242 * @gl: The glock
 243 *
 244 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 245 *          if a type specific operation is underway.
 246 */
 247
 248static int do_promote(struct gfs2_glock *gl)
 249__releases(&gl->gl_lockref.lock)
 250__acquires(&gl->gl_lockref.lock)
 251{
 252	const struct gfs2_glock_operations *glops = gl->gl_ops;
 253	struct gfs2_holder *gh, *tmp;
 254	int ret;
 255
 256restart:
 257	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 258		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 259			continue;
 260		if (may_grant(gl, gh)) {
 261			if (gh->gh_list.prev == &gl->gl_holders &&
 262			    glops->go_lock) {
 263				spin_unlock(&gl->gl_lockref.lock);
 264				/* FIXME: eliminate this eventually */
 265				ret = glops->go_lock(gh);
 266				spin_lock(&gl->gl_lockref.lock);
 267				if (ret) {
 268					if (ret == 1)
 269						return 2;
 270					gh->gh_error = ret;
 271					list_del_init(&gh->gh_list);
 272					trace_gfs2_glock_queue(gh, 0);
 273					gfs2_holder_wake(gh);
 274					goto restart;
 275				}
 276				set_bit(HIF_HOLDER, &gh->gh_iflags);
 277				trace_gfs2_promote(gh, 1);
 278				gfs2_holder_wake(gh);
 279				goto restart;
 280			}
 281			set_bit(HIF_HOLDER, &gh->gh_iflags);
 282			trace_gfs2_promote(gh, 0);
 283			gfs2_holder_wake(gh);
 284			continue;
 285		}
 286		if (gh->gh_list.prev == &gl->gl_holders)
 287			return 1;
 288		do_error(gl, 0);
 289		break;
 290	}
 291	return 0;
 292}
 293
 294/**
 295 * find_first_waiter - find the first gh that's waiting for the glock
 296 * @gl: the glock
 297 */
 298
 299static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 300{
 301	struct gfs2_holder *gh;
 302
 303	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 304		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 305			return gh;
 306	}
 307	return NULL;
 308}
 309
 310/**
 311 * state_change - record that the glock is now in a different state
 312 * @gl: the glock
 313 * @new_state: the new state
 314 *
 315 */
 316
 317static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 318{
 319	int held1, held2;
 320
 321	held1 = (gl->gl_state != LM_ST_UNLOCKED);
 322	held2 = (new_state != LM_ST_UNLOCKED);
 323
 324	if (held1 != held2) {
 325		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 326		if (held2)
 327			gl->gl_lockref.count++;
 328		else
 329			gl->gl_lockref.count--;
 330	}
 331	if (held1 && held2 && list_empty(&gl->gl_holders))
 332		clear_bit(GLF_QUEUED, &gl->gl_flags);
 333
 334	if (new_state != gl->gl_target)
 335		/* shorten our minimum hold time */
 336		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 337				       GL_GLOCK_MIN_HOLD);
 338	gl->gl_state = new_state;
 339	gl->gl_tchange = jiffies;
 340}
 341
 342static void gfs2_demote_wake(struct gfs2_glock *gl)
 343{
 344	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 345	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 346	smp_mb__after_atomic();
 347	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 348}
 349
 350/**
 351 * finish_xmote - The DLM has replied to one of our lock requests
 352 * @gl: The glock
 353 * @ret: The status from the DLM
 354 *
 355 */
 356
 357static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 358{
 359	const struct gfs2_glock_operations *glops = gl->gl_ops;
 360	struct gfs2_holder *gh;
 361	unsigned state = ret & LM_OUT_ST_MASK;
 362	int rv;
 363
 364	spin_lock(&gl->gl_lockref.lock);
 365	trace_gfs2_glock_state_change(gl, state);
 366	state_change(gl, state);
 367	gh = find_first_waiter(gl);
 368
 369	/* Demote to UN request arrived during demote to SH or DF */
 370	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 371	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 372		gl->gl_target = LM_ST_UNLOCKED;
 373
 374	/* Check for state != intended state */
 375	if (unlikely(state != gl->gl_target)) {
 376		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 377			/* move to back of queue and try next entry */
 378			if (ret & LM_OUT_CANCELED) {
 379				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 380					list_move_tail(&gh->gh_list, &gl->gl_holders);
 381				gh = find_first_waiter(gl);
 382				gl->gl_target = gh->gh_state;
 383				goto retry;
 384			}
 385			/* Some error or failed "try lock" - report it */
 386			if ((ret & LM_OUT_ERROR) ||
 387			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 388				gl->gl_target = gl->gl_state;
 389				do_error(gl, ret);
 390				goto out;
 391			}
 392		}
 393		switch(state) {
 394		/* Unlocked due to conversion deadlock, try again */
 395		case LM_ST_UNLOCKED:
 396retry:
 397			do_xmote(gl, gh, gl->gl_target);
 398			break;
 399		/* Conversion fails, unlock and try again */
 400		case LM_ST_SHARED:
 401		case LM_ST_DEFERRED:
 402			do_xmote(gl, gh, LM_ST_UNLOCKED);
 403			break;
 404		default: /* Everything else */
 405			pr_err("wanted %u got %u\n", gl->gl_target, state);
 406			GLOCK_BUG_ON(gl, 1);
 407		}
 408		spin_unlock(&gl->gl_lockref.lock);
 409		return;
 410	}
 411
 412	/* Fast path - we got what we asked for */
 413	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 414		gfs2_demote_wake(gl);
 415	if (state != LM_ST_UNLOCKED) {
 416		if (glops->go_xmote_bh) {
 417			spin_unlock(&gl->gl_lockref.lock);
 418			rv = glops->go_xmote_bh(gl, gh);
 419			spin_lock(&gl->gl_lockref.lock);
 420			if (rv) {
 421				do_error(gl, rv);
 422				goto out;
 423			}
 424		}
 425		rv = do_promote(gl);
 426		if (rv == 2)
 427			goto out_locked;
 428	}
 429out:
 430	clear_bit(GLF_LOCK, &gl->gl_flags);
 431out_locked:
 432	spin_unlock(&gl->gl_lockref.lock);
 433}
 434
 435/**
 436 * do_xmote - Calls the DLM to change the state of a lock
 437 * @gl: The lock state
 438 * @gh: The holder (only for promotes)
 439 * @target: The target lock state
 440 *
 441 */
 442
 443static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
 444__releases(&gl->gl_lockref.lock)
 445__acquires(&gl->gl_lockref.lock)
 446{
 447	const struct gfs2_glock_operations *glops = gl->gl_ops;
 448	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 449	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 450	int ret;
 451
 452	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 453		      LM_FLAG_PRIORITY);
 454	GLOCK_BUG_ON(gl, gl->gl_state == target);
 455	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 456	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 457	    glops->go_inval) {
 458		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 459		do_error(gl, 0); /* Fail queued try locks */
 460	}
 461	gl->gl_req = target;
 462	set_bit(GLF_BLOCKING, &gl->gl_flags);
 463	if ((gl->gl_req == LM_ST_UNLOCKED) ||
 464	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 465	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 466		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 467	spin_unlock(&gl->gl_lockref.lock);
 468	if (glops->go_sync)
 469		glops->go_sync(gl);
 470	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 471		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 472	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 473
 474	gfs2_glock_hold(gl);
 475	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 476		/* lock_dlm */
 477		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 478		if (ret) {
 479			pr_err("lm_lock ret %d\n", ret);
 480			GLOCK_BUG_ON(gl, 1);
 481		}
 482	} else { /* lock_nolock */
 483		finish_xmote(gl, target);
 484		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 485			gfs2_glock_put(gl);
 486	}
 487
 488	spin_lock(&gl->gl_lockref.lock);
 489}
 490
 491/**
 492 * find_first_holder - find the first "holder" gh
 493 * @gl: the glock
 494 */
 495
 496static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 497{
 498	struct gfs2_holder *gh;
 499
 500	if (!list_empty(&gl->gl_holders)) {
 501		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 502		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 503			return gh;
 504	}
 505	return NULL;
 506}
 507
 508/**
 509 * run_queue - do all outstanding tasks related to a glock
 510 * @gl: The glock in question
 511 * @nonblock: True if we must not block in run_queue
 512 *
 513 */
 514
 515static void run_queue(struct gfs2_glock *gl, const int nonblock)
 516__releases(&gl->gl_lockref.lock)
 517__acquires(&gl->gl_lockref.lock)
 518{
 519	struct gfs2_holder *gh = NULL;
 520	int ret;
 521
 522	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 523		return;
 524
 525	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 526
 527	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 528	    gl->gl_demote_state != gl->gl_state) {
 529		if (find_first_holder(gl))
 530			goto out_unlock;
 531		if (nonblock)
 532			goto out_sched;
 533		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 534		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 535		gl->gl_target = gl->gl_demote_state;
 536	} else {
 537		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 538			gfs2_demote_wake(gl);
 539		ret = do_promote(gl);
 540		if (ret == 0)
 541			goto out_unlock;
 542		if (ret == 2)
 543			goto out;
 544		gh = find_first_waiter(gl);
 545		gl->gl_target = gh->gh_state;
 546		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 547			do_error(gl, 0); /* Fail queued try locks */
 548	}
 549	do_xmote(gl, gh, gl->gl_target);
 550out:
 551	return;
 552
 553out_sched:
 554	clear_bit(GLF_LOCK, &gl->gl_flags);
 555	smp_mb__after_atomic();
 556	gl->gl_lockref.count++;
 557	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 558		gl->gl_lockref.count--;
 559	return;
 560
 561out_unlock:
 562	clear_bit(GLF_LOCK, &gl->gl_flags);
 563	smp_mb__after_atomic();
 564	return;
 565}
 566
 567static void delete_work_func(struct work_struct *work)
 568{
 569	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
 570	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 571	struct gfs2_inode *ip;
 572	struct inode *inode;
 573	u64 no_addr = gl->gl_name.ln_number;
 574
 575	/* If someone's using this glock to create a new dinode, the block must
 576	   have been freed by another node, then re-used, in which case our
 577	   iopen callback is too late after the fact. Ignore it. */
 578	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
 579		goto out;
 580
 581	ip = gl->gl_object;
 582	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
 583
 584	if (ip)
 585		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
 586	else
 587		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
 588	if (inode && !IS_ERR(inode)) {
 589		d_prune_aliases(inode);
 590		iput(inode);
 591	}
 592out:
 593	gfs2_glock_put(gl);
 594}
 595
 596static void glock_work_func(struct work_struct *work)
 597{
 598	unsigned long delay = 0;
 599	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 600	int drop_ref = 0;
 601
 602	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 603		finish_xmote(gl, gl->gl_reply);
 604		drop_ref = 1;
 605	}
 606	spin_lock(&gl->gl_lockref.lock);
 607	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 608	    gl->gl_state != LM_ST_UNLOCKED &&
 609	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
 610		unsigned long holdtime, now = jiffies;
 611
 612		holdtime = gl->gl_tchange + gl->gl_hold_time;
 613		if (time_before(now, holdtime))
 614			delay = holdtime - now;
 615
 616		if (!delay) {
 617			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
 618			set_bit(GLF_DEMOTE, &gl->gl_flags);
 619		}
 620	}
 621	run_queue(gl, 0);
 622	spin_unlock(&gl->gl_lockref.lock);
 623	if (!delay)
 624		gfs2_glock_put(gl);
 625	else {
 626		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 627			delay = 0;
 628		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 629			gfs2_glock_put(gl);
 630	}
 631	if (drop_ref)
 632		gfs2_glock_put(gl);
 633}
 634
 635/**
 636 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 637 * @sdp: The GFS2 superblock
 638 * @number: the lock number
 639 * @glops: The glock_operations to use
 640 * @create: If 0, don't create the glock if it doesn't exist
 641 * @glp: the glock is returned here
 642 *
 643 * This does not lock a glock, just finds/creates structures for one.
 644 *
 645 * Returns: errno
 646 */
 647
 648int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 649		   const struct gfs2_glock_operations *glops, int create,
 650		   struct gfs2_glock **glp)
 651{
 652	struct super_block *s = sdp->sd_vfs;
 653	struct lm_lockname name = { .ln_number = number,
 654				    .ln_type = glops->go_type,
 655				    .ln_sbd = sdp };
 656	struct gfs2_glock *gl, *tmp = NULL;
 657	struct address_space *mapping;
 658	struct kmem_cache *cachep;
 659	int ret, tries = 0;
 660
 661	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 662	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
 663		gl = NULL;
 664
 665	*glp = gl;
 666	if (gl)
 667		return 0;
 668	if (!create)
 669		return -ENOENT;
 670
 671	if (glops->go_flags & GLOF_ASPACE)
 672		cachep = gfs2_glock_aspace_cachep;
 673	else
 674		cachep = gfs2_glock_cachep;
 675	gl = kmem_cache_alloc(cachep, GFP_NOFS);
 676	if (!gl)
 677		return -ENOMEM;
 678
 679	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 680
 681	if (glops->go_flags & GLOF_LVB) {
 682		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
 683		if (!gl->gl_lksb.sb_lvbptr) {
 684			kmem_cache_free(cachep, gl);
 685			return -ENOMEM;
 686		}
 687	}
 688
 689	atomic_inc(&sdp->sd_glock_disposal);
 690	gl->gl_node.next = NULL;
 691	gl->gl_flags = 0;
 692	gl->gl_name = name;
 693	gl->gl_lockref.count = 1;
 694	gl->gl_state = LM_ST_UNLOCKED;
 695	gl->gl_target = LM_ST_UNLOCKED;
 696	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 697	gl->gl_ops = glops;
 698	gl->gl_dstamp = ktime_set(0, 0);
 699	preempt_disable();
 700	/* We use the global stats to estimate the initial per-glock stats */
 701	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
 702	preempt_enable();
 703	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
 704	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
 705	gl->gl_tchange = jiffies;
 706	gl->gl_object = NULL;
 707	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 708	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 709	INIT_WORK(&gl->gl_delete, delete_work_func);
 710
 711	mapping = gfs2_glock2aspace(gl);
 712	if (mapping) {
 713		mapping->a_ops = &gfs2_meta_aops;
 714		mapping->host = s->s_bdev->bd_inode;
 715		mapping->flags = 0;
 716		mapping_set_gfp_mask(mapping, GFP_NOFS);
 717		mapping->private_data = NULL;
 718		mapping->writeback_index = 0;
 719	}
 720
 721again:
 722	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
 723					    ht_parms);
 724	if (ret == 0) {
 725		*glp = gl;
 726		return 0;
 727	}
 728
 729	if (ret == -EEXIST) {
 730		ret = 0;
 731		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 732		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
 733			if (++tries < 100) {
 734				cond_resched();
 735				goto again;
 736			}
 737			tmp = NULL;
 738			ret = -ENOMEM;
 739		}
 740	} else {
 741		WARN_ON_ONCE(ret);
 742	}
 743	kfree(gl->gl_lksb.sb_lvbptr);
 744	kmem_cache_free(cachep, gl);
 745	atomic_dec(&sdp->sd_glock_disposal);
 746	*glp = tmp;
 747
 748	return ret;
 749}
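/*
 * A sketch of typical gfs2_glock_get() usage, as in gfs2_glock_nq_num()
 * below; the glops value (gfs2_inode_glops from glops.h) and the block
 * number no_addr are illustrative:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... use the glock ...
 *	gfs2_glock_put(gl);
 *
 * The reference obtained here must be dropped with gfs2_glock_put().
 */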
 750
 751/**
 752 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 753 * @gl: the glock
 754 * @state: the state we're requesting
 755 * @flags: the modifier flags
 756 * @gh: the holder structure
 757 *
 758 */
 759
 760void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
 761		      struct gfs2_holder *gh)
 762{
 763	INIT_LIST_HEAD(&gh->gh_list);
 764	gh->gh_gl = gl;
 765	gh->gh_ip = _RET_IP_;
 766	gh->gh_owner_pid = get_pid(task_pid(current));
 767	gh->gh_state = state;
 768	gh->gh_flags = flags;
 769	gh->gh_error = 0;
 770	gh->gh_iflags = 0;
 771	gfs2_glock_hold(gl);
 772}
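/*
 * The usual holder lifecycle pairs gfs2_holder_init() with
 * gfs2_glock_nq()/gfs2_glock_dq() and gfs2_holder_uninit();
 * gfs2_glock_nq_init() in glock.h combines the first two.  A sketch,
 * with the holder on the caller's stack as is typical:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	... access the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */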
 773
 774/**
 775 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 776 * @state: the state we're requesting
 777 * @flags: the modifier flags
 778 * @gh: the holder structure
 779 *
 780 * Don't mess with the glock.
 781 *
 782 */
 783
 784void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
 785{
 786	gh->gh_state = state;
 787	gh->gh_flags = flags;
 788	gh->gh_iflags = 0;
 789	gh->gh_ip = _RET_IP_;
 790	put_pid(gh->gh_owner_pid);
 791	gh->gh_owner_pid = get_pid(task_pid(current));
 792}
 793
 794/**
 795 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 796 * @gh: the holder structure
 797 *
 798 */
 799
 800void gfs2_holder_uninit(struct gfs2_holder *gh)
 801{
 802	put_pid(gh->gh_owner_pid);
 803	gfs2_glock_put(gh->gh_gl);
 804	gh->gh_gl = NULL;
 805	gh->gh_ip = 0;
 806}
 807
 808/**
 809 * gfs2_glock_wait - wait on a glock acquisition
 810 * @gh: the glock holder
 811 *
 812 * Returns: 0 on success
 813 */
 814
 815int gfs2_glock_wait(struct gfs2_holder *gh)
 816{
 817	unsigned long time1 = jiffies;
 818
 819	might_sleep();
 820	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
 821	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
 822		/* Lengthen the minimum hold time. */
 823		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
 824					      GL_GLOCK_HOLD_INCR,
 825					      GL_GLOCK_MAX_HOLD);
 826	return gh->gh_error;
 827}
 828
 829/**
 830 * handle_callback - process a demote request
 831 * @gl: the glock
 832 * @state: the state the caller wants us to change to
 833 *
 834 * There are only two requests that we are going to see in actual
 835 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 836 */
 837
 838static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 839			    unsigned long delay, bool remote)
 840{
 841	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 842
 843	set_bit(bit, &gl->gl_flags);
 844	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 845		gl->gl_demote_state = state;
 846		gl->gl_demote_time = jiffies;
 847	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 848			gl->gl_demote_state != state) {
 849		gl->gl_demote_state = LM_ST_UNLOCKED;
 850	}
 851	if (gl->gl_ops->go_callback)
 852		gl->gl_ops->go_callback(gl, remote);
 853	trace_gfs2_demote_rq(gl, remote);
 854}
 855
 856void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 857{
 858	struct va_format vaf;
 859	va_list args;
 860
 861	va_start(args, fmt);
 862
 863	if (seq) {
 864		seq_vprintf(seq, fmt, args);
 865	} else {
 866		vaf.fmt = fmt;
 867		vaf.va = &args;
 868
 869		pr_err("%pV", &vaf);
 870	}
 871
 872	va_end(args);
 873}
 874
 875/**
 876 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 877 * @gh: the holder structure to add
 878 *
 879 * Eventually we should move the recursive locking trap to a
 880 * debugging option or something like that. This is the fast
 881 * path and needs to have the minimum number of distractions.
 882 *
 883 */
 884
 885static inline void add_to_queue(struct gfs2_holder *gh)
 886__releases(&gl->gl_lockref.lock)
 887__acquires(&gl->gl_lockref.lock)
 888{
 889	struct gfs2_glock *gl = gh->gh_gl;
 890	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 891	struct list_head *insert_pt = NULL;
 892	struct gfs2_holder *gh2;
 893	int try_futile = 0;
 894
 895	BUG_ON(gh->gh_owner_pid == NULL);
 896	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 897		BUG();
 898
 899	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 900		if (test_bit(GLF_LOCK, &gl->gl_flags))
 901			try_futile = !may_grant(gl, gh);
 902		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 903			goto fail;
 904	}
 905
 906	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
 907		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
 908		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
 909			goto trap_recursive;
 910		if (try_futile &&
 911		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 912fail:
 913			gh->gh_error = GLR_TRYFAILED;
 914			gfs2_holder_wake(gh);
 915			return;
 916		}
 917		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 918			continue;
 919		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 920			insert_pt = &gh2->gh_list;
 921	}
 922	set_bit(GLF_QUEUED, &gl->gl_flags);
 923	trace_gfs2_glock_queue(gh, 1);
 924	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 925	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
 926	if (likely(insert_pt == NULL)) {
 927		list_add_tail(&gh->gh_list, &gl->gl_holders);
 928		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
 929			goto do_cancel;
 930		return;
 931	}
 932	list_add_tail(&gh->gh_list, insert_pt);
 933do_cancel:
 934	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 935	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
 936		spin_unlock(&gl->gl_lockref.lock);
 937		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
 938			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
 939		spin_lock(&gl->gl_lockref.lock);
 940	}
 941	return;
 942
 943trap_recursive:
 944	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
 945	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
 946	pr_err("lock type: %d req lock state : %d\n",
 947	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
 948	pr_err("new: %pSR\n", (void *)gh->gh_ip);
 949	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
 950	pr_err("lock type: %d req lock state : %d\n",
 951	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
 952	gfs2_dump_glock(NULL, gl);
 953	BUG();
 954}
 955
 956/**
 957 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 958 * @gh: the holder structure
 959 *
 960 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 961 *
 962 * Returns: 0, GLR_TRYFAILED, or errno on failure
 963 */
 964
 965int gfs2_glock_nq(struct gfs2_holder *gh)
 966{
 967	struct gfs2_glock *gl = gh->gh_gl;
 968	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 969	int error = 0;
 970
 971	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 972		return -EIO;
 973
 974	if (test_bit(GLF_LRU, &gl->gl_flags))
 975		gfs2_glock_remove_from_lru(gl);
 976
 977	spin_lock(&gl->gl_lockref.lock);
 978	add_to_queue(gh);
 979	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
 980		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 981		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 982		gl->gl_lockref.count++;
 983		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 984			gl->gl_lockref.count--;
 985	}
 986	run_queue(gl, 1);
 987	spin_unlock(&gl->gl_lockref.lock);
 988
 989	if (!(gh->gh_flags & GL_ASYNC))
 990		error = gfs2_glock_wait(gh);
 991
 992	return error;
 993}
 994
 995/**
 996 * gfs2_glock_poll - poll to see if an async request has been completed
 997 * @gh: the holder
 998 *
 999 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1000 */
1001
1002int gfs2_glock_poll(struct gfs2_holder *gh)
1003{
1004	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1005}
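/*
 * With GL_ASYNC, gfs2_glock_nq() queues the request and returns without
 * waiting (and without an error, per the comment above gfs2_glock_nq());
 * gfs2_glock_poll() then tests for completion and gfs2_glock_wait()
 * collects the result.  An illustrative sketch:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	... do other work ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */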
1006
1007/**
1008 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1009 * @gh: the glock holder
1010 *
1011 */
1012
1013void gfs2_glock_dq(struct gfs2_holder *gh)
1014{
1015	struct gfs2_glock *gl = gh->gh_gl;
1016	const struct gfs2_glock_operations *glops = gl->gl_ops;
1017	unsigned delay = 0;
1018	int fast_path = 0;
1019
1020	spin_lock(&gl->gl_lockref.lock);
1021	if (gh->gh_flags & GL_NOCACHE)
1022		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1023
1024	list_del_init(&gh->gh_list);
1025	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1026	if (find_first_holder(gl) == NULL) {
1027		if (glops->go_unlock) {
1028			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1029			spin_unlock(&gl->gl_lockref.lock);
1030			glops->go_unlock(gh);
1031			spin_lock(&gl->gl_lockref.lock);
1032			clear_bit(GLF_LOCK, &gl->gl_flags);
1033		}
1034		if (list_empty(&gl->gl_holders) &&
1035		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1036		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1037			fast_path = 1;
1038	}
1039	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
1040	    (glops->go_flags & GLOF_LRU))
1041		gfs2_glock_add_to_lru(gl);
1042
1043	trace_gfs2_glock_queue(gh, 0);
1044	spin_unlock(&gl->gl_lockref.lock);
1045	if (likely(fast_path))
1046		return;
1047
1048	gfs2_glock_hold(gl);
1049	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1050	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1051	    gl->gl_name.ln_type == LM_TYPE_INODE)
1052		delay = gl->gl_hold_time;
1053	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1054		gfs2_glock_put(gl);
1055}
1056
1057void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1058{
1059	struct gfs2_glock *gl = gh->gh_gl;
1060	gfs2_glock_dq(gh);
1061	might_sleep();
1062	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1063}
1064
1065/**
1066 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1067 * @gh: the holder structure
1068 *
1069 */
1070
1071void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1072{
1073	gfs2_glock_dq(gh);
1074	gfs2_holder_uninit(gh);
1075}
1076
1077/**
1078 * gfs2_glock_nq_num - acquire a glock based on lock number
1079 * @sdp: the filesystem
1080 * @number: the lock number
1081 * @glops: the glock operations for the type of glock
1082 * @state: the state to acquire the glock in
1083 * @flags: modifier flags for the acquisition
1084 * @gh: the struct gfs2_holder
1085 *
1086 * Returns: errno
1087 */
1088
1089int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1090		      const struct gfs2_glock_operations *glops,
1091		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1092{
1093	struct gfs2_glock *gl;
1094	int error;
1095
1096	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1097	if (!error) {
1098		error = gfs2_glock_nq_init(gl, state, flags, gh);
1099		gfs2_glock_put(gl);
1100	}
1101
1102	return error;
1103}
1104
1105/**
1106 * glock_compare - Compare two struct gfs2_glock structures for sorting
1107 * @arg_a: the first structure
1108 * @arg_b: the second structure
1109 *
1110 */
1111
1112static int glock_compare(const void *arg_a, const void *arg_b)
1113{
1114	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1115	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1116	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1117	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1118
1119	if (a->ln_number > b->ln_number)
1120		return 1;
1121	if (a->ln_number < b->ln_number)
1122		return -1;
1123	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1124	return 0;
1125}
1126
1127/**
1128 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1129 * @num_gh: the number of structures
1130 * @ghs: an array of struct gfs2_holder structures
1131 *
1132 * Returns: 0 on success (all glocks acquired),
1133 *          errno on failure (no glocks acquired)
1134 */
1135
1136static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1137		     struct gfs2_holder **p)
1138{
1139	unsigned int x;
1140	int error = 0;
1141
1142	for (x = 0; x < num_gh; x++)
1143		p[x] = &ghs[x];
1144
1145	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1146
1147	for (x = 0; x < num_gh; x++) {
1148		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1149
1150		error = gfs2_glock_nq(p[x]);
1151		if (error) {
1152			while (x--)
1153				gfs2_glock_dq(p[x]);
1154			break;
1155		}
1156	}
1157
1158	return error;
1159}
1160
1161/**
1162 * gfs2_glock_nq_m - acquire multiple glocks
1163 * @num_gh: the number of structures
1164 * @ghs: an array of struct gfs2_holder structures
1165 *
1166 *
1167 * Returns: 0 on success (all glocks acquired),
1168 *          errno on failure (no glocks acquired)
1169 */
1170
1171int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1172{
1173	struct gfs2_holder *tmp[4];
1174	struct gfs2_holder **pph = tmp;
1175	int error = 0;
1176
1177	switch(num_gh) {
1178	case 0:
1179		return 0;
1180	case 1:
1181		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1182		return gfs2_glock_nq(ghs);
1183	default:
1184		if (num_gh <= 4)
1185			break;
1186		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1187		if (!pph)
1188			return -ENOMEM;
1189	}
1190
1191	error = nq_m_sync(num_gh, ghs, pph);
1192
1193	if (pph != tmp)
1194		kfree(pph);
1195
1196	return error;
1197}
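/*
 * gfs2_glock_nq_m()/gfs2_glock_dq_m() take an array of initialised
 * holders and acquire them in sorted, deadlock-free order.  A sketch
 * for two inode glocks (ip1/ip2 are illustrative):
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */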
1198
1199/**
1200 * gfs2_glock_dq_m - release multiple glocks
1201 * @num_gh: the number of structures
1202 * @ghs: an array of struct gfs2_holder structures
1203 *
1204 */
1205
1206void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1207{
1208	while (num_gh--)
1209		gfs2_glock_dq(&ghs[num_gh]);
1210}
1211
1212void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1213{
1214	unsigned long delay = 0;
1215	unsigned long holdtime;
1216	unsigned long now = jiffies;
1217
1218	gfs2_glock_hold(gl);
1219	holdtime = gl->gl_tchange + gl->gl_hold_time;
1220	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1221	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1222		if (time_before(now, holdtime))
1223			delay = holdtime - now;
1224		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1225			delay = gl->gl_hold_time;
1226	}
1227
1228	spin_lock(&gl->gl_lockref.lock);
1229	handle_callback(gl, state, delay, true);
1230	spin_unlock(&gl->gl_lockref.lock);
1231	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1232		gfs2_glock_put(gl);
1233}
1234
1235/**
1236 * gfs2_should_freeze - Figure out if glock should be frozen
1237 * @gl: The glock in question
1238 *
1239 * Glocks are not frozen if (a) the result of the dlm operation is
1240 * an error, (b) the locking operation was an unlock operation or
1241 * (c) if there is a "noexp" flagged request anywhere in the queue
1242 *
1243 * Returns: 1 if freezing should occur, 0 otherwise
1244 */
1245
1246static int gfs2_should_freeze(const struct gfs2_glock *gl)
1247{
1248	const struct gfs2_holder *gh;
1249
1250	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1251		return 0;
1252	if (gl->gl_target == LM_ST_UNLOCKED)
1253		return 0;
1254
1255	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1256		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1257			continue;
1258		if (LM_FLAG_NOEXP & gh->gh_flags)
1259			return 0;
1260	}
1261
1262	return 1;
1263}
1264
1265/**
1266 * gfs2_glock_complete - Callback used by locking
1267 * @gl: Pointer to the glock
1268 * @ret: The return value from the dlm
1269 *
1270 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
1271 * to use a bitfield shared with other glock state fields.
1272 */
1273
1274void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1275{
1276	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1277
1278	spin_lock(&gl->gl_lockref.lock);
1279	gl->gl_reply = ret;
1280
1281	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1282		if (gfs2_should_freeze(gl)) {
1283			set_bit(GLF_FROZEN, &gl->gl_flags);
1284			spin_unlock(&gl->gl_lockref.lock);
1285			return;
1286		}
1287	}
1288
1289	gl->gl_lockref.count++;
1290	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1291	spin_unlock(&gl->gl_lockref.lock);
1292
1293	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1294		gfs2_glock_put(gl);
1295}
1296
1297static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1298{
1299	struct gfs2_glock *gla, *glb;
1300
1301	gla = list_entry(a, struct gfs2_glock, gl_lru);
1302	glb = list_entry(b, struct gfs2_glock, gl_lru);
1303
1304	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1305		return 1;
1306	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1307		return -1;
1308
1309	return 0;
1310}
1311
1312/**
1313 * gfs2_dispose_glock_lru - Demote a list of glocks
1314 * @list: The list to dispose of
1315 *
1316 * Disposing of glocks may involve disk accesses, so that here we sort
1317 * the glocks by number (i.e. disk location of the inodes) so that if
1318 * there are any such accesses, they'll be sent in order (mostly).
1319 *
1320 * Must be called under the lru_lock, but may drop and retake this
1321 * lock. While the lru_lock is dropped, entries may vanish from the
1322 * list, but no new entries will appear on the list (since it is
1323 * private).
1324 */
1325
1326static void gfs2_dispose_glock_lru(struct list_head *list)
1327__releases(&lru_lock)
1328__acquires(&lru_lock)
1329{
1330	struct gfs2_glock *gl;
1331
1332	list_sort(NULL, list, glock_cmp);
1333
1334	while(!list_empty(list)) {
1335		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1336		list_del_init(&gl->gl_lru);
1337		if (!spin_trylock(&gl->gl_lockref.lock)) {
1338add_back_to_lru:
1339			list_add(&gl->gl_lru, &lru_list);
1340			atomic_inc(&lru_count);
1341			continue;
1342		}
1343		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1344			spin_unlock(&gl->gl_lockref.lock);
1345			goto add_back_to_lru;
1346		}
1347		clear_bit(GLF_LRU, &gl->gl_flags);
1348		gl->gl_lockref.count++;
1349		if (demote_ok(gl))
1350			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1351		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1352		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1353			gl->gl_lockref.count--;
1354		spin_unlock(&gl->gl_lockref.lock);
1355		cond_resched_lock(&lru_lock);
1356	}
1357}
1358
1359/**
1360 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1361 * @nr: The number of entries to scan
1362 *
1363 * This function selects the entries on the LRU which are able to
1364 * be demoted, and then kicks off the process by calling
1365 * gfs2_dispose_glock_lru() above.
1366 */
1367
1368static long gfs2_scan_glock_lru(int nr)
1369{
1370	struct gfs2_glock *gl;
1371	LIST_HEAD(skipped);
1372	LIST_HEAD(dispose);
1373	long freed = 0;
1374
1375	spin_lock(&lru_lock);
1376	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1377		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1378
1379		/* Test for being demotable */
1380		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1381			list_move(&gl->gl_lru, &dispose);
1382			atomic_dec(&lru_count);
1383			freed++;
1384			continue;
1385		}
1386
1387		list_move(&gl->gl_lru, &skipped);
1388	}
1389	list_splice(&skipped, &lru_list);
1390	if (!list_empty(&dispose))
1391		gfs2_dispose_glock_lru(&dispose);
1392	spin_unlock(&lru_lock);
1393
1394	return freed;
1395}
1396
1397static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1398					    struct shrink_control *sc)
1399{
1400	if (!(sc->gfp_mask & __GFP_FS))
1401		return SHRINK_STOP;
1402	return gfs2_scan_glock_lru(sc->nr_to_scan);
1403}
1404
1405static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1406					     struct shrink_control *sc)
1407{
1408	return vfs_pressure_ratio(atomic_read(&lru_count));
1409}
1410
1411static struct shrinker glock_shrinker = {
1412	.seeks = DEFAULT_SEEKS,
1413	.count_objects = gfs2_glock_shrink_count,
1414	.scan_objects = gfs2_glock_shrink_scan,
1415};
1416
1417/**
1418 * glock_hash_walk - Call a function for each glock belonging to an sb
1419 * @examiner: the function
1420 * @sdp: the filesystem
1421 *
1422 * Only glocks whose gl_name.ln_sbd matches @sdp are passed to @examiner.
1423 */
1424
1425static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1426{
1427	struct gfs2_glock *gl;
1428	struct rhash_head *pos;
1429	const struct bucket_table *tbl;
1430	int i;
1431
1432	rcu_read_lock();
1433	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
1434	for (i = 0; i < tbl->size; i++) {
1435		rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
1436			if ((gl->gl_name.ln_sbd == sdp) &&
1437			    lockref_get_not_dead(&gl->gl_lockref))
1438				examiner(gl);
1439		}
1440	}
1441	rcu_read_unlock();
1442	cond_resched();
1443}
1444
1445/**
1446 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1447 * @gl: The glock to thaw
1448 *
1449 */
1450
1451static void thaw_glock(struct gfs2_glock *gl)
1452{
1453	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1454		goto out;
1455	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1456	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
1457out:
1458		gfs2_glock_put(gl);
1459	}
1460}
1461
1462/**
1463 * clear_glock - look at a glock and see if we can free it from glock cache
1464 * @gl: the glock to look at
1465 *
1466 */
1467
1468static void clear_glock(struct gfs2_glock *gl)
1469{
1470	gfs2_glock_remove_from_lru(gl);
1471
1472	spin_lock(&gl->gl_lockref.lock);
1473	if (gl->gl_state != LM_ST_UNLOCKED)
1474		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1475	spin_unlock(&gl->gl_lockref.lock);
1476	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1477		gfs2_glock_put(gl);
1478}
1479
1480/**
1481 * gfs2_glock_thaw - Thaw any frozen glocks
1482 * @sdp: The super block
1483 *
1484 */
1485
1486void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1487{
1488	glock_hash_walk(thaw_glock, sdp);
1489}
1490
1491static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1492{
1493	spin_lock(&gl->gl_lockref.lock);
1494	gfs2_dump_glock(seq, gl);
1495	spin_unlock(&gl->gl_lockref.lock);
1496}
1497
1498static void dump_glock_func(struct gfs2_glock *gl)
1499{
1500	dump_glock(NULL, gl);
1501}
1502
1503/**
1504 * gfs2_gl_hash_clear - Empty out the glock hash table
1505 * @sdp: the filesystem
1506 * @wait: wait until it's all gone
1507 *
1508 * Called when unmounting the filesystem.
1509 */
1510
1511void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1512{
1513	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1514	flush_workqueue(glock_workqueue);
1515	glock_hash_walk(clear_glock, sdp);
1516	flush_workqueue(glock_workqueue);
1517	wait_event_timeout(sdp->sd_glock_wait,
1518			   atomic_read(&sdp->sd_glock_disposal) == 0,
1519			   HZ * 600);
1520	glock_hash_walk(dump_glock_func, sdp);
1521}
1522
1523void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1524{
1525	struct gfs2_glock *gl = ip->i_gl;
1526	int ret;
1527
1528	ret = gfs2_truncatei_resume(ip);
1529	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1530
1531	spin_lock(&gl->gl_lockref.lock);
1532	clear_bit(GLF_LOCK, &gl->gl_flags);
1533	run_queue(gl, 1);
1534	spin_unlock(&gl->gl_lockref.lock);
1535}
1536
1537static const char *state2str(unsigned state)
1538{
1539	switch(state) {
1540	case LM_ST_UNLOCKED:
1541		return "UN";
1542	case LM_ST_SHARED:
1543		return "SH";
1544	case LM_ST_DEFERRED:
1545		return "DF";
1546	case LM_ST_EXCLUSIVE:
1547		return "EX";
1548	}
1549	return "??";
1550}
1551
1552static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
1553{
1554	char *p = buf;
1555	if (flags & LM_FLAG_TRY)
1556		*p++ = 't';
1557	if (flags & LM_FLAG_TRY_1CB)
1558		*p++ = 'T';
1559	if (flags & LM_FLAG_NOEXP)
1560		*p++ = 'e';
1561	if (flags & LM_FLAG_ANY)
1562		*p++ = 'A';
1563	if (flags & LM_FLAG_PRIORITY)
1564		*p++ = 'p';
1565	if (flags & GL_ASYNC)
1566		*p++ = 'a';
1567	if (flags & GL_EXACT)
1568		*p++ = 'E';
1569	if (flags & GL_NOCACHE)
1570		*p++ = 'c';
1571	if (test_bit(HIF_HOLDER, &iflags))
1572		*p++ = 'H';
1573	if (test_bit(HIF_WAIT, &iflags))
1574		*p++ = 'W';
1575	if (test_bit(HIF_FIRST, &iflags))
1576		*p++ = 'F';
1577	*p = 0;
1578	return buf;
1579}
1580
1581/**
1582 * dump_holder - print information about a glock holder
1583 * @seq: the seq_file struct
1584 * @gh: the glock holder
1585 *
1586 */
1587
1588static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1589{
1590	struct task_struct *gh_owner = NULL;
1591	char flags_buf[32];
1592
1593	rcu_read_lock();
1594	if (gh->gh_owner_pid)
1595		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1596	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1597		       state2str(gh->gh_state),
1598		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1599		       gh->gh_error,
1600		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1601		       gh_owner ? gh_owner->comm : "(ended)",
1602		       (void *)gh->gh_ip);
1603	rcu_read_unlock();
1604}
1605
1606static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1607{
1608	const unsigned long *gflags = &gl->gl_flags;
1609	char *p = buf;
1610
1611	if (test_bit(GLF_LOCK, gflags))
1612		*p++ = 'l';
1613	if (test_bit(GLF_DEMOTE, gflags))
1614		*p++ = 'D';
1615	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1616		*p++ = 'd';
1617	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1618		*p++ = 'p';
1619	if (test_bit(GLF_DIRTY, gflags))
1620		*p++ = 'y';
1621	if (test_bit(GLF_LFLUSH, gflags))
1622		*p++ = 'f';
1623	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1624		*p++ = 'i';
1625	if (test_bit(GLF_REPLY_PENDING, gflags))
1626		*p++ = 'r';
1627	if (test_bit(GLF_INITIAL, gflags))
1628		*p++ = 'I';
1629	if (test_bit(GLF_FROZEN, gflags))
1630		*p++ = 'F';
1631	if (test_bit(GLF_QUEUED, gflags))
1632		*p++ = 'q';
1633	if (test_bit(GLF_LRU, gflags))
1634		*p++ = 'L';
1635	if (gl->gl_object)
1636		*p++ = 'o';
1637	if (test_bit(GLF_BLOCKING, gflags))
1638		*p++ = 'b';
1639	*p = 0;
1640	return buf;
1641}
1642
1643/**
1644 * gfs2_dump_glock - print information about a glock
1645 * @seq: The seq_file struct
1646 * @gl: the glock
1647 *
1648 * The file format is as follows:
1649 * One line per object, capital letters are used to indicate objects
1650 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1651 * other objects are indented by a single space and follow the glock to
1652 * which they are related. Fields are indicated by lower case letters
1653 * followed by a colon and the field value, except for strings which are in
1654 * [] so that it's possible to see if they are composed of spaces for
1655 * example. The fields are n = number (id of the object), f = flags,
1656 * t = type, s = state, r = refcount, e = error, p = pid.
1657 *
1658 */
1659
1660void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1661{
1662	const struct gfs2_glock_operations *glops = gl->gl_ops;
1663	unsigned long long dtime;
1664	const struct gfs2_holder *gh;
1665	char gflags_buf[32];
1666
1667	dtime = jiffies - gl->gl_demote_time;
1668	dtime *= 1000000/HZ; /* demote time in uSec */
1669	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1670		dtime = 0;
1671	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1672		  state2str(gl->gl_state),
1673		  gl->gl_name.ln_type,
1674		  (unsigned long long)gl->gl_name.ln_number,
1675		  gflags2str(gflags_buf, gl),
1676		  state2str(gl->gl_target),
1677		  state2str(gl->gl_demote_state), dtime,
1678		  atomic_read(&gl->gl_ail_count),
1679		  atomic_read(&gl->gl_revokes),
1680		  (int)gl->gl_lockref.count, gl->gl_hold_time);
1681
1682	list_for_each_entry(gh, &gl->gl_holders, gh_list)
1683		dump_holder(seq, gh);
1684
1685	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1686		glops->go_dump(seq, gl);
1687}
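/*
 * With the format strings above, a dump looks roughly like this (all
 * values illustrative only); one indented "H:" line per holder follows
 * the "G:" line:
 *
 *	G:  s:SH n:2/27bc f:lq t:SH d:EX/0 a:0 v:0 r:3 m:200
 *	 H: s:SH f:H e:0 p:1234 [ls] gfs2_inode_lookup+0x.../0x...
 */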
1688
1689static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1690{
1691	struct gfs2_glock *gl = iter_ptr;
1692
1693	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1694		   gl->gl_name.ln_type,
1695		   (unsigned long long)gl->gl_name.ln_number,
1696		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1697		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1698		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1699		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1700		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1701		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1702		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1703		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1704	return 0;
1705}
1706
1707static const char *gfs2_gltype[] = {
1708	"type",
1709	"reserved",
1710	"nondisk",
1711	"inode",
1712	"rgrp",
1713	"meta",
1714	"iopen",
1715	"flock",
1716	"plock",
1717	"quota",
1718	"journal",
1719};
1720
1721static const char *gfs2_stype[] = {
1722	[GFS2_LKS_SRTT]		= "srtt",
1723	[GFS2_LKS_SRTTVAR]	= "srttvar",
1724	[GFS2_LKS_SRTTB]	= "srttb",
1725	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1726	[GFS2_LKS_SIRT]		= "sirt",
1727	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1728	[GFS2_LKS_DCOUNT]	= "dlm",
1729	[GFS2_LKS_QCOUNT]	= "queue",
1730};
1731
1732#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1733
1734static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1735{
1736	struct gfs2_sbd *sdp = seq->private;
1737	loff_t pos = *(loff_t *)iter_ptr;
1738	unsigned index = pos >> 3;
1739	unsigned subindex = pos & 0x07;
1740	int i;
1741
1742	if (index == 0 && subindex != 0)
1743		return 0;
1744
1745	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1746		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1747
1748	for_each_possible_cpu(i) {
1749		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1750
1751		if (index == 0)
1752			seq_printf(seq, " %15u", i);
1753		else
1754			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1755				   lkstats[index - 1].stats[subindex]);
1756	}
1757	seq_putc(seq, '\n');
1758	return 0;
1759}
1760
1761int __init gfs2_glock_init(void)
1762{
1763	int ret;
1764
1765	ret = rhashtable_init(&gl_hash_table, &ht_parms);
1766	if (ret < 0)
1767		return ret;
1768
1769	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1770					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1771	if (!glock_workqueue) {
1772		rhashtable_destroy(&gl_hash_table);
1773		return -ENOMEM;
1774	}
1775	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1776						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1777						0);
1778	if (!gfs2_delete_workqueue) {
1779		destroy_workqueue(glock_workqueue);
1780		rhashtable_destroy(&gl_hash_table);
1781		return -ENOMEM;
1782	}
1783
1784	register_shrinker(&glock_shrinker);
1785
1786	return 0;
1787}
1788
1789void gfs2_glock_exit(void)
1790{
1791	unregister_shrinker(&glock_shrinker);
1792	rhashtable_destroy(&gl_hash_table);
1793	destroy_workqueue(glock_workqueue);
1794	destroy_workqueue(gfs2_delete_workqueue);
1795}
1796
1797static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1798{
1799	do {
1800		gi->gl = rhashtable_walk_next(&gi->hti);
1801		if (IS_ERR(gi->gl)) {
1802			if (PTR_ERR(gi->gl) == -EAGAIN)
1803				continue;
1804			gi->gl = NULL;
1805		}
1806	/* Skip entries for other sb and dead entries */
1807	} while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
1808			      __lockref_is_dead(&gi->gl->gl_lockref)));
1809}
1810
1811static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1812{
1813	struct gfs2_glock_iter *gi = seq->private;
1814	loff_t n = *pos;
1815	int ret;
1816
1817	if (gi->last_pos <= *pos)
1818		n = (*pos - gi->last_pos);
1819
1820	ret = rhashtable_walk_start(&gi->hti);
1821	if (ret)
1822		return NULL;
1823
1824	do {
1825		gfs2_glock_iter_next(gi);
1826	} while (gi->gl && n--);
1827
1828	gi->last_pos = *pos;
1829	return gi->gl;
1830}
1831
1832static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1833				 loff_t *pos)
1834{
1835	struct gfs2_glock_iter *gi = seq->private;
1836
1837	(*pos)++;
1838	gi->last_pos = *pos;
1839	gfs2_glock_iter_next(gi);
1840	return gi->gl;
1841}
1842
1843static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1844{
1845	struct gfs2_glock_iter *gi = seq->private;
1846
1847	gi->gl = NULL;
1848	rhashtable_walk_stop(&gi->hti);
1849}
1850
1851static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1852{
1853	dump_glock(seq, iter_ptr);
1854	return 0;
1855}
1856
1857static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1858{
1859	preempt_disable();
1860	if (*pos >= GFS2_NR_SBSTATS)
1861		return NULL;
1862	return pos;
1863}
1864
1865static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1866				   loff_t *pos)
1867{
1868	(*pos)++;
1869	if (*pos >= GFS2_NR_SBSTATS)
1870		return NULL;
1871	return pos;
1872}
1873
1874static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1875{
1876	preempt_enable();
1877}
1878
1879static const struct seq_operations gfs2_glock_seq_ops = {
1880	.start = gfs2_glock_seq_start,
1881	.next  = gfs2_glock_seq_next,
1882	.stop  = gfs2_glock_seq_stop,
1883	.show  = gfs2_glock_seq_show,
1884};
1885
1886static const struct seq_operations gfs2_glstats_seq_ops = {
1887	.start = gfs2_glock_seq_start,
1888	.next  = gfs2_glock_seq_next,
1889	.stop  = gfs2_glock_seq_stop,
1890	.show  = gfs2_glstats_seq_show,
1891};
1892
1893static const struct seq_operations gfs2_sbstats_seq_ops = {
1894	.start = gfs2_sbstats_seq_start,
1895	.next  = gfs2_sbstats_seq_next,
1896	.stop  = gfs2_sbstats_seq_stop,
1897	.show  = gfs2_sbstats_seq_show,
1898};
1899
1900#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
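/*
 * Rationale (sketch): preallocate a generously sized seq_file buffer up
 * front so that dumping a large glock table doesn't keep restarting with
 * a doubled buffer.  The size chosen is the largest allocation the page
 * allocator does not yet consider costly, capped at 64k -- e.g. 32k with
 * 4k pages and PAGE_ALLOC_COSTLY_ORDER == 3.
 */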
1901
1902static int gfs2_glocks_open(struct inode *inode, struct file *file)
1903{
1904	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1905				   sizeof(struct gfs2_glock_iter));
1906	if (ret == 0) {
1907		struct seq_file *seq = file->private_data;
1908		struct gfs2_glock_iter *gi = seq->private;
1909
1910		gi->sdp = inode->i_private;
1911		gi->last_pos = 0;
1912		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1913		if (seq->buf)
1914			seq->size = GFS2_SEQ_GOODSIZE;
1915		gi->gl = NULL;
1916		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
1917	}
1918	return ret;
1919}
1920
1921static int gfs2_glocks_release(struct inode *inode, struct file *file)
1922{
1923	struct seq_file *seq = file->private_data;
1924	struct gfs2_glock_iter *gi = seq->private;
1925
1926	gi->gl = NULL;
1927	rhashtable_walk_exit(&gi->hti);
1928	return seq_release_private(inode, file);
1929}
1930
1931static int gfs2_glstats_open(struct inode *inode, struct file *file)
1932{
1933	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
1934				   sizeof(struct gfs2_glock_iter));
1935	if (ret == 0) {
1936		struct seq_file *seq = file->private_data;
1937		struct gfs2_glock_iter *gi = seq->private;
1938		gi->sdp = inode->i_private;
1939		gi->last_pos = 0;
1940		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1941		if (seq->buf)
1942			seq->size = GFS2_SEQ_GOODSIZE;
1943		gi->gl = NULL;
1944		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
1945	}
1946	return ret;
1947}
1948
1949static int gfs2_sbstats_open(struct inode *inode, struct file *file)
1950{
1951	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
1952	if (ret == 0) {
1953		struct seq_file *seq = file->private_data;
1954		seq->private = inode->i_private;  /* sdp */
1955	}
1956	return ret;
1957}
1958
1959static const struct file_operations gfs2_glocks_fops = {
1960	.owner   = THIS_MODULE,
1961	.open    = gfs2_glocks_open,
1962	.read    = seq_read,
1963	.llseek  = seq_lseek,
1964	.release = gfs2_glocks_release,
1965};
1966
1967static const struct file_operations gfs2_glstats_fops = {
1968	.owner   = THIS_MODULE,
1969	.open    = gfs2_glstats_open,
1970	.read    = seq_read,
1971	.llseek  = seq_lseek,
1972	.release = gfs2_glocks_release,
1973};
1974
1975static const struct file_operations gfs2_sbstats_fops = {
1976	.owner   = THIS_MODULE,
1977	.open	 = gfs2_sbstats_open,
1978	.read    = seq_read,
1979	.llseek  = seq_lseek,
1980	.release = seq_release,
1981};
1982
1983int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1984{
1985	struct dentry *dent;
1986
1987	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1988	if (IS_ERR_OR_NULL(dent))
1989		goto fail;
1990	sdp->debugfs_dir = dent;
1991
1992	dent = debugfs_create_file("glocks",
1993				   S_IFREG | S_IRUGO,
1994				   sdp->debugfs_dir, sdp,
1995				   &gfs2_glocks_fops);
1996	if (IS_ERR_OR_NULL(dent))
1997		goto fail;
1998	sdp->debugfs_dentry_glocks = dent;
1999
2000	dent = debugfs_create_file("glstats",
2001				   S_IFREG | S_IRUGO,
2002				   sdp->debugfs_dir, sdp,
2003				   &gfs2_glstats_fops);
2004	if (IS_ERR_OR_NULL(dent))
2005		goto fail;
2006	sdp->debugfs_dentry_glstats = dent;
2007
2008	dent = debugfs_create_file("sbstats",
2009				   S_IFREG | S_IRUGO,
2010				   sdp->debugfs_dir, sdp,
2011				   &gfs2_sbstats_fops);
2012	if (IS_ERR_OR_NULL(dent))
2013		goto fail;
2014	sdp->debugfs_dentry_sbstats = dent;
2015
2016	return 0;
2017fail:
2018	gfs2_delete_debugfs_file(sdp);
2019	return dent ? PTR_ERR(dent) : -ENOMEM;
2020}
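/*
 * Resulting layout, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	/sys/kernel/debug/gfs2/<sd_table_name>/glocks
 *	/sys/kernel/debug/gfs2/<sd_table_name>/glstats
 *	/sys/kernel/debug/gfs2/<sd_table_name>/sbstats
 *
 * All three are read-only (S_IRUGO) seq_file interfaces backed by the
 * file_operations defined above.
 */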
2021
2022void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2023{
2024	if (sdp->debugfs_dir) {
2025		if (sdp->debugfs_dentry_glocks) {
2026			debugfs_remove(sdp->debugfs_dentry_glocks);
2027			sdp->debugfs_dentry_glocks = NULL;
2028		}
2029		if (sdp->debugfs_dentry_glstats) {
2030			debugfs_remove(sdp->debugfs_dentry_glstats);
2031			sdp->debugfs_dentry_glstats = NULL;
2032		}
2033		if (sdp->debugfs_dentry_sbstats) {
2034			debugfs_remove(sdp->debugfs_dentry_sbstats);
2035			sdp->debugfs_dentry_sbstats = NULL;
2036		}
2037		debugfs_remove(sdp->debugfs_dir);
2038		sdp->debugfs_dir = NULL;
2039	}
2040}
2041
2042int gfs2_register_debugfs(void)
2043{
2044	gfs2_root = debugfs_create_dir("gfs2", NULL);
2045	if (IS_ERR(gfs2_root))
2046		return PTR_ERR(gfs2_root);
2047	return gfs2_root ? 0 : -ENOMEM;
2048}
2049
2050void gfs2_unregister_debugfs(void)
2051{
2052	debugfs_remove(gfs2_root);
2053	gfs2_root = NULL;
2054}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/sched.h>
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/buffer_head.h>
  13#include <linux/delay.h>
  14#include <linux/sort.h>
  15#include <linux/hash.h>
  16#include <linux/jhash.h>
  17#include <linux/kallsyms.h>
  18#include <linux/gfs2_ondisk.h>
  19#include <linux/list.h>
  20#include <linux/wait.h>
  21#include <linux/module.h>
  22#include <linux/uaccess.h>
  23#include <linux/seq_file.h>
  24#include <linux/debugfs.h>
  25#include <linux/kthread.h>
  26#include <linux/freezer.h>
  27#include <linux/workqueue.h>
  28#include <linux/jiffies.h>
  29#include <linux/rcupdate.h>
  30#include <linux/rculist_bl.h>
  31#include <linux/bit_spinlock.h>
  32#include <linux/percpu.h>
  33#include <linux/list_sort.h>
  34#include <linux/lockref.h>
  35#include <linux/rhashtable.h>
  36#include <linux/pid_namespace.h>
  37#include <linux/file.h>
  38#include <linux/random.h>
  39
  40#include "gfs2.h"
  41#include "incore.h"
  42#include "glock.h"
  43#include "glops.h"
  44#include "inode.h"
  45#include "lops.h"
  46#include "meta_io.h"
  47#include "quota.h"
  48#include "super.h"
  49#include "util.h"
  50#include "bmap.h"
  51#define CREATE_TRACE_POINTS
  52#include "trace_gfs2.h"
  53
  54struct gfs2_glock_iter {
  55	struct gfs2_sbd *sdp;		/* incore superblock           */
  56	struct rhashtable_iter hti;	/* rhashtable iterator         */
  57	struct gfs2_glock *gl;		/* current glock struct        */
  58	loff_t last_pos;		/* last position               */
  59};
  60
  61typedef void (*glock_examiner) (struct gfs2_glock * gl);
  62
  63static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  64static void request_demote(struct gfs2_glock *gl, unsigned int state,
  65			   unsigned long delay, bool remote);
  66
  67static struct dentry *gfs2_root;
  68static LIST_HEAD(lru_list);
  69static atomic_t lru_count = ATOMIC_INIT(0);
  70static DEFINE_SPINLOCK(lru_lock);
  71
  72#define GFS2_GL_HASH_SHIFT      15
  73#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
  74
  75static const struct rhashtable_params ht_parms = {
  76	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
  77	.key_len = offsetofend(struct lm_lockname, ln_type),
  78	.key_offset = offsetof(struct gfs2_glock, gl_name),
  79	.head_offset = offsetof(struct gfs2_glock, gl_node),
  80};
  81
  82static struct rhashtable gl_hash_table;
  83
  84#define GLOCK_WAIT_TABLE_BITS 12
  85#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
  86static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
  87
  88struct wait_glock_queue {
  89	struct lm_lockname *name;
  90	wait_queue_entry_t wait;
  91};
  92
  93static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
  94			       int sync, void *key)
  95{
  96	struct wait_glock_queue *wait_glock =
  97		container_of(wait, struct wait_glock_queue, wait);
  98	struct lm_lockname *wait_name = wait_glock->name;
  99	struct lm_lockname *wake_name = key;
 100
 101	if (wake_name->ln_sbd != wait_name->ln_sbd ||
 102	    wake_name->ln_number != wait_name->ln_number ||
 103	    wake_name->ln_type != wait_name->ln_type)
 104		return 0;
 105	return autoremove_wake_function(wait, mode, sync, key);
 106}
 107
 108static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
 109{
 110	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
 111
 112	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
 113}
 114
 115/**
 116 * wake_up_glock  -  Wake up waiters on a glock
 117 * @gl: the glock
 118 */
 119static void wake_up_glock(struct gfs2_glock *gl)
 120{
 121	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
 122
 123	if (waitqueue_active(wq))
 124		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
 125}
 126
 127static void gfs2_glock_dealloc(struct rcu_head *rcu)
 128{
 129	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 130
 131	kfree(gl->gl_lksb.sb_lvbptr);
 132	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 133		struct gfs2_glock_aspace *gla =
 134			container_of(gl, struct gfs2_glock_aspace, glock);
 135		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
 136	} else
 137		kmem_cache_free(gfs2_glock_cachep, gl);
 138}
 139
 140/**
 141 * glock_blocked_by_withdraw - determine if we can still use a glock
 142 * @gl: the glock
 143 *
 144 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 145 * when we're withdrawn. For example, to maintain metadata integrity, we should
 146 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
 147 * the iopen or freeze glock may be safely used because none of their
 148 * metadata goes through the journal. So in general, we should disallow all
 149 * glocks that are journaled, and allow all the others. One exception is:
 150 * we need to allow our active journal to be promoted and demoted so others
 151 * may recover it and we can reacquire it when they're done.
 152 */
 153static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
 154{
 155	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 156
 157	if (!gfs2_withdrawing_or_withdrawn(sdp))
 158		return false;
 159	if (gl->gl_ops->go_flags & GLOF_NONDISK)
 160		return false;
 161	if (!sdp->sd_jdesc ||
 162	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
 163		return false;
 164	return true;
 165}
 166
 167static void __gfs2_glock_free(struct gfs2_glock *gl)
 168{
 169	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 170	smp_mb();
 171	wake_up_glock(gl);
 172	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 173}
 174
 175void gfs2_glock_free(struct gfs2_glock *gl) {
 176	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 177
 178	__gfs2_glock_free(gl);
 179	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 180		wake_up(&sdp->sd_kill_wait);
 181}
 182
 183void gfs2_glock_free_later(struct gfs2_glock *gl) {
 184	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 185
 186	spin_lock(&lru_lock);
 187	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
 188	spin_unlock(&lru_lock);
 189	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 190		wake_up(&sdp->sd_kill_wait);
 191}
 192
 193static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
 194{
 195	struct list_head *list = &sdp->sd_dead_glocks;
 196
  197	while (!list_empty(list)) {
 198		struct gfs2_glock *gl;
 199
 200		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
 201		list_del_init(&gl->gl_lru);
 202		__gfs2_glock_free(gl);
 203	}
 204}
 205
 206/**
 207 * gfs2_glock_hold() - increment reference count on glock
 208 * @gl: The glock to hold
 209 *
 210 */
 211
 212struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
 213{
 214	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 215	lockref_get(&gl->gl_lockref);
 216	return gl;
 217}
 218
 219static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 220{
 221	spin_lock(&lru_lock);
 222	list_move_tail(&gl->gl_lru, &lru_list);
 223
 224	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
 225		set_bit(GLF_LRU, &gl->gl_flags);
 226		atomic_inc(&lru_count);
 227	}
 228
 229	spin_unlock(&lru_lock);
 230}
 231
 232static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 233{
 234	spin_lock(&lru_lock);
 235	if (test_bit(GLF_LRU, &gl->gl_flags)) {
 236		list_del_init(&gl->gl_lru);
 237		atomic_dec(&lru_count);
 238		clear_bit(GLF_LRU, &gl->gl_flags);
 239	}
 240	spin_unlock(&lru_lock);
 241}
 242
 243/*
 244 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 245 * work queue.
 246 */
 247static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 248	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 249
 250	if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
 251		/*
 252		 * We are holding the lockref spinlock, and the work was still
 253		 * queued above.  The queued work (glock_work_func) takes that
 254		 * spinlock before dropping its glock reference(s), so it
 255		 * cannot have dropped them in the meantime.
 256		 */
 257		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
 258		gl->gl_lockref.count--;
 259	}
 260}
 261
 262static void __gfs2_glock_put(struct gfs2_glock *gl)
 263{
 264	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 265	struct address_space *mapping = gfs2_glock2aspace(gl);
 266
 267	lockref_mark_dead(&gl->gl_lockref);
 268	spin_unlock(&gl->gl_lockref.lock);
 269	gfs2_glock_remove_from_lru(gl);
 270	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 271	if (mapping) {
 272		truncate_inode_pages_final(mapping);
 273		if (!gfs2_withdrawing_or_withdrawn(sdp))
 274			GLOCK_BUG_ON(gl, !mapping_empty(mapping));
 275	}
 276	trace_gfs2_glock_put(gl);
 277	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 278}
 279
 280static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
 281{
 282	if (lockref_put_or_lock(&gl->gl_lockref))
 283		return true;
 284	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
 285	if (gl->gl_state != LM_ST_UNLOCKED) {
 286		gl->gl_lockref.count--;
 287		gfs2_glock_add_to_lru(gl);
 288		spin_unlock(&gl->gl_lockref.lock);
 289		return true;
 290	}
 291	return false;
 292}
 293
 294/**
 295 * gfs2_glock_put() - Decrement reference count on glock
 296 * @gl: The glock to put
 297 *
 298 */
 299
 300void gfs2_glock_put(struct gfs2_glock *gl)
 301{
 302	if (__gfs2_glock_put_or_lock(gl))
 303		return;
 304
 305	__gfs2_glock_put(gl);
 306}
 307
 308/*
 309 * gfs2_glock_put_async - Decrement reference count without sleeping
 310 * @gl: The glock to put
 311 *
 312 * Decrement the reference count on glock immediately unless it is the last
 313 * reference.  Defer putting the last reference to work queue context.
 314 */
 315void gfs2_glock_put_async(struct gfs2_glock *gl)
 316{
 317	if (__gfs2_glock_put_or_lock(gl))
 318		return;
 319
 320	gfs2_glock_queue_work(gl, 0);
 321	spin_unlock(&gl->gl_lockref.lock);
 322}
 323
 324/**
 325 * may_grant - check if it's ok to grant a new lock
 326 * @gl: The glock
 327 * @current_gh: One of the current holders of @gl
 328 * @gh: The lock request which we wish to grant
 329 *
 330 * With our current compatibility rules, if a glock has one or more active
 331 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
 332 * @current_gh; they are all the same as far as compatibility with the new @gh
 333 * goes.
 334 *
 335 * Returns true if it's ok to grant the lock.
 336 */
 337
 338static inline bool may_grant(struct gfs2_glock *gl,
 339			     struct gfs2_holder *current_gh,
 340			     struct gfs2_holder *gh)
 341{
 342	if (current_gh) {
 343		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
 344
  345		switch (current_gh->gh_state) {
 346		case LM_ST_EXCLUSIVE:
 347			/*
 348			 * Here we make a special exception to grant holders
 349			 * who agree to share the EX lock with other holders
 350			 * who also have the bit set. If the original holder
 351			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
 352			 * holders with the bit set.
 353			 */
 354			return gh->gh_state == LM_ST_EXCLUSIVE &&
 355			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
 356			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);
 357
 358		case LM_ST_SHARED:
 359		case LM_ST_DEFERRED:
 360			return gh->gh_state == current_gh->gh_state;
 361
 362		default:
 363			return false;
 364		}
 365	}
 366
 367	if (gl->gl_state == gh->gh_state)
 368		return true;
 369	if (gh->gh_flags & GL_EXACT)
 370		return false;
 371	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 372		return gh->gh_state == LM_ST_SHARED ||
 373		       gh->gh_state == LM_ST_DEFERRED;
 374	}
 375	if (gh->gh_flags & LM_FLAG_ANY)
 376		return gl->gl_state != LM_ST_UNLOCKED;
 377	return false;
 378}
 379
 380static void gfs2_holder_wake(struct gfs2_holder *gh)
 381{
 382	clear_bit(HIF_WAIT, &gh->gh_iflags);
 383	smp_mb__after_atomic();
 384	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 385	if (gh->gh_flags & GL_ASYNC) {
 386		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
 387
 388		wake_up(&sdp->sd_async_glock_wait);
 389	}
 390}
 391
 392/**
 393 * do_error - Something unexpected has happened during a lock request
 394 * @gl: The glock
 395 * @ret: The status from the DLM
 396 */
 397
 398static void do_error(struct gfs2_glock *gl, const int ret)
 399{
 400	struct gfs2_holder *gh, *tmp;
 401
 402	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 403		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 404			continue;
 405		if (ret & LM_OUT_ERROR)
 406			gh->gh_error = -EIO;
 407		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 408			gh->gh_error = GLR_TRYFAILED;
 409		else
 410			continue;
 411		list_del_init(&gh->gh_list);
 412		trace_gfs2_glock_queue(gh, 0);
 413		gfs2_holder_wake(gh);
 414	}
 415}
 416
 417/**
 418 * find_first_holder - find the first "holder" gh
 419 * @gl: the glock
 420 */
 421
 422static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 423{
 424	struct gfs2_holder *gh;
 425
 426	if (!list_empty(&gl->gl_holders)) {
 427		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
 428				      gh_list);
 429		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 430			return gh;
 431	}
 432	return NULL;
 433}
 434
 435/*
 436 * gfs2_instantiate - Call the glops instantiate function
 437 * @gh: The glock holder
 438 *
 439 * Returns: 0 if instantiate was successful, or error.
 440 */
 441int gfs2_instantiate(struct gfs2_holder *gh)
 442{
 443	struct gfs2_glock *gl = gh->gh_gl;
 444	const struct gfs2_glock_operations *glops = gl->gl_ops;
 445	int ret;
 446
 447again:
 448	if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
 449		goto done;
 450
 451	/*
 452	 * Since we unlock the lockref lock, we set a flag to indicate
 453	 * instantiate is in progress.
 454	 */
 455	if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
 456		wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
 457			    TASK_UNINTERRUPTIBLE);
 458		/*
 459		 * Here we just waited for a different instantiate to finish.
 460		 * But that may not have been successful, as when a process
 461		 * locks an inode glock _before_ it has an actual inode to
 462		 * instantiate into. So we check again. This process might
 463		 * have an inode to instantiate, so might be successful.
 464		 */
 465		goto again;
 466	}
 467
 468	ret = glops->go_instantiate(gl);
 469	if (!ret)
 470		clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
 471	clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
 472	if (ret)
 473		return ret;
 474
 475done:
 476	if (glops->go_held)
 477		return glops->go_held(gh);
 478	return 0;
 479}
 480
 481/**
 482 * do_promote - promote as many requests as possible on the current queue
 483 * @gl: The glock
  484 *
  485 * Returns true on success (i.e., progress was made or there are no waiters).
  486 */
 487
 488static bool do_promote(struct gfs2_glock *gl)
 489{
 490	struct gfs2_holder *gh, *current_gh;
 491
 492	current_gh = find_first_holder(gl);
 493	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 494		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 495			continue;
 496		if (!may_grant(gl, current_gh, gh)) {
 497			/*
 498			 * If we get here, it means we may not grant this
 499			 * holder for some reason. If this holder is at the
 500			 * head of the list, it means we have a blocked holder
 501			 * at the head, so return false.
 502			 */
 503			if (list_is_first(&gh->gh_list, &gl->gl_holders))
 504				return false;
 505			do_error(gl, 0);
 506			break;
 508		set_bit(HIF_HOLDER, &gh->gh_iflags);
 509		trace_gfs2_promote(gh);
 510		gfs2_holder_wake(gh);
 511		if (!current_gh)
 512			current_gh = gh;
 513	}
 514	return true;
 515}
 516
 517/**
 518 * find_first_waiter - find the first gh that's waiting for the glock
 519 * @gl: the glock
 520 */
 521
 522static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 523{
 524	struct gfs2_holder *gh;
 525
 526	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 527		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 528			return gh;
 529	}
 530	return NULL;
 531}
 532
 533/**
 534 * find_last_waiter - find the last gh that's waiting for the glock
 535 * @gl: the glock
 536 *
  537 * This is also a fast way of finding out if there are any waiters.
 538 */
 539
 540static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
 541{
 542	struct gfs2_holder *gh;
 543
 544	if (list_empty(&gl->gl_holders))
 545		return NULL;
 546	gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
 547	return test_bit(HIF_HOLDER, &gh->gh_iflags) ? NULL : gh;
 548}
 549
 550/**
 551 * state_change - record that the glock is now in a different state
 552 * @gl: the glock
 553 * @new_state: the new state
 554 */
 555
 556static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 557{
 558	if (new_state != gl->gl_target)
 559		/* shorten our minimum hold time */
 560		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 561				       GL_GLOCK_MIN_HOLD);
 562	gl->gl_state = new_state;
 563	gl->gl_tchange = jiffies;
 564}
 565
 566static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
 567{
 568	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 569
 570	set_bit(nr, &gl->gl_flags);
 571	smp_mb();
 572	wake_up(&sdp->sd_async_glock_wait);
 573}
 574
 575static void gfs2_demote_wake(struct gfs2_glock *gl)
 576{
 577	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 578	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 579	smp_mb__after_atomic();
 580	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 581}
 582
 583/**
 584 * finish_xmote - The DLM has replied to one of our lock requests
 585 * @gl: The glock
 586 * @ret: The status from the DLM
 587 *
 588 */
 589
 590static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 591{
 592	const struct gfs2_glock_operations *glops = gl->gl_ops;
 593	struct gfs2_holder *gh;
 594	unsigned state = ret & LM_OUT_ST_MASK;
 595
 596	trace_gfs2_glock_state_change(gl, state);
 597	state_change(gl, state);
 598	gh = find_first_waiter(gl);
 599
 600	/* Demote to UN request arrived during demote to SH or DF */
 601	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 602	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 603		gl->gl_target = LM_ST_UNLOCKED;
 604
 605	/* Check for state != intended state */
 606	if (unlikely(state != gl->gl_target)) {
 607		if (gh && (ret & LM_OUT_CANCELED))
 608			gfs2_holder_wake(gh);
 609		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 610			/* move to back of queue and try next entry */
 611			if (ret & LM_OUT_CANCELED) {
 612				list_move_tail(&gh->gh_list, &gl->gl_holders);
 613				gh = find_first_waiter(gl);
 614				gl->gl_target = gh->gh_state;
 615				if (do_promote(gl))
 616					goto out;
 617				goto retry;
 618			}
 619			/* Some error or failed "try lock" - report it */
 620			if ((ret & LM_OUT_ERROR) ||
 621			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 622				gl->gl_target = gl->gl_state;
 623				do_error(gl, ret);
 624				goto out;
 625			}
 626		}
  627		switch (state) {
 628		/* Unlocked due to conversion deadlock, try again */
 629		case LM_ST_UNLOCKED:
 630retry:
 631			do_xmote(gl, gh, gl->gl_target);
 632			break;
 633		/* Conversion fails, unlock and try again */
 634		case LM_ST_SHARED:
 635		case LM_ST_DEFERRED:
 636			do_xmote(gl, gh, LM_ST_UNLOCKED);
 637			break;
 638		default: /* Everything else */
 639			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
 640			       gl->gl_target, state);
 641			GLOCK_BUG_ON(gl, 1);
 642		}
 643		return;
 644	}
 645
 646	/* Fast path - we got what we asked for */
 647	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 648		gfs2_demote_wake(gl);
 649	if (state != LM_ST_UNLOCKED) {
 650		if (glops->go_xmote_bh) {
 651			int rv;
 652
 653			spin_unlock(&gl->gl_lockref.lock);
 654			rv = glops->go_xmote_bh(gl);
 655			spin_lock(&gl->gl_lockref.lock);
 656			if (rv) {
 657				do_error(gl, rv);
 658				goto out;
 659			}
 660		}
 661		do_promote(gl);
 662	}
 663out:
 664	clear_bit(GLF_LOCK, &gl->gl_flags);
 665}
 666
 667static bool is_system_glock(struct gfs2_glock *gl)
 668{
 669	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 670	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 671
 672	if (gl == m_ip->i_gl)
 673		return true;
 674	return false;
 675}
 676
 677/**
 678 * do_xmote - Calls the DLM to change the state of a lock
 679 * @gl: The lock state
 680 * @gh: The holder (only for promotes)
 681 * @target: The target lock state
 682 *
 683 */
 684
 685static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
 686					 unsigned int target)
 687__releases(&gl->gl_lockref.lock)
 688__acquires(&gl->gl_lockref.lock)
 689{
 690	const struct gfs2_glock_operations *glops = gl->gl_ops;
 691	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 692	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 693	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 694	int ret;
 695
 696	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
 697	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 698		goto skip_inval;
 699
 700	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
 701	GLOCK_BUG_ON(gl, gl->gl_state == target);
 702	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 703	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 704	    glops->go_inval) {
 705		/*
 706		 * If another process is already doing the invalidate, let that
 707		 * finish first.  The glock state machine will get back to this
 708		 * holder again later.
 709		 */
 710		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
 711				     &gl->gl_flags))
 712			return;
 713		do_error(gl, 0); /* Fail queued try locks */
 714	}
 715	gl->gl_req = target;
 716	set_bit(GLF_BLOCKING, &gl->gl_flags);
 717	if ((gl->gl_req == LM_ST_UNLOCKED) ||
 718	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 719	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 720		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 721	if (!glops->go_inval && !glops->go_sync)
 722		goto skip_inval;
 723
 724	spin_unlock(&gl->gl_lockref.lock);
 725	if (glops->go_sync) {
 726		ret = glops->go_sync(gl);
  727		/* If we had a problem syncing (due to io errors or whatever),
 728		 * we should not invalidate the metadata or tell dlm to
 729		 * release the glock to other nodes.
 730		 */
 731		if (ret) {
 732			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
  733				fs_err(sdp, "Error %d syncing glock\n", ret);
 734				gfs2_dump_glock(NULL, gl, true);
 735			}
 736			spin_lock(&gl->gl_lockref.lock);
 737			goto skip_inval;
 738		}
 739	}
 740	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
 741		/*
 742		 * The call to go_sync should have cleared out the ail list.
 743		 * If there are still items, we have a problem. We ought to
 744		 * withdraw, but we can't because the withdraw code also uses
 745		 * glocks. Warn about the error, dump the glock, then fall
 746		 * through and wait for logd to do the withdraw for us.
 747		 */
 748		if ((atomic_read(&gl->gl_ail_count) != 0) &&
 749		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
 750			gfs2_glock_assert_warn(gl,
 751					       !atomic_read(&gl->gl_ail_count));
 752			gfs2_dump_glock(NULL, gl, true);
 753		}
 754		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 755		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 756	}
 757	spin_lock(&gl->gl_lockref.lock);
 758
 759skip_inval:
 760	gl->gl_lockref.count++;
 761	/*
 762	 * Check for an error encountered since we called go_sync and go_inval.
 763	 * If so, we can't withdraw from the glock code because the withdraw
 764	 * code itself uses glocks (see function signal_our_withdraw) to
 765	 * change the mount to read-only. Most importantly, we must not call
 766	 * dlm to unlock the glock until the journal is in a known good state
 767	 * (after journal replay) otherwise other nodes may use the object
 768	 * (rgrp or dinode) and then later, journal replay will corrupt the
 769	 * file system. The best we can do here is wait for the logd daemon
 770	 * to see sd_log_error and withdraw, and in the meantime, requeue the
 771	 * work for later.
 772	 *
 773	 * We make a special exception for some system glocks, such as the
 774	 * system statfs inode glock, which needs to be granted before the
 775	 * gfs2_quotad daemon can exit, and that exit needs to finish before
 776	 * we can unmount the withdrawn file system.
 777	 *
 778	 * However, if we're just unlocking the lock (say, for unmount, when
 779	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
 780	 * then it's okay to tell dlm to unlock it.
 781	 */
 782	if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
 783		gfs2_withdraw_delayed(sdp);
 784	if (glock_blocked_by_withdraw(gl) &&
 785	    (target != LM_ST_UNLOCKED ||
 786	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
 787		if (!is_system_glock(gl)) {
 788			request_demote(gl, LM_ST_UNLOCKED, 0, false);
 789			/*
 790			 * Ordinarily, we would call dlm and its callback would call
 791			 * finish_xmote, which would call state_change() to the new state.
 792			 * Since we withdrew, we won't call dlm, so call state_change
 793			 * manually, but to the UNLOCKED state we desire.
 794			 */
 795			state_change(gl, LM_ST_UNLOCKED);
 796			/*
 797			 * We skip telling dlm to do the locking, so we won't get a
 798			 * reply that would otherwise clear GLF_LOCK. So we clear it here.
 799			 */
 800			clear_bit(GLF_LOCK, &gl->gl_flags);
 801			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 802			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 803			return;
 804		} else {
 805			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 806		}
 807	}
 808
 809	if (ls->ls_ops->lm_lock) {
 810		spin_unlock(&gl->gl_lockref.lock);
 811		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
 812		spin_lock(&gl->gl_lockref.lock);
 813
 814		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 815		    target == LM_ST_UNLOCKED &&
 816		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
 817			/*
 818			 * The lockspace has been released and the lock has
 819			 * been unlocked implicitly.
 820			 */
 821		} else if (ret) {
 822			fs_err(sdp, "lm_lock ret %d\n", ret);
 823			target = gl->gl_state | LM_OUT_ERROR;
 824		} else {
 825			/* The operation will be completed asynchronously. */
 826			return;
 827		}
 828	}
 829
 830	/* Complete the operation now. */
 831	finish_xmote(gl, target);
 832	gfs2_glock_queue_work(gl, 0);
 833}
 834
 835/**
 836 * run_queue - do all outstanding tasks related to a glock
 837 * @gl: The glock in question
 838 * @nonblock: True if we must not block in run_queue
 839 *
 840 */
 841
 842static void run_queue(struct gfs2_glock *gl, const int nonblock)
 843__releases(&gl->gl_lockref.lock)
 844__acquires(&gl->gl_lockref.lock)
 845{
 846	struct gfs2_holder *gh = NULL;
 847
 848	if (test_bit(GLF_LOCK, &gl->gl_flags))
 849		return;
 850	set_bit(GLF_LOCK, &gl->gl_flags);
 851
 852	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 853
 854	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 855	    gl->gl_demote_state != gl->gl_state) {
 856		if (find_first_holder(gl))
 857			goto out_unlock;
 858		if (nonblock)
 859			goto out_sched;
 860		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 861		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 862		gl->gl_target = gl->gl_demote_state;
 863	} else {
 864		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 865			gfs2_demote_wake(gl);
 866		if (do_promote(gl))
 867			goto out_unlock;
 868		gh = find_first_waiter(gl);
 869		gl->gl_target = gh->gh_state;
 870		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 871			do_error(gl, 0); /* Fail queued try locks */
 872	}
 873	do_xmote(gl, gh, gl->gl_target);
 874	return;
 875
 876out_sched:
 877	clear_bit(GLF_LOCK, &gl->gl_flags);
 878	smp_mb__after_atomic();
 879	gl->gl_lockref.count++;
 880	gfs2_glock_queue_work(gl, 0);
 881	return;
 882
 883out_unlock:
 884	clear_bit(GLF_LOCK, &gl->gl_flags);
 885	smp_mb__after_atomic();
 886}
 887
 888/**
 889 * glock_set_object - set the gl_object field of a glock
 890 * @gl: the glock
 891 * @object: the object
 892 */
 893void glock_set_object(struct gfs2_glock *gl, void *object)
 894{
 895	void *prev_object;
 896
 897	spin_lock(&gl->gl_lockref.lock);
 898	prev_object = gl->gl_object;
 899	gl->gl_object = object;
 900	spin_unlock(&gl->gl_lockref.lock);
 901	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
 902		pr_warn("glock=%u/%llx\n",
 903			gl->gl_name.ln_type,
 904			(unsigned long long)gl->gl_name.ln_number);
 905		gfs2_dump_glock(NULL, gl, true);
 906	}
 907}
 908
 909/**
 910 * glock_clear_object - clear the gl_object field of a glock
 911 * @gl: the glock
 912 * @object: object the glock currently points at
 913 */
 914void glock_clear_object(struct gfs2_glock *gl, void *object)
 915{
 916	void *prev_object;
 917
 918	spin_lock(&gl->gl_lockref.lock);
 919	prev_object = gl->gl_object;
 920	gl->gl_object = NULL;
 921	spin_unlock(&gl->gl_lockref.lock);
 922	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
 923		pr_warn("glock=%u/%llx\n",
 924			gl->gl_name.ln_type,
 925			(unsigned long long)gl->gl_name.ln_number);
 926		gfs2_dump_glock(NULL, gl, true);
 927	}
 928}
 929
 930void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
 931{
 932	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
 933
 934	if (ri->ri_magic == 0)
 935		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
 936	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
 937		ri->ri_generation_deleted = cpu_to_be64(generation);
 938}
 939
 940bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
 941{
 942	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
 943
 944	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
 945		return false;
 946	return generation <= be64_to_cpu(ri->ri_generation_deleted);
 947}
 948
 949static void gfs2_glock_poke(struct gfs2_glock *gl)
 950{
 951	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
 952	struct gfs2_holder gh;
 953	int error;
 954
 955	__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
 956	error = gfs2_glock_nq(&gh);
 957	if (!error)
 958		gfs2_glock_dq(&gh);
 959	gfs2_holder_uninit(&gh);
 960}
 961
 962static void gfs2_try_evict(struct gfs2_glock *gl)
 963{
 964	struct gfs2_inode *ip;
 965
 966	/*
 967	 * If there is contention on the iopen glock and we have an inode, try
 968	 * to grab and release the inode so that it can be evicted.  The
 969	 * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
 970	 * should not be deleted locally.  This will allow the remote node to
 971	 * go ahead and delete the inode without us having to do it, which will
 972	 * avoid rgrp glock thrashing.
 973	 *
 974	 * The remote node is likely still holding the corresponding inode
 975	 * glock, so it will run before we get to verify that the delete has
 976	 * happened below.  (Verification is triggered by the call to
 977	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
 978	 */
 979	spin_lock(&gl->gl_lockref.lock);
 980	ip = gl->gl_object;
 981	if (ip && !igrab(&ip->i_inode))
 982		ip = NULL;
 983	spin_unlock(&gl->gl_lockref.lock);
 984	if (ip) {
 985		wait_on_inode(&ip->i_inode);
 986		if (is_bad_inode(&ip->i_inode)) {
 987			iput(&ip->i_inode);
 988			ip = NULL;
 989		}
 990	}
 991	if (ip) {
 992		set_bit(GIF_DEFER_DELETE, &ip->i_flags);
 993		d_prune_aliases(&ip->i_inode);
 994		iput(&ip->i_inode);
 995
 996		/* If the inode was evicted, gl->gl_object will now be NULL. */
 997		spin_lock(&gl->gl_lockref.lock);
 998		ip = gl->gl_object;
 999		if (ip) {
1000			clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
1001			if (!igrab(&ip->i_inode))
1002				ip = NULL;
1003		}
1004		spin_unlock(&gl->gl_lockref.lock);
1005		if (ip) {
1006			gfs2_glock_poke(ip->i_gl);
1007			iput(&ip->i_inode);
1008		}
1009	}
1010}
1011
1012bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
1013{
1014	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1015
1016	if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
1017		return false;
1018	return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
1019}
1020
1021bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
1022{
1023	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1024	unsigned long delay;
1025
1026	if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
1027		return false;
1028	delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
1029	return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
1030}
1031
1032static void delete_work_func(struct work_struct *work)
1033{
1034	struct delayed_work *dwork = to_delayed_work(work);
1035	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
1036	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1037	bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
1038
1039	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
1040		gfs2_try_evict(gl);
1041
1042	if (verify_delete) {
1043		u64 no_addr = gl->gl_name.ln_number;
1044		struct inode *inode;
1045
1046		inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
1047					    GFS2_BLKST_UNLINKED);
1048		if (IS_ERR(inode)) {
1049			if (PTR_ERR(inode) == -EAGAIN &&
1050			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
1051			    gfs2_queue_verify_delete(gl, true))
1052				return;
1053		} else {
1054			d_prune_aliases(inode);
1055			iput(inode);
1056		}
1057	}
1058
1059	gfs2_glock_put(gl);
1060}
1061
1062static void glock_work_func(struct work_struct *work)
1063{
1064	unsigned long delay = 0;
1065	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
1066	unsigned int drop_refs = 1;
1067
1068	spin_lock(&gl->gl_lockref.lock);
1069	if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
1070		clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
1071		finish_xmote(gl, gl->gl_reply);
1072		drop_refs++;
1073	}
1074	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1075	    gl->gl_state != LM_ST_UNLOCKED &&
1076	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
1077		if (gl->gl_name.ln_type == LM_TYPE_INODE) {
1078			unsigned long holdtime, now = jiffies;
1079
1080			holdtime = gl->gl_tchange + gl->gl_hold_time;
1081			if (time_before(now, holdtime))
1082				delay = holdtime - now;
1083		}
1084
1085		if (!delay) {
1086			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1087			gfs2_set_demote(GLF_DEMOTE, gl);
1088		}
1089	}
1090	run_queue(gl, 0);
1091	if (delay) {
1092		/* Keep one glock reference for the work we requeue. */
1093		drop_refs--;
1094		gfs2_glock_queue_work(gl, delay);
1095	}
1096
1097	/* Drop the remaining glock references manually. */
1098	GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
1099	gl->gl_lockref.count -= drop_refs;
1100	if (!gl->gl_lockref.count) {
1101		if (gl->gl_state == LM_ST_UNLOCKED) {
1102			__gfs2_glock_put(gl);
1103			return;
1104		}
1105		gfs2_glock_add_to_lru(gl);
1106	}
1107	spin_unlock(&gl->gl_lockref.lock);
1108}
1109
1110static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
1111					    struct gfs2_glock *new)
1112{
1113	struct wait_glock_queue wait;
1114	wait_queue_head_t *wq = glock_waitqueue(name);
1115	struct gfs2_glock *gl;
1116
1117	wait.name = name;
1118	init_wait(&wait.wait);
1119	wait.wait.func = glock_wake_function;
1120
1121again:
1122	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1123	rcu_read_lock();
1124	if (new) {
1125		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
1126			&new->gl_node, ht_parms);
1127		if (IS_ERR(gl))
1128			goto out;
1129	} else {
1130		gl = rhashtable_lookup_fast(&gl_hash_table,
1131			name, ht_parms);
1132	}
1133	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
1134		rcu_read_unlock();
1135		schedule();
1136		goto again;
1137	}
1138out:
1139	rcu_read_unlock();
1140	finish_wait(wq, &wait.wait);
1141	if (gl)
1142		gfs2_glock_remove_from_lru(gl);
1143	return gl;
1144}
1145
1146/**
1147 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
1148 * @sdp: The GFS2 superblock
1149 * @number: the lock number
1150 * @glops: The glock_operations to use
1151 * @create: If 0, don't create the glock if it doesn't exist
1152 * @glp: the glock is returned here
1153 *
1154 * This does not lock a glock, just finds/creates structures for one.
1155 *
1156 * Returns: errno
1157 */
1158
1159int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
1160		   const struct gfs2_glock_operations *glops, int create,
1161		   struct gfs2_glock **glp)
1162{
1163	struct super_block *s = sdp->sd_vfs;
1164	struct lm_lockname name = { .ln_number = number,
1165				    .ln_type = glops->go_type,
1166				    .ln_sbd = sdp };
1167	struct gfs2_glock *gl, *tmp;
1168	struct address_space *mapping;
1169
1170	gl = find_insert_glock(&name, NULL);
1171	if (gl)
1172		goto found;
1173	if (!create)
1174		return -ENOENT;
1175
1176	if (glops->go_flags & GLOF_ASPACE) {
1177		struct gfs2_glock_aspace *gla =
1178			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
1179		if (!gla)
1180			return -ENOMEM;
1181		gl = &gla->glock;
1182	} else {
1183		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
1184		if (!gl)
1185			return -ENOMEM;
1186	}
1187	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
1188	gl->gl_ops = glops;
1189
1190	if (glops->go_flags & GLOF_LVB) {
1191		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
1192		if (!gl->gl_lksb.sb_lvbptr) {
1193			gfs2_glock_dealloc(&gl->gl_rcu);
1194			return -ENOMEM;
1195		}
1196	}
1197
1198	atomic_inc(&sdp->sd_glock_disposal);
1199	gl->gl_node.next = NULL;
1200	gl->gl_flags = BIT(GLF_INITIAL);
1201	if (glops->go_instantiate)
1202		gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
1203	gl->gl_name = name;
1204	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
1205	gl->gl_lockref.count = 1;
1206	gl->gl_state = LM_ST_UNLOCKED;
1207	gl->gl_target = LM_ST_UNLOCKED;
1208	gl->gl_demote_state = LM_ST_EXCLUSIVE;
1209	gl->gl_dstamp = 0;
1210	preempt_disable();
1211	/* We use the global stats to estimate the initial per-glock stats */
1212	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
1213	preempt_enable();
1214	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
1215	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
1216	gl->gl_tchange = jiffies;
1217	gl->gl_object = NULL;
1218	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
1219	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
1220	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
1221		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
1222
1223	mapping = gfs2_glock2aspace(gl);
1224	if (mapping) {
 1225		mapping->a_ops = &gfs2_meta_aops;
1226		mapping->host = s->s_bdev->bd_mapping->host;
1227		mapping->flags = 0;
1228		mapping_set_gfp_mask(mapping, GFP_NOFS);
1229		mapping->i_private_data = NULL;
1230		mapping->writeback_index = 0;
1231	}
1232
1233	tmp = find_insert_glock(&name, gl);
1234	if (tmp) {
1235		gfs2_glock_dealloc(&gl->gl_rcu);
1236		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
1237			wake_up(&sdp->sd_kill_wait);
1238
1239		if (IS_ERR(tmp))
1240			return PTR_ERR(tmp);
1241		gl = tmp;
1242	}
1243
1244found:
1245	*glp = gl;
1246	return 0;
1247}
1248
1249/**
1250 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
1251 * @gl: the glock
1252 * @state: the state we're requesting
1253 * @flags: the modifier flags
1254 * @gh: the holder structure
1255 *
1256 */
1257
1258void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
1259			struct gfs2_holder *gh, unsigned long ip)
1260{
1261	INIT_LIST_HEAD(&gh->gh_list);
1262	gh->gh_gl = gfs2_glock_hold(gl);
1263	gh->gh_ip = ip;
1264	gh->gh_owner_pid = get_pid(task_pid(current));
1265	gh->gh_state = state;
1266	gh->gh_flags = flags;
1267	gh->gh_iflags = 0;
1268}
1269
1270/**
1271 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
1272 * @state: the state we're requesting
1273 * @flags: the modifier flags
1274 * @gh: the holder structure
1275 *
1276 * Don't mess with the glock.
1277 *
1278 */
1279
1280void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
1281{
1282	gh->gh_state = state;
1283	gh->gh_flags = flags;
1284	gh->gh_iflags = 0;
1285	gh->gh_ip = _RET_IP_;
1286	put_pid(gh->gh_owner_pid);
1287	gh->gh_owner_pid = get_pid(task_pid(current));
1288}
1289
1290/**
1291 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
1292 * @gh: the holder structure
1293 *
1294 */
1295
1296void gfs2_holder_uninit(struct gfs2_holder *gh)
1297{
1298	put_pid(gh->gh_owner_pid);
1299	gfs2_glock_put(gh->gh_gl);
1300	gfs2_holder_mark_uninitialized(gh);
1301	gh->gh_ip = 0;
1302}
1303
1304static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
1305					unsigned long start_time)
1306{
 1307	/* Have we waited longer than a second? */
1308	if (time_after(jiffies, start_time + HZ)) {
1309		/* Lengthen the minimum hold time. */
1310		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
1311				       GL_GLOCK_MAX_HOLD);
1312	}
1313}
1314
1315/**
1316 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
1317 * @gh: the glock holder
1318 *
1319 * Called when a glock holder no longer needs to be waited for because it is
1320 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
1321 * failed (gh_error != 0).
1322 */
1323
1324int gfs2_glock_holder_ready(struct gfs2_holder *gh)
1325{
1326	if (gh->gh_error || (gh->gh_flags & GL_SKIP))
1327		return gh->gh_error;
1328	gh->gh_error = gfs2_instantiate(gh);
1329	if (gh->gh_error)
1330		gfs2_glock_dq(gh);
1331	return gh->gh_error;
1332}
1333
1334/**
1335 * gfs2_glock_wait - wait on a glock acquisition
1336 * @gh: the glock holder
1337 *
1338 * Returns: 0 on success
1339 */
1340
1341int gfs2_glock_wait(struct gfs2_holder *gh)
1342{
1343	unsigned long start_time = jiffies;
1344
1345	might_sleep();
1346	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1347	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
1348	return gfs2_glock_holder_ready(gh);
1349}
1350
1351static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
1352{
1353	int i;
1354
1355	for (i = 0; i < num_gh; i++)
1356		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
1357			return 1;
1358	return 0;
1359}
1360
1361/**
1362 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
1363 * @num_gh: the number of holders in the array
1364 * @ghs: the glock holder array
1365 *
1366 * Returns: 0 on success, meaning all glocks have been granted and are held.
1367 *          -ESTALE if the request timed out, meaning all glocks were released,
1368 *          and the caller should retry the operation.
1369 */
1370
1371int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
1372{
1373	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
1374	int i, ret = 0, timeout = 0;
1375	unsigned long start_time = jiffies;
1376
1377	might_sleep();
1378	/*
1379	 * Total up the (minimum hold time * 2) of all glocks and use that to
1380	 * determine the max amount of time we should wait.
1381	 */
1382	for (i = 0; i < num_gh; i++)
1383		timeout += ghs[i].gh_gl->gl_hold_time << 1;
1384
1385	if (!wait_event_timeout(sdp->sd_async_glock_wait,
1386				!glocks_pending(num_gh, ghs), timeout)) {
1387		ret = -ESTALE; /* request timed out. */
1388		goto out;
1389	}
1390
1391	for (i = 0; i < num_gh; i++) {
1392		struct gfs2_holder *gh = &ghs[i];
1393		int ret2;
1394
1395		if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1396			gfs2_glock_update_hold_time(gh->gh_gl,
1397						    start_time);
1398		}
1399		ret2 = gfs2_glock_holder_ready(gh);
1400		if (!ret)
1401			ret = ret2;
1402	}
1403
1404out:
1405	if (ret) {
1406		for (i = 0; i < num_gh; i++) {
1407			struct gfs2_holder *gh = &ghs[i];
1408
1409			gfs2_glock_dq(gh);
1410		}
1411	}
1412	return ret;
1413}
1414
1415/**
1416 * request_demote - process a demote request
1417 * @gl: the glock
1418 * @state: the state the caller wants us to change to
1419 * @delay: zero to demote immediately; otherwise pending demote
1420 * @remote: true if this came from a different cluster node
1421 *
1422 * There are only two requests that we are going to see in actual
 1423 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
1424 */
1425
1426static void request_demote(struct gfs2_glock *gl, unsigned int state,
1427			   unsigned long delay, bool remote)
1428{
1429	gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
1430	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
1431		gl->gl_demote_state = state;
1432		gl->gl_demote_time = jiffies;
1433	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
1434			gl->gl_demote_state != state) {
1435		gl->gl_demote_state = LM_ST_UNLOCKED;
1436	}
1437	if (gl->gl_ops->go_callback)
1438		gl->gl_ops->go_callback(gl, remote);
1439	trace_gfs2_demote_rq(gl, remote);
1440}
1441
1442void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
1443{
1444	struct va_format vaf;
1445	va_list args;
1446
1447	va_start(args, fmt);
1448
1449	if (seq) {
1450		seq_vprintf(seq, fmt, args);
1451	} else {
1452		vaf.fmt = fmt;
1453		vaf.va = &args;
1454
1455		pr_err("%pV", &vaf);
1456	}
1457
1458	va_end(args);
1459}
1460
1461static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
1462{
 1463	if (!(gh->gh_flags & GL_NOPID))
 1464		return true;
 1465	if (gh->gh_state == LM_ST_UNLOCKED)
 1466		return true;
 1467	return false;
1468}
1469
1470/**
1471 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1472 * @gh: the holder structure to add
1473 *
1474 * Eventually we should move the recursive locking trap to a
1475 * debugging option or something like that. This is the fast
1476 * path and needs to have the minimum number of distractions.
1477 * 
1478 */
1479
1480static inline void add_to_queue(struct gfs2_holder *gh)
1481__releases(&gl->gl_lockref.lock)
1482__acquires(&gl->gl_lockref.lock)
1483{
1484	struct gfs2_glock *gl = gh->gh_gl;
1485	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1486	struct list_head *insert_pt = NULL;
1487	struct gfs2_holder *gh2;
1488	int try_futile = 0;
1489
1490	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1491	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1492		GLOCK_BUG_ON(gl, true);
1493
1494	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1495		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
1496			struct gfs2_holder *current_gh;
1497
1498			current_gh = find_first_holder(gl);
1499			try_futile = !may_grant(gl, current_gh, gh);
1500		}
1501		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1502			goto fail;
1503	}
1504
1505	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1506		if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
1507			continue;
1508		if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
1509			continue;
1510		if (!pid_is_meaningful(gh2))
1511			continue;
1512		goto trap_recursive;
1513	}
1514	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1515		if (try_futile &&
1516		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1517fail:
1518			gh->gh_error = GLR_TRYFAILED;
1519			gfs2_holder_wake(gh);
1520			return;
1521		}
1522		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1523			continue;
1524	}
1525	trace_gfs2_glock_queue(gh, 1);
1526	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1527	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1528	if (likely(insert_pt == NULL)) {
1529		list_add_tail(&gh->gh_list, &gl->gl_holders);
1530		return;
1531	}
1532	list_add_tail(&gh->gh_list, insert_pt);
1533	spin_unlock(&gl->gl_lockref.lock);
1534	if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1535		sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1536	spin_lock(&gl->gl_lockref.lock);
1537	return;
1538
1539trap_recursive:
1540	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
1541	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
 1542	fs_err(sdp, "lock type: %d req lock state: %d\n",
1543	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1544	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1545	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
 1546	fs_err(sdp, "lock type: %d req lock state: %d\n",
1547	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1548	gfs2_dump_glock(NULL, gl, true);
1549	BUG();
1550}
1551
1552/**
1553 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1554 * @gh: the holder structure
1555 *
1556 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1557 *
1558 * Returns: 0, GLR_TRYFAILED, or errno on failure
1559 */
1560
1561int gfs2_glock_nq(struct gfs2_holder *gh)
1562{
1563	struct gfs2_glock *gl = gh->gh_gl;
1564	int error;
1565
1566	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1567		return -EIO;
1568
1569	if (gh->gh_flags & GL_NOBLOCK) {
1570		struct gfs2_holder *current_gh;
1571
1572		error = -ECHILD;
1573		spin_lock(&gl->gl_lockref.lock);
1574		if (find_last_waiter(gl))
1575			goto unlock;
1576		current_gh = find_first_holder(gl);
1577		if (!may_grant(gl, current_gh, gh))
1578			goto unlock;
1579		set_bit(HIF_HOLDER, &gh->gh_iflags);
1580		list_add_tail(&gh->gh_list, &gl->gl_holders);
1581		trace_gfs2_promote(gh);
1582		error = 0;
1583unlock:
1584		spin_unlock(&gl->gl_lockref.lock);
1585		return error;
1586	}
1587
1588	gh->gh_error = 0;
1589	spin_lock(&gl->gl_lockref.lock);
1590	add_to_queue(gh);
1591	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1592		     test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
1593		set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
1594		gl->gl_lockref.count++;
1595		gfs2_glock_queue_work(gl, 0);
1596	}
1597	run_queue(gl, 1);
1598	spin_unlock(&gl->gl_lockref.lock);
1599
1600	error = 0;
1601	if (!(gh->gh_flags & GL_ASYNC))
1602		error = gfs2_glock_wait(gh);
1603
1604	return error;
1605}
1606
1607/**
1608 * gfs2_glock_poll - poll to see if an async request has been completed
1609 * @gh: the holder
1610 *
1611 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1612 */
1613
1614int gfs2_glock_poll(struct gfs2_holder *gh)
1615{
1616	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1617}
1618
1619static void __gfs2_glock_dq(struct gfs2_holder *gh)
1620{
1621	struct gfs2_glock *gl = gh->gh_gl;
1622	unsigned delay = 0;
1623	int fast_path = 0;
1624
1625	/*
1626	 * This holder should not be cached, so mark it for demote.
1627	 * Note: this should be done before the glock_needs_demote
1628	 * check below.
1629	 */
1630	if (gh->gh_flags & GL_NOCACHE)
1631		request_demote(gl, LM_ST_UNLOCKED, 0, false);
1632
1633	list_del_init(&gh->gh_list);
1634	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1635	trace_gfs2_glock_queue(gh, 0);
1636
1637	/*
1638	 * If there hasn't been a demote request we are done.
1639	 * (Let the remaining holders, if any, keep holding it.)
1640	 */
1641	if (!glock_needs_demote(gl)) {
1642		if (list_empty(&gl->gl_holders))
1643			fast_path = 1;
1644	}
1645
1646	if (unlikely(!fast_path)) {
1647		gl->gl_lockref.count++;
1648		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1649		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1650		    gl->gl_name.ln_type == LM_TYPE_INODE)
1651			delay = gl->gl_hold_time;
1652		gfs2_glock_queue_work(gl, delay);
1653	}
1654}
1655
1656/**
1657 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1658 * @gh: the glock holder
1659 *
1660 */
1661void gfs2_glock_dq(struct gfs2_holder *gh)
1662{
1663	struct gfs2_glock *gl = gh->gh_gl;
1664	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1665
1666	spin_lock(&gl->gl_lockref.lock);
1667	if (!gfs2_holder_queued(gh)) {
1668		/*
1669		 * May have already been dequeued because the locking request
1670		 * was GL_ASYNC and it has failed in the meantime.
1671		 */
1672		goto out;
1673	}
1674
1675	if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
1676	    !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1677		spin_unlock(&gl->gl_lockref.lock);
1678		gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
1679		wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1680		spin_lock(&gl->gl_lockref.lock);
1681	}
1682
1683	/*
1684	 * If we're in the process of file system withdraw, we cannot just
1685	 * dequeue any glocks until our journal is recovered, lest we introduce
1686 * file system corruption. There are two exceptions to this rule: we must
1687 * allow unlocking of nondisk glocks, and of the glock for our own
1688 * journal, which needs recovery.
1689	 */
1690	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
1691	    glock_blocked_by_withdraw(gl) &&
1692	    gh->gh_gl != sdp->sd_jinode_gl) {
1693		sdp->sd_glock_dqs_held++;
1694		spin_unlock(&gl->gl_lockref.lock);
1695		might_sleep();
1696		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
1697			    TASK_UNINTERRUPTIBLE);
1698		spin_lock(&gl->gl_lockref.lock);
1699	}
1700
1701	__gfs2_glock_dq(gh);
1702out:
1703	spin_unlock(&gl->gl_lockref.lock);
1704}
1705
1706void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1707{
1708	struct gfs2_glock *gl = gh->gh_gl;
1709	gfs2_glock_dq(gh);
1710	might_sleep();
1711	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1712}
1713
1714/**
1715 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1716 * @gh: the holder structure
1717 *
1718 */
1719
1720void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1721{
1722	gfs2_glock_dq(gh);
1723	gfs2_holder_uninit(gh);
1724}
1725
1726/**
1727 * gfs2_glock_nq_num - acquire a glock based on lock number
1728 * @sdp: the filesystem
1729 * @number: the lock number
1730 * @glops: the glock operations for the type of glock
1731 * @state: the state to acquire the glock in
1732 * @flags: modifier flags for the acquisition
1733 * @gh: the struct gfs2_holder
1734 *
1735 * Returns: errno
1736 */
1737
1738int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1739		      const struct gfs2_glock_operations *glops,
1740		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1741{
1742	struct gfs2_glock *gl;
1743	int error;
1744
1745	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1746	if (!error) {
1747		error = gfs2_glock_nq_init(gl, state, flags, gh);
1748		gfs2_glock_put(gl);
1749	}
1750
1751	return error;
1752}
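/*
 * Sketch: taking a lock directly by number, with the inode glock
 * operations purely as an example (gfs2_inode_glops is declared in
 * glops.h):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_num(sdp, number, &gfs2_inode_glops,
 *				  LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	(...)
 *	gfs2_glock_dq_uninit(&gh);
 */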
1753
1754/**
1755 * glock_compare - Compare two struct gfs2_glock structures for sorting
1756 * @arg_a: the first structure
1757 * @arg_b: the second structure
1758 *
1759 */
1760
1761static int glock_compare(const void *arg_a, const void *arg_b)
1762{
1763	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1764	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1765	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1766	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1767
1768	if (a->ln_number > b->ln_number)
1769		return 1;
1770	if (a->ln_number < b->ln_number)
1771		return -1;
1772	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1773	return 0;
1774}
1775
1776/**
1777 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1778 * @num_gh: the number of structures
1779 * @ghs: an array of struct gfs2_holder structures
1780 * @p: placeholder for the holder structure to pass back
1781 *
1782 * Returns: 0 on success (all glocks acquired),
1783 *          errno on failure (no glocks acquired)
1784 */
1785
1786static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1787		     struct gfs2_holder **p)
1788{
1789	unsigned int x;
1790	int error = 0;
1791
1792	for (x = 0; x < num_gh; x++)
1793		p[x] = &ghs[x];
1794
1795	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1796
1797	for (x = 0; x < num_gh; x++) {
1798		error = gfs2_glock_nq(p[x]);
1799		if (error) {
1800			while (x--)
1801				gfs2_glock_dq(p[x]);
1802			break;
1803		}
1804	}
1805
1806	return error;
1807}
1808
1809/**
1810 * gfs2_glock_nq_m - acquire multiple glocks
1811 * @num_gh: the number of structures
1812 * @ghs: an array of struct gfs2_holder structures
1813 *
1814 * Returns: 0 on success (all glocks acquired),
1815 *          errno on failure (no glocks acquired)
1816 */
1817
1818int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1819{
1820	struct gfs2_holder *tmp[4];
1821	struct gfs2_holder **pph = tmp;
1822	int error = 0;
1823
1824	switch(num_gh) {
1825	case 0:
1826		return 0;
1827	case 1:
1828		return gfs2_glock_nq(ghs);
1829	default:
1830		if (num_gh <= 4)
1831			break;
1832		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
1833				    GFP_NOFS);
1834		if (!pph)
1835			return -ENOMEM;
1836	}
1837
1838	error = nq_m_sync(num_gh, ghs, pph);
1839
1840	if (pph != tmp)
1841		kfree(pph);
1842
1843	return error;
1844}
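/*
 * Sketch of taking two glocks at once, assuming valid glocks "gl1" and
 * "gl2". gfs2_glock_nq_m() sorts the holders (see glock_compare() above)
 * so that the locks are always acquired in the same global order:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		(...)
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */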
1845
1846/**
1847 * gfs2_glock_dq_m - release multiple glocks
1848 * @num_gh: the number of structures
1849 * @ghs: an array of struct gfs2_holder structures
1850 *
1851 */
1852
1853void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1854{
1855	while (num_gh--)
1856		gfs2_glock_dq(&ghs[num_gh]);
1857}
1858
1859void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1860{
1861	unsigned long delay = 0;
1862
1863	gfs2_glock_hold(gl);
1864	spin_lock(&gl->gl_lockref.lock);
1865	if (!list_empty(&gl->gl_holders) &&
1866	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1867		unsigned long now = jiffies;
1868		unsigned long holdtime;
1869
1870		holdtime = gl->gl_tchange + gl->gl_hold_time;
1871
1872		if (time_before(now, holdtime))
1873			delay = holdtime - now;
1874		if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
1875			delay = gl->gl_hold_time;
1876	}
1877	request_demote(gl, state, delay, true);
1878	gfs2_glock_queue_work(gl, delay);
1879	spin_unlock(&gl->gl_lockref.lock);
1880}
1881
1882/**
1883 * gfs2_should_freeze - Figure out if glock should be frozen
1884 * @gl: The glock in question
1885 *
1886 * Glocks are not frozen if (a) the result of the dlm operation is
1887 * an error, (b) the locking operation was an unlock operation, or
1888 * (c) there is a "noexp" flagged request anywhere in the queue.
1889 *
1890 * Returns: 1 if freezing should occur, 0 otherwise
1891 */
1892
1893static int gfs2_should_freeze(const struct gfs2_glock *gl)
1894{
1895	const struct gfs2_holder *gh;
1896
1897	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1898		return 0;
1899	if (gl->gl_target == LM_ST_UNLOCKED)
1900		return 0;
1901
1902	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1903		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1904			continue;
1905		if (LM_FLAG_NOEXP & gh->gh_flags)
1906			return 0;
1907	}
1908
1909	return 1;
1910}
1911
1912/**
1913 * gfs2_glock_complete - Callback used by locking
1914 * @gl: Pointer to the glock
1915 * @ret: The return value from the dlm
1916 *
1917 * The gl_reply field is protected by gl_lockref.lock, so it is safe for
1918 * it to share a bitfield with other glock state fields.
1919 */
1920
1921void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1922{
1923	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1924
1925	spin_lock(&gl->gl_lockref.lock);
1926	gl->gl_reply = ret;
1927
1928	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1929		if (gfs2_should_freeze(gl)) {
1930			set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
1931			spin_unlock(&gl->gl_lockref.lock);
1932			return;
1933		}
1934	}
1935
1936	gl->gl_lockref.count++;
1937	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
1938	gfs2_glock_queue_work(gl, 0);
1939	spin_unlock(&gl->gl_lockref.lock);
1940}
1941
1942static int glock_cmp(void *priv, const struct list_head *a,
1943		     const struct list_head *b)
1944{
1945	struct gfs2_glock *gla, *glb;
1946
1947	gla = list_entry(a, struct gfs2_glock, gl_lru);
1948	glb = list_entry(b, struct gfs2_glock, gl_lru);
1949
1950	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1951		return 1;
1952	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1953		return -1;
1954
1955	return 0;
1956}
1957
1958static bool can_free_glock(struct gfs2_glock *gl)
1959{
1960	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1961
1962	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
1963	       !gl->gl_lockref.count &&
1964	       (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
1965		test_bit(SDF_KILL, &sdp->sd_flags));
1966}
1967
1968/**
1969 * gfs2_dispose_glock_lru - Demote a list of glocks
1970 * @list: The list to dispose of
1971 *
1972 * Disposing of glocks may involve disk accesses, so we sort the glocks
1973 * by number (i.e. disk location of the inodes) so that if there are any
1974 * such accesses, they'll be sent in order (mostly).
1975 *
1976 * Must be called under the lru_lock, but may drop and retake this
1977 * lock. While the lru_lock is dropped, entries may vanish from the
1978 * list, but no new entries will appear on the list (since it is
1979 * private).
1980 */
1981
1982static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
1983__releases(&lru_lock)
1984__acquires(&lru_lock)
1985{
1986	struct gfs2_glock *gl;
1987	unsigned long freed = 0;
1988
1989	list_sort(NULL, list, glock_cmp);
1990
1991	while(!list_empty(list)) {
1992		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
1993		if (!spin_trylock(&gl->gl_lockref.lock)) {
1994add_back_to_lru:
1995			list_move(&gl->gl_lru, &lru_list);
1996			continue;
1997		}
1998		if (!can_free_glock(gl)) {
1999			spin_unlock(&gl->gl_lockref.lock);
2000			goto add_back_to_lru;
2001		}
2002		list_del_init(&gl->gl_lru);
2003		atomic_dec(&lru_count);
2004		clear_bit(GLF_LRU, &gl->gl_flags);
2005		freed++;
2006		gl->gl_lockref.count++;
2007		if (gl->gl_state != LM_ST_UNLOCKED)
2008			request_demote(gl, LM_ST_UNLOCKED, 0, false);
2009		gfs2_glock_queue_work(gl, 0);
2010		spin_unlock(&gl->gl_lockref.lock);
2011		cond_resched_lock(&lru_lock);
2012	}
2013	return freed;
2014}
2015
2016/**
2017 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
2018 * @nr: The number of entries to scan
2019 *
2020 * This function selects the entries on the LRU which are able to
2021 * be demoted, and then kicks off the process by calling
2022 * gfs2_dispose_glock_lru() above.
2023 */
2024
2025static unsigned long gfs2_scan_glock_lru(unsigned long nr)
2026{
2027	struct gfs2_glock *gl, *next;
2028	LIST_HEAD(dispose);
2029	unsigned long freed = 0;
2030
2031	spin_lock(&lru_lock);
2032	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
2033		if (!nr--)
2034			break;
2035		if (can_free_glock(gl))
2036			list_move(&gl->gl_lru, &dispose);
2037	}
2038	if (!list_empty(&dispose))
2039		freed = gfs2_dispose_glock_lru(&dispose);
2040	spin_unlock(&lru_lock);
2041
2042	return freed;
2043}
2044
2045static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
2046					    struct shrink_control *sc)
2047{
2048	if (!(sc->gfp_mask & __GFP_FS))
2049		return SHRINK_STOP;
2050	return gfs2_scan_glock_lru(sc->nr_to_scan);
2051}
2052
2053static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
2054					     struct shrink_control *sc)
2055{
2056	return vfs_pressure_ratio(atomic_read(&lru_count));
2057}
2058
2059static struct shrinker *glock_shrinker;
2060
2061/**
2062 * glock_hash_walk - Call a function for each glock in the hash table
2063 * @examiner: the function
2064 * @sdp: the filesystem
2065 *
2066 * Note that the function can be called multiple times on the same
2067 * object, so the caller must ensure that the function can cope with
2068 * that.
2069 */
2070
2071static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
2072{
2073	struct gfs2_glock *gl;
2074	struct rhashtable_iter iter;
2075
2076	rhashtable_walk_enter(&gl_hash_table, &iter);
2077
2078	do {
2079		rhashtable_walk_start(&iter);
2080
2081		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
2082			if (gl->gl_name.ln_sbd == sdp)
2083				examiner(gl);
2084		}
2085
2086		rhashtable_walk_stop(&iter);
2087	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
2088
2089	rhashtable_walk_exit(&iter);
2090}
2091
2092void gfs2_cancel_delete_work(struct gfs2_glock *gl)
2093{
2094	clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
2095	clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
2096	if (cancel_delayed_work(&gl->gl_delete))
2097		gfs2_glock_put(gl);
2098}
2099
2100static void flush_delete_work(struct gfs2_glock *gl)
2101{
2102	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
2103		struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2104
2105		if (cancel_delayed_work(&gl->gl_delete)) {
2106			queue_delayed_work(sdp->sd_delete_wq,
2107					   &gl->gl_delete, 0);
2108		}
2109	}
2110}
2111
2112void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
2113{
2114	glock_hash_walk(flush_delete_work, sdp);
2115	flush_workqueue(sdp->sd_delete_wq);
2116}
2117
2118/**
2119 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
2120 * @gl: The glock to thaw
2121 *
2122 */
2123
2124static void thaw_glock(struct gfs2_glock *gl)
2125{
2126	if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
2127		return;
2128	if (!lockref_get_not_dead(&gl->gl_lockref))
2129		return;
2130
2131	gfs2_glock_remove_from_lru(gl);
2132	spin_lock(&gl->gl_lockref.lock);
2133	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
2134	gfs2_glock_queue_work(gl, 0);
2135	spin_unlock(&gl->gl_lockref.lock);
2136}
2137
2138/**
2139 * clear_glock - look at a glock and see if we can free it from the glock cache
2140 * @gl: the glock to look at
2141 *
2142 */
2143
2144static void clear_glock(struct gfs2_glock *gl)
2145{
2146	gfs2_glock_remove_from_lru(gl);
2147
2148	spin_lock(&gl->gl_lockref.lock);
2149	if (!__lockref_is_dead(&gl->gl_lockref)) {
2150		gl->gl_lockref.count++;
2151		if (gl->gl_state != LM_ST_UNLOCKED)
2152			request_demote(gl, LM_ST_UNLOCKED, 0, false);
2153		gfs2_glock_queue_work(gl, 0);
2154	}
2155	spin_unlock(&gl->gl_lockref.lock);
2156}
2157
2158/**
2159 * gfs2_glock_thaw - Thaw any frozen glocks
2160 * @sdp: The super block
2161 *
2162 */
2163
2164void gfs2_glock_thaw(struct gfs2_sbd *sdp)
2165{
2166	glock_hash_walk(thaw_glock, sdp);
2167}
2168
2169static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2170{
2171	spin_lock(&gl->gl_lockref.lock);
2172	gfs2_dump_glock(seq, gl, fsid);
2173	spin_unlock(&gl->gl_lockref.lock);
2174}
2175
2176static void dump_glock_func(struct gfs2_glock *gl)
2177{
2178	dump_glock(NULL, gl, true);
2179}
2180
2181static void withdraw_dq(struct gfs2_glock *gl)
2182{
2183	spin_lock(&gl->gl_lockref.lock);
2184	if (!__lockref_is_dead(&gl->gl_lockref) &&
2185	    glock_blocked_by_withdraw(gl))
2186		do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
2187	spin_unlock(&gl->gl_lockref.lock);
2188}
2189
2190void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
2191{
2192	glock_hash_walk(withdraw_dq, sdp);
2193}
2194
2195/**
2196 * gfs2_gl_hash_clear - Empty out the glock hash table
2197 * @sdp: the filesystem
2198 *
2199 * Called when unmounting the filesystem.
2200 */
2201
2202void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
2203{
2204	unsigned long start = jiffies;
2205	bool timed_out = false;
2206
2207	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
2208	flush_workqueue(sdp->sd_glock_wq);
2209	glock_hash_walk(clear_glock, sdp);
2210	flush_workqueue(sdp->sd_glock_wq);
2211
2212	while (!timed_out) {
2213		wait_event_timeout(sdp->sd_kill_wait,
2214				   !atomic_read(&sdp->sd_glock_disposal),
2215				   HZ * 60);
2216		if (!atomic_read(&sdp->sd_glock_disposal))
2217			break;
2218		timed_out = time_after(jiffies, start + (HZ * 600));
2219		fs_warn(sdp, "%u glocks left after %u seconds%s\n",
2220			atomic_read(&sdp->sd_glock_disposal),
2221			jiffies_to_msecs(jiffies - start) / 1000,
2222			timed_out ? ":" : "; still waiting");
2223	}
2224	gfs2_lm_unmount(sdp);
2225	gfs2_free_dead_glocks(sdp);
2226	glock_hash_walk(dump_glock_func, sdp);
2227	destroy_workqueue(sdp->sd_glock_wq);
2228	sdp->sd_glock_wq = NULL;
2229}
2230
2231static const char *state2str(unsigned state)
2232{
2233	switch(state) {
2234	case LM_ST_UNLOCKED:
2235		return "UN";
2236	case LM_ST_SHARED:
2237		return "SH";
2238	case LM_ST_DEFERRED:
2239		return "DF";
2240	case LM_ST_EXCLUSIVE:
2241		return "EX";
2242	}
2243	return "??";
2244}
2245
2246static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
2247{
2248	char *p = buf;
2249	if (flags & LM_FLAG_TRY)
2250		*p++ = 't';
2251	if (flags & LM_FLAG_TRY_1CB)
2252		*p++ = 'T';
2253	if (flags & LM_FLAG_NOEXP)
2254		*p++ = 'e';
2255	if (flags & LM_FLAG_ANY)
2256		*p++ = 'A';
2257	if (flags & LM_FLAG_NODE_SCOPE)
2258		*p++ = 'n';
2259	if (flags & GL_ASYNC)
2260		*p++ = 'a';
2261	if (flags & GL_EXACT)
2262		*p++ = 'E';
2263	if (flags & GL_NOCACHE)
2264		*p++ = 'c';
2265	if (test_bit(HIF_HOLDER, &iflags))
2266		*p++ = 'H';
2267	if (test_bit(HIF_WAIT, &iflags))
2268		*p++ = 'W';
2269	if (flags & GL_SKIP)
2270		*p++ = 's';
2271	*p = 0;
2272	return buf;
2273}
2274
2275/**
2276 * dump_holder - print information about a glock holder
2277 * @seq: the seq_file struct
2278 * @gh: the glock holder
2279 * @fs_id_buf: pointer to file system id (if requested)
2280 *
2281 */
2282
2283static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
2284			const char *fs_id_buf)
2285{
2286	const char *comm = "(none)";
2287	pid_t owner_pid = 0;
2288	char flags_buf[32];
2289
2290	rcu_read_lock();
2291	if (pid_is_meaningful(gh)) {
2292		struct task_struct *gh_owner;
2293
2294		comm = "(ended)";
2295		owner_pid = pid_nr(gh->gh_owner_pid);
2296		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
2297		if (gh_owner)
2298			comm = gh_owner->comm;
2299	}
2300	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
2301		       fs_id_buf, state2str(gh->gh_state),
2302		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
2303		       gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
2304	rcu_read_unlock();
2305}
2306
2307static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
2308{
2309	const unsigned long *gflags = &gl->gl_flags;
2310	char *p = buf;
2311
2312	if (test_bit(GLF_LOCK, gflags))
2313		*p++ = 'l';
2314	if (test_bit(GLF_DEMOTE, gflags))
2315		*p++ = 'D';
2316	if (test_bit(GLF_PENDING_DEMOTE, gflags))
2317		*p++ = 'd';
2318	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
2319		*p++ = 'p';
2320	if (test_bit(GLF_DIRTY, gflags))
2321		*p++ = 'y';
2322	if (test_bit(GLF_LFLUSH, gflags))
2323		*p++ = 'f';
2324	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
2325		*p++ = 'i';
2326	if (test_bit(GLF_HAVE_REPLY, gflags))
2327		*p++ = 'r';
2328	if (test_bit(GLF_INITIAL, gflags))
2329		*p++ = 'a';
2330	if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
2331		*p++ = 'F';
2332	if (!list_empty(&gl->gl_holders))
2333		*p++ = 'q';
2334	if (test_bit(GLF_LRU, gflags))
2335		*p++ = 'L';
2336	if (gl->gl_object)
2337		*p++ = 'o';
2338	if (test_bit(GLF_BLOCKING, gflags))
2339		*p++ = 'b';
2340	if (test_bit(GLF_UNLOCKED, gflags))
2341		*p++ = 'x';
2342	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
2343		*p++ = 'n';
2344	if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
2345		*p++ = 'N';
2346	if (test_bit(GLF_TRY_TO_EVICT, gflags))
2347		*p++ = 'e';
2348	if (test_bit(GLF_VERIFY_DELETE, gflags))
2349		*p++ = 'E';
2350	*p = 0;
2351	return buf;
2352}
2353
2354/**
2355 * gfs2_dump_glock - print information about a glock
2356 * @seq: The seq_file struct
2357 * @gl: the glock
2358 * @fsid: If true, also dump the file system id
2359 *
2360 * The file format is as follows:
2361 * One line per object; capital letters are used to indicate objects:
2362 * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented,
2363 * other objects are indented by a single space and follow the glock to
2364 * which they are related. Fields are indicated by lower case letters
2365 * followed by a colon and the field value, except for strings, which are
2366 * in [] so that it's possible to see if they are composed of spaces, for
2367 * example. The fields are n = number (id of the object), f = flags,
2368 * t = type, s = state, r = refcount, e = error, p = pid.
2369 *
2370 */
2371
2372void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2373{
2374	const struct gfs2_glock_operations *glops = gl->gl_ops;
2375	unsigned long long dtime;
2376	const struct gfs2_holder *gh;
2377	char gflags_buf[32];
2378	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2379	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2380	unsigned long nrpages = 0;
2381
2382	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
2383		struct address_space *mapping = gfs2_glock2aspace(gl);
2384
2385		nrpages = mapping->nrpages;
2386	}
2387	memset(fs_id_buf, 0, sizeof(fs_id_buf));
2388	if (fsid && sdp) /* safety precaution */
2389		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2390	dtime = jiffies - gl->gl_demote_time;
2391	dtime *= 1000000/HZ; /* demote time in uSec */
2392	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
2393		dtime = 0;
2394	gfs2_print_dbg(seq, "%sG:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
2395		       "v:%d r:%d m:%ld p:%lu\n",
2396		       fs_id_buf, state2str(gl->gl_state),
2397		       gl->gl_name.ln_type,
2398		       (unsigned long long)gl->gl_name.ln_number,
2399		       gflags2str(gflags_buf, gl),
2400		       state2str(gl->gl_target),
2401		       state2str(gl->gl_demote_state), dtime,
2402		       atomic_read(&gl->gl_ail_count),
2403		       atomic_read(&gl->gl_revokes),
2404		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
2405
2406	list_for_each_entry(gh, &gl->gl_holders, gh_list)
2407		dump_holder(seq, gh, fs_id_buf);
2408
2409	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
2410		glops->go_dump(seq, gl, fs_id_buf);
2411}
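/*
 * In the format described above, a dump of a shared inode glock with a
 * single holder might look roughly like this (all values illustrative):
 *
 *	G:  s:SH n:2/27bc f:qo t:SH d:UN/0 a:0 v:0 r:3 m:200 p:1
 *	 H: s:SH f:H e:0 p:1234 [cat] gfs2_open+0x1c8/0x220 [gfs2]
 */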
2412
2413static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
2414{
2415	struct gfs2_glock *gl = iter_ptr;
2416
2417	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
2418		   gl->gl_name.ln_type,
2419		   (unsigned long long)gl->gl_name.ln_number,
2420		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
2421		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
2422		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
2423		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
2424		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
2425		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
2426		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
2427		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
2428	return 0;
2429}
2430
2431static const char *gfs2_gltype[] = {
2432	"type",
2433	"reserved",
2434	"nondisk",
2435	"inode",
2436	"rgrp",
2437	"meta",
2438	"iopen",
2439	"flock",
2440	"plock",
2441	"quota",
2442	"journal",
2443};
2444
2445static const char *gfs2_stype[] = {
2446	[GFS2_LKS_SRTT]		= "srtt",
2447	[GFS2_LKS_SRTTVAR]	= "srttvar",
2448	[GFS2_LKS_SRTTB]	= "srttb",
2449	[GFS2_LKS_SRTTVARB]	= "srttvarb",
2450	[GFS2_LKS_SIRT]		= "sirt",
2451	[GFS2_LKS_SIRTVAR]	= "sirtvar",
2452	[GFS2_LKS_DCOUNT]	= "dlm",
2453	[GFS2_LKS_QCOUNT]	= "queue",
2454};
2455
2456#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
2457
2458static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
2459{
2460	struct gfs2_sbd *sdp = seq->private;
2461	loff_t pos = *(loff_t *)iter_ptr;
2462	unsigned index = pos >> 3;
2463	unsigned subindex = pos & 0x07;
2464	int i;
2465
2466	if (index == 0 && subindex != 0)
2467		return 0;
2468
2469	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
2470		   (index == 0) ? "cpu": gfs2_stype[subindex]);
2471
2472	for_each_possible_cpu(i) {
2473		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
2474
2475		if (index == 0)
2476			seq_printf(seq, " %15u", i);
2477		else
2478			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
2479				   lkstats[index - 1].stats[subindex]);
2480	}
2481	seq_putc(seq, '\n');
2482	return 0;
2483}
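/*
 * The resulting table has one row per (glock type, statistic) pair and
 * one column per possible CPU, with the first row labelling the columns.
 * On a two-CPU machine, the output shape is roughly (numbers made up):
 *
 *	type            cpu:               0               1
 *	inode          srtt:            1563            1845
 *	inode       srttvar:              22              31
 */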
2484
2485int __init gfs2_glock_init(void)
2486{
2487	int i, ret;
2488
2489	ret = rhashtable_init(&gl_hash_table, &ht_parms);
2490	if (ret < 0)
2491		return ret;
2492
2493	glock_shrinker = shrinker_alloc(0, "gfs2-glock");
2494	if (!glock_shrinker) {
2495		rhashtable_destroy(&gl_hash_table);
2496		return -ENOMEM;
2497	}
2498
2499	glock_shrinker->count_objects = gfs2_glock_shrink_count;
2500	glock_shrinker->scan_objects = gfs2_glock_shrink_scan;
2501
2502	shrinker_register(glock_shrinker);
2503
2504	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
2505		init_waitqueue_head(glock_wait_table + i);
2506
2507	return 0;
2508}
2509
2510void gfs2_glock_exit(void)
2511{
2512	shrinker_free(glock_shrinker);
2513	rhashtable_destroy(&gl_hash_table);
2514}
2515
2516static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
2517{
2518	struct gfs2_glock *gl = gi->gl;
2519
2520	if (gl) {
2521		if (n == 0)
2522			return;
2523		gfs2_glock_put_async(gl);
2524	}
2525	for (;;) {
2526		gl = rhashtable_walk_next(&gi->hti);
2527		if (IS_ERR_OR_NULL(gl)) {
2528			if (gl == ERR_PTR(-EAGAIN)) {
2529				n = 1;
2530				continue;
2531			}
2532			gl = NULL;
2533			break;
2534		}
2535		if (gl->gl_name.ln_sbd != gi->sdp)
2536			continue;
2537		if (n <= 1) {
2538			if (!lockref_get_not_dead(&gl->gl_lockref))
2539				continue;
2540			break;
2541		} else {
2542			if (__lockref_is_dead(&gl->gl_lockref))
2543				continue;
2544			n--;
2545		}
2546	}
2547	gi->gl = gl;
2548}
2549
2550static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
2551	__acquires(RCU)
2552{
2553	struct gfs2_glock_iter *gi = seq->private;
2554	loff_t n;
2555
2556	/*
2557	 * We can either stay where we are, skip to the next hash table
2558	 * entry, or start from the beginning.
2559	 */
2560	if (*pos < gi->last_pos) {
2561		rhashtable_walk_exit(&gi->hti);
2562		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2563		n = *pos + 1;
2564	} else {
2565		n = *pos - gi->last_pos;
2566	}
2567
2568	rhashtable_walk_start(&gi->hti);
2569
2570	gfs2_glock_iter_next(gi, n);
2571	gi->last_pos = *pos;
2572	return gi->gl;
2573}
2574
2575static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
2576				 loff_t *pos)
2577{
2578	struct gfs2_glock_iter *gi = seq->private;
2579
2580	(*pos)++;
2581	gi->last_pos = *pos;
2582	gfs2_glock_iter_next(gi, 1);
2583	return gi->gl;
2584}
2585
2586static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
2587	__releases(RCU)
2588{
2589	struct gfs2_glock_iter *gi = seq->private;
2590
2591	rhashtable_walk_stop(&gi->hti);
2592}
2593
2594static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2595{
2596	dump_glock(seq, iter_ptr, false);
2597	return 0;
2598}
2599
2600static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2601{
2602	preempt_disable();
2603	if (*pos >= GFS2_NR_SBSTATS)
2604		return NULL;
2605	return pos;
2606}
2607
2608static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2609				   loff_t *pos)
2610{
2611	(*pos)++;
2612	if (*pos >= GFS2_NR_SBSTATS)
2613		return NULL;
2614	return pos;
2615}
2616
2617static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2618{
2619	preempt_enable();
2620}
2621
2622static const struct seq_operations gfs2_glock_seq_ops = {
2623	.start = gfs2_glock_seq_start,
2624	.next  = gfs2_glock_seq_next,
2625	.stop  = gfs2_glock_seq_stop,
2626	.show  = gfs2_glock_seq_show,
2627};
2628
2629static const struct seq_operations gfs2_glstats_seq_ops = {
2630	.start = gfs2_glock_seq_start,
2631	.next  = gfs2_glock_seq_next,
2632	.stop  = gfs2_glock_seq_stop,
2633	.show  = gfs2_glstats_seq_show,
2634};
2635
2636static const struct seq_operations gfs2_sbstats_sops = {
2637	.start = gfs2_sbstats_seq_start,
2638	.next  = gfs2_sbstats_seq_next,
2639	.stop  = gfs2_sbstats_seq_stop,
2640	.show  = gfs2_sbstats_seq_show,
2641};
2642
2643#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2644
2645static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2646			      const struct seq_operations *ops)
2647{
2648	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
2649	if (ret == 0) {
2650		struct seq_file *seq = file->private_data;
2651		struct gfs2_glock_iter *gi = seq->private;
2652
2653		gi->sdp = inode->i_private;
2654		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2655		if (seq->buf)
2656			seq->size = GFS2_SEQ_GOODSIZE;
2657		/*
2658		 * Initially, we are "before" the first hash table entry; the
2659		 * first call to rhashtable_walk_next gets us the first entry.
2660		 */
2661		gi->last_pos = -1;
2662		gi->gl = NULL;
2663		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2664	}
2665	return ret;
2666}
2667
2668static int gfs2_glocks_open(struct inode *inode, struct file *file)
2669{
2670	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2671}
2672
2673static int gfs2_glocks_release(struct inode *inode, struct file *file)
2674{
2675	struct seq_file *seq = file->private_data;
2676	struct gfs2_glock_iter *gi = seq->private;
2677
2678	if (gi->gl)
2679		gfs2_glock_put(gi->gl);
2680	rhashtable_walk_exit(&gi->hti);
2681	return seq_release_private(inode, file);
2682}
2683
2684static int gfs2_glstats_open(struct inode *inode, struct file *file)
2685{
2686	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2687}
2688
2689static const struct file_operations gfs2_glocks_fops = {
2690	.owner   = THIS_MODULE,
2691	.open    = gfs2_glocks_open,
2692	.read    = seq_read,
2693	.llseek  = seq_lseek,
2694	.release = gfs2_glocks_release,
2695};
2696
2697static const struct file_operations gfs2_glstats_fops = {
2698	.owner   = THIS_MODULE,
2699	.open    = gfs2_glstats_open,
2700	.read    = seq_read,
2701	.llseek  = seq_lseek,
2702	.release = gfs2_glocks_release,
2703};
2704
2705struct gfs2_glockfd_iter {
2706	struct super_block *sb;
2707	unsigned int tgid;
2708	struct task_struct *task;
2709	unsigned int fd;
2710	struct file *file;
2711};
2712
2713static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
2714{
2715	struct pid_namespace *ns = task_active_pid_ns(current);
2716	struct pid *pid;
2717
2718	if (i->task)
2719		put_task_struct(i->task);
2720
2721	rcu_read_lock();
2722retry:
2723	i->task = NULL;
2724	pid = find_ge_pid(i->tgid, ns);
2725	if (pid) {
2726		i->tgid = pid_nr_ns(pid, ns);
2727		i->task = pid_task(pid, PIDTYPE_TGID);
2728		if (!i->task) {
2729			i->tgid++;
2730			goto retry;
2731		}
2732		get_task_struct(i->task);
2733	}
2734	rcu_read_unlock();
2735	return i->task;
2736}
2737
2738static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
2739{
2740	if (i->file) {
2741		fput(i->file);
2742		i->file = NULL;
2743	}
2744
2745	for(;; i->fd++) {
2746		i->file = fget_task_next(i->task, &i->fd);
2747		if (!i->file) {
2748			i->fd = 0;
2749			break;
2750		}
2751
2752		if (file_inode(i->file)->i_sb == i->sb)
2753			break;
2754
2755		fput(i->file);
2756	}
2757	return i->file;
2758}
2759
2760static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
2761{
2762	struct gfs2_glockfd_iter *i = seq->private;
2763
2764	if (*pos)
2765		return NULL;
2766	while (gfs2_glockfd_next_task(i)) {
2767		if (gfs2_glockfd_next_file(i))
2768			return i;
2769		i->tgid++;
2770	}
2771	return NULL;
2772}
2773
2774static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
2775				   loff_t *pos)
2776{
2777	struct gfs2_glockfd_iter *i = seq->private;
2778
2779	(*pos)++;
2780	i->fd++;
2781	do {
2782		if (gfs2_glockfd_next_file(i))
2783			return i;
2784		i->tgid++;
2785	} while (gfs2_glockfd_next_task(i));
2786	return NULL;
2787}
2788
2789static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
2790{
2791	struct gfs2_glockfd_iter *i = seq->private;
2792
2793	if (i->file)
2794		fput(i->file);
2795	if (i->task)
2796		put_task_struct(i->task);
2797}
2798
2799static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
2800					struct gfs2_glockfd_iter *i)
2801{
2802	struct gfs2_file *fp = i->file->private_data;
2803	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
2804	struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };
2805
2806	if (!READ_ONCE(fl_gh->gh_gl))
2807		return;
2808
2809	spin_lock(&i->file->f_lock);
2810	if (gfs2_holder_initialized(fl_gh))
2811		gl_name = fl_gh->gh_gl->gl_name;
2812	spin_unlock(&i->file->f_lock);
2813
2814	if (gl_name.ln_type != LM_TYPE_RESERVED) {
2815		seq_printf(seq, "%d %u %u/%llx\n",
2816			   i->tgid, i->fd, gl_name.ln_type,
2817			   (unsigned long long)gl_name.ln_number);
2818	}
2819}
2820
2821static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
2822{
2823	struct gfs2_glockfd_iter *i = seq->private;
2824	struct inode *inode = file_inode(i->file);
2825	struct gfs2_glock *gl;
2826
2827	inode_lock_shared(inode);
2828	gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
2829	if (gl) {
2830		seq_printf(seq, "%d %u %u/%llx\n",
2831			   i->tgid, i->fd, gl->gl_name.ln_type,
2832			   (unsigned long long)gl->gl_name.ln_number);
2833	}
2834	gfs2_glockfd_seq_show_flock(seq, i);
2835	inode_unlock_shared(inode);
2836	return 0;
2837}
2838
2839static const struct seq_operations gfs2_glockfd_seq_ops = {
2840	.start = gfs2_glockfd_seq_start,
2841	.next  = gfs2_glockfd_seq_next,
2842	.stop  = gfs2_glockfd_seq_stop,
2843	.show  = gfs2_glockfd_seq_show,
2844};
2845
2846static int gfs2_glockfd_open(struct inode *inode, struct file *file)
2847{
2848	struct gfs2_glockfd_iter *i;
2849	struct gfs2_sbd *sdp = inode->i_private;
2850
2851	i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
2852			       sizeof(struct gfs2_glockfd_iter));
2853	if (!i)
2854		return -ENOMEM;
2855	i->sb = sdp->sd_vfs;
2856	return 0;
2857}
2858
2859static const struct file_operations gfs2_glockfd_fops = {
2860	.owner   = THIS_MODULE,
2861	.open    = gfs2_glockfd_open,
2862	.read    = seq_read,
2863	.llseek  = seq_lseek,
2864	.release = seq_release_private,
2865};
2866
2867DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
2868
2869void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2870{
2871	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2872
2873	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2874			    &gfs2_glocks_fops);
2875
2876	debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2877			    &gfs2_glockfd_fops);
2878
2879	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2880			    &gfs2_glstats_fops);
2881
2882	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2883			    &gfs2_sbstats_fops);
2884}
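/*
 * Assuming debugfs is mounted in its conventional location, the files
 * created above appear as:
 *
 *	/sys/kernel/debug/gfs2/<table_name>/glocks
 *	/sys/kernel/debug/gfs2/<table_name>/glockfd
 *	/sys/kernel/debug/gfs2/<table_name>/glstats
 *	/sys/kernel/debug/gfs2/<table_name>/sbstats
 */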
2885
2886void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2887{
2888	debugfs_remove_recursive(sdp->debugfs_dir);
2889	sdp->debugfs_dir = NULL;
2890}
2891
2892void gfs2_register_debugfs(void)
2893{
2894	gfs2_root = debugfs_create_dir("gfs2", NULL);
2895}
2896
2897void gfs2_unregister_debugfs(void)
2898{
2899	debugfs_remove(gfs2_root);
2900	gfs2_root = NULL;
2901}