   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/sched.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/buffer_head.h>
  16#include <linux/delay.h>
  17#include <linux/sort.h>
  18#include <linux/hash.h>
  19#include <linux/jhash.h>
  20#include <linux/kallsyms.h>
  21#include <linux/gfs2_ondisk.h>
  22#include <linux/list.h>
  23#include <linux/wait.h>
  24#include <linux/module.h>
  25#include <linux/uaccess.h>
  26#include <linux/seq_file.h>
  27#include <linux/debugfs.h>
  28#include <linux/kthread.h>
  29#include <linux/freezer.h>
  30#include <linux/workqueue.h>
  31#include <linux/jiffies.h>
  32#include <linux/rcupdate.h>
  33#include <linux/rculist_bl.h>
  34#include <linux/bit_spinlock.h>
  35#include <linux/percpu.h>
  36#include <linux/list_sort.h>
  37#include <linux/lockref.h>
  38#include <linux/rhashtable.h>
  39
  40#include "gfs2.h"
  41#include "incore.h"
  42#include "glock.h"
  43#include "glops.h"
  44#include "inode.h"
  45#include "lops.h"
  46#include "meta_io.h"
  47#include "quota.h"
  48#include "super.h"
  49#include "util.h"
  50#include "bmap.h"
  51#define CREATE_TRACE_POINTS
  52#include "trace_gfs2.h"
  53
  54struct gfs2_glock_iter {
  55	struct gfs2_sbd *sdp;		/* incore superblock           */
  56	struct rhashtable_iter hti;	/* rhashtable iterator         */
  57	struct gfs2_glock *gl;		/* current glock struct        */
  58	loff_t last_pos;		/* last position               */
  59};
  60
  61typedef void (*glock_examiner) (struct gfs2_glock * gl);
  62
  63static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  64
  65static struct dentry *gfs2_root;
  66static struct workqueue_struct *glock_workqueue;
  67struct workqueue_struct *gfs2_delete_workqueue;
  68static LIST_HEAD(lru_list);
  69static atomic_t lru_count = ATOMIC_INIT(0);
  70static DEFINE_SPINLOCK(lru_lock);
  71
  72#define GFS2_GL_HASH_SHIFT      15
  73#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
  74
  75static const struct rhashtable_params ht_parms = {
  76	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
  77	.key_len = offsetofend(struct lm_lockname, ln_type),
  78	.key_offset = offsetof(struct gfs2_glock, gl_name),
  79	.head_offset = offsetof(struct gfs2_glock, gl_node),
  80};
  81
  82static struct rhashtable gl_hash_table;
  83
  84#define GLOCK_WAIT_TABLE_BITS 12
  85#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
  86static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
  87
  88struct wait_glock_queue {
  89	struct lm_lockname *name;
  90	wait_queue_entry_t wait;
  91};
  92
  93static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
  94			       int sync, void *key)
  95{
  96	struct wait_glock_queue *wait_glock =
  97		container_of(wait, struct wait_glock_queue, wait);
  98	struct lm_lockname *wait_name = wait_glock->name;
  99	struct lm_lockname *wake_name = key;
 100
 101	if (wake_name->ln_sbd != wait_name->ln_sbd ||
 102	    wake_name->ln_number != wait_name->ln_number ||
 103	    wake_name->ln_type != wait_name->ln_type)
 104		return 0;
 105	return autoremove_wake_function(wait, mode, sync, key);
 106}
 107
 108static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
 109{
 110	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
 111
 112	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
 113}
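/*
 * Editor's note (not in the original source): every lock name hashes into
 * the shared glock_wait_table above, so no per-glock wait queue is needed.
 * A minimal sketch of finding the bucket for a hypothetical lock name,
 * assuming "sdp" is a valid superblock pointer:
 *
 *	struct lm_lockname name = {
 *		.ln_sbd = sdp,
 *		.ln_number = 42,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *	wait_queue_head_t *wq = glock_waitqueue(&name);
 */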
 114
 115/**
 116 * wake_up_glock  -  Wake up waiters on a glock
 117 * @gl: the glock
 118 */
 119static void wake_up_glock(struct gfs2_glock *gl)
 120{
 121	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
 122
 123	if (waitqueue_active(wq))
 124		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
 125}
 126
 127static void gfs2_glock_dealloc(struct rcu_head *rcu)
 128{
 129	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 130
 131	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 132		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
 133	} else {
 134		kfree(gl->gl_lksb.sb_lvbptr);
 135		kmem_cache_free(gfs2_glock_cachep, gl);
 136	}
 137}
 138
 139void gfs2_glock_free(struct gfs2_glock *gl)
 140{
 141	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 142
 143	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 144	smp_mb();
 145	wake_up_glock(gl);
 146	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 147	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 148		wake_up(&sdp->sd_glock_wait);
 149}
 150
 151/**
 152 * gfs2_glock_hold() - increment reference count on glock
 153 * @gl: The glock to hold
 154 *
 155 */
 156
 157void gfs2_glock_hold(struct gfs2_glock *gl)
 158{
 159	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 160	lockref_get(&gl->gl_lockref);
 161}
 162
 163/**
 164 * demote_ok - Check to see if it's ok to unlock a glock
 165 * @gl: the glock
 166 *
 167 * Returns: 1 if it's ok
 168 */
 169
 170static int demote_ok(const struct gfs2_glock *gl)
 171{
 172	const struct gfs2_glock_operations *glops = gl->gl_ops;
 173
 174	if (gl->gl_state == LM_ST_UNLOCKED)
 175		return 0;
 176	if (!list_empty(&gl->gl_holders))
 177		return 0;
 178	if (glops->go_demote_ok)
 179		return glops->go_demote_ok(gl);
 180	return 1;
 181}
 182
 183
 184void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 185{
 186	spin_lock(&lru_lock);
 187
 188	if (!list_empty(&gl->gl_lru))
 189		list_del_init(&gl->gl_lru);
 190	else
 191		atomic_inc(&lru_count);
 192
 193	list_add_tail(&gl->gl_lru, &lru_list);
 194	set_bit(GLF_LRU, &gl->gl_flags);
 195	spin_unlock(&lru_lock);
 196}
 197
 198static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 199{
 200	if (!(gl->gl_ops->go_flags & GLOF_LRU))
 201		return;
 202
 203	spin_lock(&lru_lock);
 204	if (!list_empty(&gl->gl_lru)) {
 205		list_del_init(&gl->gl_lru);
 206		atomic_dec(&lru_count);
 207		clear_bit(GLF_LRU, &gl->gl_flags);
 208	}
 209	spin_unlock(&lru_lock);
 210}
 211
 212/*
 213 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 214 * work queue.
 215 */
 216static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 217	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 218		/*
 219		 * We are holding the lockref spinlock, and the work was still
 220		 * queued above.  The queued work (glock_work_func) takes that
 221		 * spinlock before dropping its glock reference(s), so it
 222		 * cannot have dropped them in the meantime.
 223		 */
 224		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
 225		gl->gl_lockref.count--;
 226	}
 227}
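/*
 * Editor's note (not in the original source): callers that already hold
 * gl_lockref.lock take an extra reference and hand it to the work queue,
 * mirroring the call sites later in this file (e.g. gfs2_glock_complete()):
 *
 *	spin_lock(&gl->gl_lockref.lock);
 *	gl->gl_lockref.count++;
 *	__gfs2_glock_queue_work(gl, 0);
 *	spin_unlock(&gl->gl_lockref.lock);
 *
 * If queueing fails because work is already pending, the extra reference
 * is dropped again inside __gfs2_glock_queue_work().
 */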
 228
 229static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 230	spin_lock(&gl->gl_lockref.lock);
 231	__gfs2_glock_queue_work(gl, delay);
 232	spin_unlock(&gl->gl_lockref.lock);
 233}
 234
 235static void __gfs2_glock_put(struct gfs2_glock *gl)
 236{
 237	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 238	struct address_space *mapping = gfs2_glock2aspace(gl);
 239
 240	lockref_mark_dead(&gl->gl_lockref);
 241
 242	gfs2_glock_remove_from_lru(gl);
 243	spin_unlock(&gl->gl_lockref.lock);
 244	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 245	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 246	trace_gfs2_glock_put(gl);
 247	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 248}
 249
 250/*
 251 * Cause the glock to be put in work queue context.
 252 */
 253void gfs2_glock_queue_put(struct gfs2_glock *gl)
 254{
 255	gfs2_glock_queue_work(gl, 0);
 256}
 257
 258/**
 259 * gfs2_glock_put() - Decrement reference count on glock
 260 * @gl: The glock to put
 261 *
 262 */
 263
 264void gfs2_glock_put(struct gfs2_glock *gl)
 265{
 266	if (lockref_put_or_lock(&gl->gl_lockref))
 267		return;
 268
 269	__gfs2_glock_put(gl);
 270}
 271
 272/**
  273 * may_grant - check if it's ok to grant a new lock
  274 * @gl: The glock
  275 * @gh: The lock request which we wish to grant
  276 *
  277 * Returns: true if it's ok to grant the lock
  278 */
 279
 280static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 281{
 282	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
 283	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 284	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 285		return 0;
 286	if (gl->gl_state == gh->gh_state)
 287		return 1;
 288	if (gh->gh_flags & GL_EXACT)
 289		return 0;
 290	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 291		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
 292			return 1;
 293		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
 294			return 1;
 295	}
 296	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
 297		return 1;
 298	return 0;
 299}
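/*
 * Editor's worked example (not in the original source): with the glock in
 * LM_ST_SHARED and a shared holder at the head of gl_holders, a second
 * LM_ST_SHARED request is grantable (gl->gl_state == gh->gh_state), while
 * an LM_ST_EXCLUSIVE request is refused here; it can only be granted once
 * it is at the head of the queue and do_xmote()/finish_xmote() have moved
 * the glock itself to LM_ST_EXCLUSIVE.
 */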
 300
 301static void gfs2_holder_wake(struct gfs2_holder *gh)
 302{
 303	clear_bit(HIF_WAIT, &gh->gh_iflags);
 304	smp_mb__after_atomic();
 305	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 306}
 307
 308/**
 309 * do_error - Something unexpected has happened during a lock request
  310 * @gl: The glock; @ret: the DLM status, or 0 to fail queued "try" locks
  311 */
 312
 313static void do_error(struct gfs2_glock *gl, const int ret)
 314{
 315	struct gfs2_holder *gh, *tmp;
 316
 317	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 318		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 319			continue;
 320		if (ret & LM_OUT_ERROR)
 321			gh->gh_error = -EIO;
 322		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 323			gh->gh_error = GLR_TRYFAILED;
 324		else
 325			continue;
 326		list_del_init(&gh->gh_list);
 327		trace_gfs2_glock_queue(gh, 0);
 328		gfs2_holder_wake(gh);
 329	}
 330}
 331
 332/**
 333 * do_promote - promote as many requests as possible on the current queue
 334 * @gl: The glock
 335 * 
 336 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 337 *          if a type specific operation is underway.
 338 */
 339
 340static int do_promote(struct gfs2_glock *gl)
 341__releases(&gl->gl_lockref.lock)
 342__acquires(&gl->gl_lockref.lock)
 343{
 344	const struct gfs2_glock_operations *glops = gl->gl_ops;
 345	struct gfs2_holder *gh, *tmp;
 346	int ret;
 347
 348restart:
 349	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 350		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 351			continue;
 352		if (may_grant(gl, gh)) {
 353			if (gh->gh_list.prev == &gl->gl_holders &&
 354			    glops->go_lock) {
 355				spin_unlock(&gl->gl_lockref.lock);
 356				/* FIXME: eliminate this eventually */
 357				ret = glops->go_lock(gh);
 358				spin_lock(&gl->gl_lockref.lock);
 359				if (ret) {
 360					if (ret == 1)
 361						return 2;
 362					gh->gh_error = ret;
 363					list_del_init(&gh->gh_list);
 364					trace_gfs2_glock_queue(gh, 0);
 365					gfs2_holder_wake(gh);
 366					goto restart;
 367				}
 368				set_bit(HIF_HOLDER, &gh->gh_iflags);
 369				trace_gfs2_promote(gh, 1);
 370				gfs2_holder_wake(gh);
 371				goto restart;
 372			}
 373			set_bit(HIF_HOLDER, &gh->gh_iflags);
 374			trace_gfs2_promote(gh, 0);
 375			gfs2_holder_wake(gh);
 376			continue;
 377		}
 378		if (gh->gh_list.prev == &gl->gl_holders)
 379			return 1;
 380		do_error(gl, 0);
 381		break;
 382	}
 383	return 0;
 384}
 385
 386/**
 387 * find_first_waiter - find the first gh that's waiting for the glock
 388 * @gl: the glock
 389 */
 390
 391static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 392{
 393	struct gfs2_holder *gh;
 394
 395	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 396		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 397			return gh;
 398	}
 399	return NULL;
 400}
 401
 402/**
 403 * state_change - record that the glock is now in a different state
 404 * @gl: the glock
  405 * @new_state: the new state
 406 *
 407 */
 408
 409static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 410{
 411	int held1, held2;
 412
 413	held1 = (gl->gl_state != LM_ST_UNLOCKED);
 414	held2 = (new_state != LM_ST_UNLOCKED);
 415
 416	if (held1 != held2) {
 417		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 418		if (held2)
 419			gl->gl_lockref.count++;
 420		else
 421			gl->gl_lockref.count--;
 422	}
 423	if (held1 && held2 && list_empty(&gl->gl_holders))
 424		clear_bit(GLF_QUEUED, &gl->gl_flags);
 425
 426	if (new_state != gl->gl_target)
 427		/* shorten our minimum hold time */
 428		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 429				       GL_GLOCK_MIN_HOLD);
 430	gl->gl_state = new_state;
 431	gl->gl_tchange = jiffies;
 432}
 433
 434static void gfs2_demote_wake(struct gfs2_glock *gl)
 435{
 436	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 437	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 438	smp_mb__after_atomic();
 439	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 440}
 441
 442/**
 443 * finish_xmote - The DLM has replied to one of our lock requests
 444 * @gl: The glock
 445 * @ret: The status from the DLM
 446 *
 447 */
 448
 449static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 450{
 451	const struct gfs2_glock_operations *glops = gl->gl_ops;
 452	struct gfs2_holder *gh;
 453	unsigned state = ret & LM_OUT_ST_MASK;
 454	int rv;
 455
 456	spin_lock(&gl->gl_lockref.lock);
 457	trace_gfs2_glock_state_change(gl, state);
 458	state_change(gl, state);
 459	gh = find_first_waiter(gl);
 460
 461	/* Demote to UN request arrived during demote to SH or DF */
 462	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 463	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 464		gl->gl_target = LM_ST_UNLOCKED;
 465
 466	/* Check for state != intended state */
 467	if (unlikely(state != gl->gl_target)) {
 468		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 469			/* move to back of queue and try next entry */
 470			if (ret & LM_OUT_CANCELED) {
 471				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 472					list_move_tail(&gh->gh_list, &gl->gl_holders);
 473				gh = find_first_waiter(gl);
 474				gl->gl_target = gh->gh_state;
 475				goto retry;
 476			}
 477			/* Some error or failed "try lock" - report it */
 478			if ((ret & LM_OUT_ERROR) ||
 479			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 480				gl->gl_target = gl->gl_state;
 481				do_error(gl, ret);
 482				goto out;
 483			}
 484		}
 485		switch(state) {
 486		/* Unlocked due to conversion deadlock, try again */
 487		case LM_ST_UNLOCKED:
 488retry:
 489			do_xmote(gl, gh, gl->gl_target);
 490			break;
 491		/* Conversion fails, unlock and try again */
 492		case LM_ST_SHARED:
 493		case LM_ST_DEFERRED:
 494			do_xmote(gl, gh, LM_ST_UNLOCKED);
 495			break;
 496		default: /* Everything else */
 497			pr_err("wanted %u got %u\n", gl->gl_target, state);
 498			GLOCK_BUG_ON(gl, 1);
 499		}
 500		spin_unlock(&gl->gl_lockref.lock);
 501		return;
 502	}
 503
 504	/* Fast path - we got what we asked for */
 505	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 506		gfs2_demote_wake(gl);
 507	if (state != LM_ST_UNLOCKED) {
 508		if (glops->go_xmote_bh) {
 509			spin_unlock(&gl->gl_lockref.lock);
 510			rv = glops->go_xmote_bh(gl, gh);
 511			spin_lock(&gl->gl_lockref.lock);
 512			if (rv) {
 513				do_error(gl, rv);
 514				goto out;
 515			}
 516		}
 517		rv = do_promote(gl);
 518		if (rv == 2)
 519			goto out_locked;
 520	}
 521out:
 522	clear_bit(GLF_LOCK, &gl->gl_flags);
 523out_locked:
 524	spin_unlock(&gl->gl_lockref.lock);
 525}
 526
 527/**
 528 * do_xmote - Calls the DLM to change the state of a lock
 529 * @gl: The lock state
 530 * @gh: The holder (only for promotes)
 531 * @target: The target lock state
 532 *
 533 */
 534
 535static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
 536__releases(&gl->gl_lockref.lock)
 537__acquires(&gl->gl_lockref.lock)
 538{
 539	const struct gfs2_glock_operations *glops = gl->gl_ops;
 540	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 541	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 542	int ret;
 543
 544	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
 545	    target != LM_ST_UNLOCKED)
 546		return;
 547	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 548		      LM_FLAG_PRIORITY);
 549	GLOCK_BUG_ON(gl, gl->gl_state == target);
 550	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 551	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 552	    glops->go_inval) {
 553		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 554		do_error(gl, 0); /* Fail queued try locks */
 555	}
 556	gl->gl_req = target;
 557	set_bit(GLF_BLOCKING, &gl->gl_flags);
 558	if ((gl->gl_req == LM_ST_UNLOCKED) ||
 559	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 560	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 561		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 562	spin_unlock(&gl->gl_lockref.lock);
 563	if (glops->go_sync)
 564		glops->go_sync(gl);
 565	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 566		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 567	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 568
 569	gfs2_glock_hold(gl);
 570	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 571		/* lock_dlm */
 572		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 573		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 574		    target == LM_ST_UNLOCKED &&
 575		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
 576			finish_xmote(gl, target);
 577			gfs2_glock_queue_work(gl, 0);
 578		}
 579		else if (ret) {
 580			pr_err("lm_lock ret %d\n", ret);
 581			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
 582						   &sdp->sd_flags));
 583		}
 584	} else { /* lock_nolock */
 585		finish_xmote(gl, target);
 586		gfs2_glock_queue_work(gl, 0);
 587	}
 588
 589	spin_lock(&gl->gl_lockref.lock);
 590}
 591
 592/**
 593 * find_first_holder - find the first "holder" gh
 594 * @gl: the glock
 595 */
 596
 597static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 598{
 599	struct gfs2_holder *gh;
 600
 601	if (!list_empty(&gl->gl_holders)) {
 602		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 603		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 604			return gh;
 605	}
 606	return NULL;
 607}
 608
 609/**
 610 * run_queue - do all outstanding tasks related to a glock
 611 * @gl: The glock in question
 612 * @nonblock: True if we must not block in run_queue
 613 *
 614 */
 615
 616static void run_queue(struct gfs2_glock *gl, const int nonblock)
 617__releases(&gl->gl_lockref.lock)
 618__acquires(&gl->gl_lockref.lock)
 619{
 620	struct gfs2_holder *gh = NULL;
 621	int ret;
 622
 623	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 624		return;
 625
 626	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 627
 628	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 629	    gl->gl_demote_state != gl->gl_state) {
 630		if (find_first_holder(gl))
 631			goto out_unlock;
 632		if (nonblock)
 633			goto out_sched;
 634		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 635		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 636		gl->gl_target = gl->gl_demote_state;
 637	} else {
 638		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 639			gfs2_demote_wake(gl);
 640		ret = do_promote(gl);
 641		if (ret == 0)
 642			goto out_unlock;
 643		if (ret == 2)
 644			goto out;
 645		gh = find_first_waiter(gl);
 646		gl->gl_target = gh->gh_state;
 647		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 648			do_error(gl, 0); /* Fail queued try locks */
 649	}
 650	do_xmote(gl, gh, gl->gl_target);
 651out:
 652	return;
 653
 654out_sched:
 655	clear_bit(GLF_LOCK, &gl->gl_flags);
 656	smp_mb__after_atomic();
 657	gl->gl_lockref.count++;
 658	__gfs2_glock_queue_work(gl, 0);
 659	return;
 660
 661out_unlock:
 662	clear_bit(GLF_LOCK, &gl->gl_flags);
 663	smp_mb__after_atomic();
 664	return;
 665}
 666
 667static void delete_work_func(struct work_struct *work)
 668{
 669	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
 670	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 671	struct inode *inode;
 672	u64 no_addr = gl->gl_name.ln_number;
 673
 674	/* If someone's using this glock to create a new dinode, the block must
 675	   have been freed by another node, then re-used, in which case our
 676	   iopen callback is too late after the fact. Ignore it. */
 677	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
 678		goto out;
 679
 680	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
 681	if (inode && !IS_ERR(inode)) {
 682		d_prune_aliases(inode);
 683		iput(inode);
 684	}
 685out:
 686	gfs2_glock_put(gl);
 687}
 688
 689static void glock_work_func(struct work_struct *work)
 690{
 691	unsigned long delay = 0;
 692	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 693	unsigned int drop_refs = 1;
 694
 695	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 696		finish_xmote(gl, gl->gl_reply);
 697		drop_refs++;
 698	}
 699	spin_lock(&gl->gl_lockref.lock);
 700	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 701	    gl->gl_state != LM_ST_UNLOCKED &&
 702	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
 703		unsigned long holdtime, now = jiffies;
 704
 705		holdtime = gl->gl_tchange + gl->gl_hold_time;
 706		if (time_before(now, holdtime))
 707			delay = holdtime - now;
 708
 709		if (!delay) {
 710			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
 711			set_bit(GLF_DEMOTE, &gl->gl_flags);
 712		}
 713	}
 714	run_queue(gl, 0);
 715	if (delay) {
 716		/* Keep one glock reference for the work we requeue. */
 717		drop_refs--;
 718		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 719			delay = 0;
 720		__gfs2_glock_queue_work(gl, delay);
 721	}
 722
 723	/*
 724	 * Drop the remaining glock references manually here. (Mind that
  725	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
 726	 * here as well.)
 727	 */
 728	gl->gl_lockref.count -= drop_refs;
 729	if (!gl->gl_lockref.count) {
 730		__gfs2_glock_put(gl);
 731		return;
 732	}
 733	spin_unlock(&gl->gl_lockref.lock);
 734}
 735
 736static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
 737					    struct gfs2_glock *new)
 738{
 739	struct wait_glock_queue wait;
 740	wait_queue_head_t *wq = glock_waitqueue(name);
 741	struct gfs2_glock *gl;
 742
 743	wait.name = name;
 744	init_wait(&wait.wait);
 745	wait.wait.func = glock_wake_function;
 746
 747again:
 748	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 749	rcu_read_lock();
 750	if (new) {
 751		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
 752			&new->gl_node, ht_parms);
 753		if (IS_ERR(gl))
 754			goto out;
 755	} else {
 756		gl = rhashtable_lookup_fast(&gl_hash_table,
 757			name, ht_parms);
 758	}
 759	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
 760		rcu_read_unlock();
 761		schedule();
 762		goto again;
 763	}
 764out:
 765	rcu_read_unlock();
 766	finish_wait(wq, &wait.wait);
 767	return gl;
 768}
 769
 770/**
 771 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 772 * @sdp: The GFS2 superblock
 773 * @number: the lock number
 774 * @glops: The glock_operations to use
 775 * @create: If 0, don't create the glock if it doesn't exist
 776 * @glp: the glock is returned here
 777 *
 778 * This does not lock a glock, just finds/creates structures for one.
 779 *
 780 * Returns: errno
 781 */
 782
 783int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 784		   const struct gfs2_glock_operations *glops, int create,
 785		   struct gfs2_glock **glp)
 786{
 787	struct super_block *s = sdp->sd_vfs;
 788	struct lm_lockname name = { .ln_number = number,
 789				    .ln_type = glops->go_type,
 790				    .ln_sbd = sdp };
 791	struct gfs2_glock *gl, *tmp;
 792	struct address_space *mapping;
 793	struct kmem_cache *cachep;
 794	int ret = 0;
 795
 796	gl = find_insert_glock(&name, NULL);
 797	if (gl) {
 798		*glp = gl;
 799		return 0;
 800	}
 801	if (!create)
 802		return -ENOENT;
 803
 804	if (glops->go_flags & GLOF_ASPACE)
 805		cachep = gfs2_glock_aspace_cachep;
 806	else
 807		cachep = gfs2_glock_cachep;
 808	gl = kmem_cache_alloc(cachep, GFP_NOFS);
 809	if (!gl)
 810		return -ENOMEM;
 811
 812	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 813
 814	if (glops->go_flags & GLOF_LVB) {
 815		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
 816		if (!gl->gl_lksb.sb_lvbptr) {
 817			kmem_cache_free(cachep, gl);
 818			return -ENOMEM;
 819		}
 820	}
 821
 822	atomic_inc(&sdp->sd_glock_disposal);
 823	gl->gl_node.next = NULL;
 824	gl->gl_flags = 0;
 825	gl->gl_name = name;
 826	gl->gl_lockref.count = 1;
 827	gl->gl_state = LM_ST_UNLOCKED;
 828	gl->gl_target = LM_ST_UNLOCKED;
 829	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 830	gl->gl_ops = glops;
 831	gl->gl_dstamp = 0;
 832	preempt_disable();
 833	/* We use the global stats to estimate the initial per-glock stats */
 834	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
 835	preempt_enable();
 836	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
 837	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
 838	gl->gl_tchange = jiffies;
 839	gl->gl_object = NULL;
 840	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 841	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 842	INIT_WORK(&gl->gl_delete, delete_work_func);
 843
 844	mapping = gfs2_glock2aspace(gl);
 845	if (mapping) {
  846		mapping->a_ops = &gfs2_meta_aops;
 847		mapping->host = s->s_bdev->bd_inode;
 848		mapping->flags = 0;
 849		mapping_set_gfp_mask(mapping, GFP_NOFS);
 850		mapping->private_data = NULL;
 851		mapping->writeback_index = 0;
 852	}
 853
 854	tmp = find_insert_glock(&name, gl);
 855	if (!tmp) {
 856		*glp = gl;
 857		goto out;
 858	}
 859	if (IS_ERR(tmp)) {
 860		ret = PTR_ERR(tmp);
 861		goto out_free;
 862	}
 863	*glp = tmp;
 864
 865out_free:
 866	kfree(gl->gl_lksb.sb_lvbptr);
 867	kmem_cache_free(cachep, gl);
 868	atomic_dec(&sdp->sd_glock_disposal);
 869
 870out:
 871	return ret;
 872}
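/*
 * Editor's note (not in the original source): a sketch of the
 * lookup-or-create pattern, assuming "sdp" and "no_addr" are valid:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	(use the glock here, e.g. initialize and enqueue holders on it)
 *	gfs2_glock_put(gl);
 */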
 873
 874/**
 875 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 876 * @gl: the glock
 877 * @state: the state we're requesting
 878 * @flags: the modifier flags
 879 * @gh: the holder structure
 880 *
 881 */
 882
 883void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
 884		      struct gfs2_holder *gh)
 885{
 886	INIT_LIST_HEAD(&gh->gh_list);
 887	gh->gh_gl = gl;
 888	gh->gh_ip = _RET_IP_;
 889	gh->gh_owner_pid = get_pid(task_pid(current));
 890	gh->gh_state = state;
 891	gh->gh_flags = flags;
 892	gh->gh_error = 0;
 893	gh->gh_iflags = 0;
 894	gfs2_glock_hold(gl);
 895}
 896
 897/**
 898 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 899 * @state: the state we're requesting
 900 * @flags: the modifier flags
 901 * @gh: the holder structure
 902 *
 903 * Don't mess with the glock.
 904 *
 905 */
 906
 907void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
 908{
 909	gh->gh_state = state;
 910	gh->gh_flags = flags;
 911	gh->gh_iflags = 0;
 912	gh->gh_ip = _RET_IP_;
 913	put_pid(gh->gh_owner_pid);
 914	gh->gh_owner_pid = get_pid(task_pid(current));
 915}
 916
 917/**
 918 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 919 * @gh: the holder structure
 920 *
 921 */
 922
 923void gfs2_holder_uninit(struct gfs2_holder *gh)
 924{
 925	put_pid(gh->gh_owner_pid);
 926	gfs2_glock_put(gh->gh_gl);
 927	gfs2_holder_mark_uninitialized(gh);
 928	gh->gh_ip = 0;
 929}
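/*
 * Editor's note (not in the original source): the usual holder life cycle.
 * gfs2_glock_nq_init() in glock.h wraps the first two steps. A sketch:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	(the lock is held here)
 *	gfs2_glock_dq_uninit(&gh);
 */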
 930
 931/**
 932 * gfs2_glock_wait - wait on a glock acquisition
 933 * @gh: the glock holder
 934 *
 935 * Returns: 0 on success
 936 */
 937
 938int gfs2_glock_wait(struct gfs2_holder *gh)
 939{
 940	unsigned long time1 = jiffies;
 941
 942	might_sleep();
 943	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
 944	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
 945		/* Lengthen the minimum hold time. */
 946		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
 947					      GL_GLOCK_HOLD_INCR,
 948					      GL_GLOCK_MAX_HOLD);
 949	return gh->gh_error;
 950}
 951
 952/**
 953 * handle_callback - process a demote request
 954 * @gl: the glock
  955 * @state: the state the caller wants us to change to
  956 * @delay: demote delay in jiffies (0 = demote now); @remote: true if from the DLM
  957 * There are only two requests that we are going to see in actual
  958 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
  959 */
 960
 961static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 962			    unsigned long delay, bool remote)
 963{
 964	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 965
 966	set_bit(bit, &gl->gl_flags);
 967	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 968		gl->gl_demote_state = state;
 969		gl->gl_demote_time = jiffies;
 970	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 971			gl->gl_demote_state != state) {
 972		gl->gl_demote_state = LM_ST_UNLOCKED;
 973	}
 974	if (gl->gl_ops->go_callback)
 975		gl->gl_ops->go_callback(gl, remote);
 976	trace_gfs2_demote_rq(gl, remote);
 977}
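/*
 * Editor's worked example (not in the original source): demote requests
 * that cannot both be satisfied collapse to a full unlock. If a demote to
 * LM_ST_SHARED is already pending (gl_demote_state == LM_ST_SHARED) and a
 * second callback asks for LM_ST_DEFERRED, the branch above sets
 * gl_demote_state to LM_ST_UNLOCKED.
 */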
 978
 979void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 980{
 981	struct va_format vaf;
 982	va_list args;
 983
 984	va_start(args, fmt);
 985
 986	if (seq) {
 987		seq_vprintf(seq, fmt, args);
 988	} else {
 989		vaf.fmt = fmt;
 990		vaf.va = &args;
 991
 992		pr_err("%pV", &vaf);
 993	}
 994
 995	va_end(args);
 996}
 997
 998/**
 999 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1000 * @gh: the holder structure to add
1001 *
1002 * Eventually we should move the recursive locking trap to a
1003 * debugging option or something like that. This is the fast
1004 * path and needs to have the minimum number of distractions.
1005 * 
1006 */
1007
1008static inline void add_to_queue(struct gfs2_holder *gh)
1009__releases(&gl->gl_lockref.lock)
1010__acquires(&gl->gl_lockref.lock)
1011{
1012	struct gfs2_glock *gl = gh->gh_gl;
1013	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1014	struct list_head *insert_pt = NULL;
1015	struct gfs2_holder *gh2;
1016	int try_futile = 0;
1017
1018	BUG_ON(gh->gh_owner_pid == NULL);
1019	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1020		BUG();
1021
1022	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1023		if (test_bit(GLF_LOCK, &gl->gl_flags))
1024			try_futile = !may_grant(gl, gh);
1025		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1026			goto fail;
1027	}
1028
1029	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1030		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
1031		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
1032			goto trap_recursive;
1033		if (try_futile &&
1034		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1035fail:
1036			gh->gh_error = GLR_TRYFAILED;
1037			gfs2_holder_wake(gh);
1038			return;
1039		}
1040		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1041			continue;
1042		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1043			insert_pt = &gh2->gh_list;
1044	}
1045	set_bit(GLF_QUEUED, &gl->gl_flags);
1046	trace_gfs2_glock_queue(gh, 1);
1047	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1048	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1049	if (likely(insert_pt == NULL)) {
1050		list_add_tail(&gh->gh_list, &gl->gl_holders);
1051		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1052			goto do_cancel;
1053		return;
1054	}
1055	list_add_tail(&gh->gh_list, insert_pt);
1056do_cancel:
1057	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1058	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1059		spin_unlock(&gl->gl_lockref.lock);
1060		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1061			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1062		spin_lock(&gl->gl_lockref.lock);
1063	}
1064	return;
1065
1066trap_recursive:
1067	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
1068	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
1069	pr_err("lock type: %d req lock state : %d\n",
1070	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1071	pr_err("new: %pSR\n", (void *)gh->gh_ip);
1072	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
1073	pr_err("lock type: %d req lock state : %d\n",
1074	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1075	gfs2_dump_glock(NULL, gl);
1076	BUG();
1077}
1078
1079/**
1080 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1081 * @gh: the holder structure
1082 *
1083 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1084 *
1085 * Returns: 0, GLR_TRYFAILED, or errno on failure
1086 */
1087
1088int gfs2_glock_nq(struct gfs2_holder *gh)
1089{
1090	struct gfs2_glock *gl = gh->gh_gl;
1091	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1092	int error = 0;
1093
1094	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1095		return -EIO;
1096
1097	if (test_bit(GLF_LRU, &gl->gl_flags))
1098		gfs2_glock_remove_from_lru(gl);
1099
1100	spin_lock(&gl->gl_lockref.lock);
1101	add_to_queue(gh);
1102	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1103		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1104		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1105		gl->gl_lockref.count++;
1106		__gfs2_glock_queue_work(gl, 0);
1107	}
1108	run_queue(gl, 1);
1109	spin_unlock(&gl->gl_lockref.lock);
1110
1111	if (!(gh->gh_flags & GL_ASYNC))
1112		error = gfs2_glock_wait(gh);
1113
1114	return error;
1115}
1116
1117/**
1118 * gfs2_glock_poll - poll to see if an async request has been completed
1119 * @gh: the holder
1120 *
1121 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1122 */
1123
1124int gfs2_glock_poll(struct gfs2_holder *gh)
1125{
1126	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1127}
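/*
 * Editor's note (not in the original source): an asynchronous acquisition
 * using GL_ASYNC, polled with gfs2_glock_poll() and completed with
 * gfs2_glock_wait(). A sketch:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);	(never returns an error with GL_ASYNC)
 *	while (!gfs2_glock_poll(&gh))
 *		(do other work);
 *	error = gfs2_glock_wait(&gh);
 */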
1128
1129/**
1130 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1131 * @gh: the glock holder
1132 *
1133 */
1134
1135void gfs2_glock_dq(struct gfs2_holder *gh)
1136{
1137	struct gfs2_glock *gl = gh->gh_gl;
1138	const struct gfs2_glock_operations *glops = gl->gl_ops;
1139	unsigned delay = 0;
1140	int fast_path = 0;
1141
1142	spin_lock(&gl->gl_lockref.lock);
1143	if (gh->gh_flags & GL_NOCACHE)
1144		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1145
1146	list_del_init(&gh->gh_list);
1147	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1148	if (find_first_holder(gl) == NULL) {
1149		if (glops->go_unlock) {
1150			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1151			spin_unlock(&gl->gl_lockref.lock);
1152			glops->go_unlock(gh);
1153			spin_lock(&gl->gl_lockref.lock);
1154			clear_bit(GLF_LOCK, &gl->gl_flags);
1155		}
1156		if (list_empty(&gl->gl_holders) &&
1157		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1158		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1159			fast_path = 1;
1160	}
1161	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
1162	    (glops->go_flags & GLOF_LRU))
1163		gfs2_glock_add_to_lru(gl);
1164
1165	trace_gfs2_glock_queue(gh, 0);
1166	if (unlikely(!fast_path)) {
1167		gl->gl_lockref.count++;
1168		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1169		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1170		    gl->gl_name.ln_type == LM_TYPE_INODE)
1171			delay = gl->gl_hold_time;
1172		__gfs2_glock_queue_work(gl, delay);
1173	}
1174	spin_unlock(&gl->gl_lockref.lock);
1175}
1176
1177void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1178{
1179	struct gfs2_glock *gl = gh->gh_gl;
1180	gfs2_glock_dq(gh);
1181	might_sleep();
1182	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1183}
1184
1185/**
1186 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1187 * @gh: the holder structure
1188 *
1189 */
1190
1191void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1192{
1193	gfs2_glock_dq(gh);
1194	gfs2_holder_uninit(gh);
1195}
1196
1197/**
1198 * gfs2_glock_nq_num - acquire a glock based on lock number
1199 * @sdp: the filesystem
1200 * @number: the lock number
1201 * @glops: the glock operations for the type of glock
1202 * @state: the state to acquire the glock in
1203 * @flags: modifier flags for the acquisition
1204 * @gh: the struct gfs2_holder
1205 *
1206 * Returns: errno
1207 */
1208
1209int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1210		      const struct gfs2_glock_operations *glops,
1211		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1212{
1213	struct gfs2_glock *gl;
1214	int error;
1215
1216	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1217	if (!error) {
1218		error = gfs2_glock_nq_init(gl, state, flags, gh);
1219		gfs2_glock_put(gl);
1220	}
1221
1222	return error;
1223}
1224
1225/**
1226 * glock_compare - Compare two struct gfs2_glock structures for sorting
1227 * @arg_a: the first structure
1228 * @arg_b: the second structure
1229 *
1230 */
1231
1232static int glock_compare(const void *arg_a, const void *arg_b)
1233{
1234	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1235	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1236	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1237	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1238
1239	if (a->ln_number > b->ln_number)
1240		return 1;
1241	if (a->ln_number < b->ln_number)
1242		return -1;
1243	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1244	return 0;
1245}
1246
1247/**
 1248 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1249 * @num_gh: the number of structures
1250 * @ghs: an array of struct gfs2_holder structures
1251 *
1252 * Returns: 0 on success (all glocks acquired),
1253 *          errno on failure (no glocks acquired)
1254 */
1255
1256static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1257		     struct gfs2_holder **p)
1258{
1259	unsigned int x;
1260	int error = 0;
1261
1262	for (x = 0; x < num_gh; x++)
1263		p[x] = &ghs[x];
1264
1265	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1266
1267	for (x = 0; x < num_gh; x++) {
1268		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1269
1270		error = gfs2_glock_nq(p[x]);
1271		if (error) {
1272			while (x--)
1273				gfs2_glock_dq(p[x]);
1274			break;
1275		}
1276	}
1277
1278	return error;
1279}
1280
1281/**
1282 * gfs2_glock_nq_m - acquire multiple glocks
1283 * @num_gh: the number of structures
1284 * @ghs: an array of struct gfs2_holder structures
1285 *
1286 *
1287 * Returns: 0 on success (all glocks acquired),
1288 *          errno on failure (no glocks acquired)
1289 */
1290
1291int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1292{
1293	struct gfs2_holder *tmp[4];
1294	struct gfs2_holder **pph = tmp;
1295	int error = 0;
1296
1297	switch(num_gh) {
1298	case 0:
1299		return 0;
1300	case 1:
1301		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1302		return gfs2_glock_nq(ghs);
1303	default:
1304		if (num_gh <= 4)
1305			break;
1306		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1307		if (!pph)
1308			return -ENOMEM;
1309	}
1310
1311	error = nq_m_sync(num_gh, ghs, pph);
1312
1313	if (pph != tmp)
1314		kfree(pph);
1315
1316	return error;
1317}
1318
1319/**
1320 * gfs2_glock_dq_m - release multiple glocks
1321 * @num_gh: the number of structures
1322 * @ghs: an array of struct gfs2_holder structures
1323 *
1324 */
1325
1326void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1327{
1328	while (num_gh--)
1329		gfs2_glock_dq(&ghs[num_gh]);
1330}
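/*
 * Editor's note (not in the original source): acquiring and releasing two
 * glocks together; gfs2_glock_nq_m() sorts the requests into a consistent
 * order to avoid deadlocks. "gl_a" and "gl_b" are hypothetical glocks:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */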
1331
1332void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1333{
1334	unsigned long delay = 0;
1335	unsigned long holdtime;
1336	unsigned long now = jiffies;
1337
1338	gfs2_glock_hold(gl);
1339	holdtime = gl->gl_tchange + gl->gl_hold_time;
1340	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1341	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1342		if (time_before(now, holdtime))
1343			delay = holdtime - now;
1344		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1345			delay = gl->gl_hold_time;
1346	}
1347
1348	spin_lock(&gl->gl_lockref.lock);
1349	handle_callback(gl, state, delay, true);
1350	__gfs2_glock_queue_work(gl, delay);
1351	spin_unlock(&gl->gl_lockref.lock);
1352}
1353
1354/**
1355 * gfs2_should_freeze - Figure out if glock should be frozen
1356 * @gl: The glock in question
1357 *
1358 * Glocks are not frozen if (a) the result of the dlm operation is
1359 * an error, (b) the locking operation was an unlock operation or
1360 * (c) if there is a "noexp" flagged request anywhere in the queue
1361 *
1362 * Returns: 1 if freezing should occur, 0 otherwise
1363 */
1364
1365static int gfs2_should_freeze(const struct gfs2_glock *gl)
1366{
1367	const struct gfs2_holder *gh;
1368
1369	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1370		return 0;
1371	if (gl->gl_target == LM_ST_UNLOCKED)
1372		return 0;
1373
1374	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1375		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1376			continue;
1377		if (LM_FLAG_NOEXP & gh->gh_flags)
1378			return 0;
1379	}
1380
1381	return 1;
1382}
1383
1384/**
1385 * gfs2_glock_complete - Callback used by locking
1386 * @gl: Pointer to the glock
1387 * @ret: The return value from the dlm
1388 *
 1389 * The gl_reply field is protected by gl_lockref.lock so that it is ok
1390 * to use a bitfield shared with other glock state fields.
1391 */
1392
1393void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1394{
1395	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1396
1397	spin_lock(&gl->gl_lockref.lock);
1398	gl->gl_reply = ret;
1399
1400	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1401		if (gfs2_should_freeze(gl)) {
1402			set_bit(GLF_FROZEN, &gl->gl_flags);
1403			spin_unlock(&gl->gl_lockref.lock);
1404			return;
1405		}
1406	}
1407
1408	gl->gl_lockref.count++;
1409	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1410	__gfs2_glock_queue_work(gl, 0);
1411	spin_unlock(&gl->gl_lockref.lock);
1412}
1413
1414static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1415{
1416	struct gfs2_glock *gla, *glb;
1417
1418	gla = list_entry(a, struct gfs2_glock, gl_lru);
1419	glb = list_entry(b, struct gfs2_glock, gl_lru);
1420
1421	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1422		return 1;
1423	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1424		return -1;
1425
1426	return 0;
1427}
1428
1429/**
1430 * gfs2_dispose_glock_lru - Demote a list of glocks
1431 * @list: The list to dispose of
1432 *
 1433 * Disposing of glocks may involve disk accesses, so here we sort
1434 * the glocks by number (i.e. disk location of the inodes) so that if
1435 * there are any such accesses, they'll be sent in order (mostly).
1436 *
1437 * Must be called under the lru_lock, but may drop and retake this
1438 * lock. While the lru_lock is dropped, entries may vanish from the
1439 * list, but no new entries will appear on the list (since it is
1440 * private)
1441 */
1442
1443static void gfs2_dispose_glock_lru(struct list_head *list)
1444__releases(&lru_lock)
1445__acquires(&lru_lock)
1446{
1447	struct gfs2_glock *gl;
1448
1449	list_sort(NULL, list, glock_cmp);
1450
1451	while(!list_empty(list)) {
1452		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1453		list_del_init(&gl->gl_lru);
1454		if (!spin_trylock(&gl->gl_lockref.lock)) {
1455add_back_to_lru:
1456			list_add(&gl->gl_lru, &lru_list);
1457			atomic_inc(&lru_count);
1458			continue;
1459		}
1460		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1461			spin_unlock(&gl->gl_lockref.lock);
1462			goto add_back_to_lru;
1463		}
1464		clear_bit(GLF_LRU, &gl->gl_flags);
1465		gl->gl_lockref.count++;
1466		if (demote_ok(gl))
1467			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1468		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1469		__gfs2_glock_queue_work(gl, 0);
1470		spin_unlock(&gl->gl_lockref.lock);
1471		cond_resched_lock(&lru_lock);
1472	}
1473}
1474
1475/**
1476 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1477 * @nr: The number of entries to scan
1478 *
1479 * This function selects the entries on the LRU which are able to
1480 * be demoted, and then kicks off the process by calling
1481 * gfs2_dispose_glock_lru() above.
1482 */
1483
1484static long gfs2_scan_glock_lru(int nr)
1485{
1486	struct gfs2_glock *gl;
1487	LIST_HEAD(skipped);
1488	LIST_HEAD(dispose);
1489	long freed = 0;
1490
1491	spin_lock(&lru_lock);
1492	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1493		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1494
1495		/* Test for being demotable */
1496		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1497			list_move(&gl->gl_lru, &dispose);
1498			atomic_dec(&lru_count);
1499			freed++;
1500			continue;
1501		}
1502
1503		list_move(&gl->gl_lru, &skipped);
1504	}
1505	list_splice(&skipped, &lru_list);
1506	if (!list_empty(&dispose))
1507		gfs2_dispose_glock_lru(&dispose);
1508	spin_unlock(&lru_lock);
1509
1510	return freed;
1511}
1512
1513static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1514					    struct shrink_control *sc)
1515{
1516	if (!(sc->gfp_mask & __GFP_FS))
1517		return SHRINK_STOP;
1518	return gfs2_scan_glock_lru(sc->nr_to_scan);
1519}
1520
1521static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1522					     struct shrink_control *sc)
1523{
1524	return vfs_pressure_ratio(atomic_read(&lru_count));
1525}
1526
1527static struct shrinker glock_shrinker = {
1528	.seeks = DEFAULT_SEEKS,
1529	.count_objects = gfs2_glock_shrink_count,
1530	.scan_objects = gfs2_glock_shrink_scan,
1531};
1532
1533/**
 1534 * glock_hash_walk - Call a function for each glock in the hash table
 1535 * @examiner: the function
 1536 * @sdp: the filesystem
 1537 *
1538 *
1539 * Note that the function can be called multiple times on the same
1540 * object.  So the user must ensure that the function can cope with
1541 * that.
1542 */
1543
1544static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1545{
1546	struct gfs2_glock *gl;
1547	struct rhashtable_iter iter;
1548
1549	rhashtable_walk_enter(&gl_hash_table, &iter);
1550
1551	do {
1552		rhashtable_walk_start(&iter);
1553
1554		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
1555			if (gl->gl_name.ln_sbd == sdp &&
1556			    lockref_get_not_dead(&gl->gl_lockref))
1557				examiner(gl);
1558
1559		rhashtable_walk_stop(&iter);
1560	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
1561
1562	rhashtable_walk_exit(&iter);
1563}
1564
1565/**
1566 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1567 * @gl: The glock to thaw
1568 *
1569 */
1570
1571static void thaw_glock(struct gfs2_glock *gl)
1572{
1573	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
1574		gfs2_glock_put(gl);
1575		return;
1576	}
1577	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1578	gfs2_glock_queue_work(gl, 0);
1579}
1580
1581/**
1582 * clear_glock - look at a glock and see if we can free it from glock cache
1583 * @gl: the glock to look at
1584 *
1585 */
1586
1587static void clear_glock(struct gfs2_glock *gl)
1588{
1589	gfs2_glock_remove_from_lru(gl);
1590
1591	spin_lock(&gl->gl_lockref.lock);
1592	if (gl->gl_state != LM_ST_UNLOCKED)
1593		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1594	__gfs2_glock_queue_work(gl, 0);
1595	spin_unlock(&gl->gl_lockref.lock);
1596}
1597
1598/**
1599 * gfs2_glock_thaw - Thaw any frozen glocks
1600 * @sdp: The super block
1601 *
1602 */
1603
1604void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1605{
1606	glock_hash_walk(thaw_glock, sdp);
1607}
1608
1609static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1610{
1611	spin_lock(&gl->gl_lockref.lock);
1612	gfs2_dump_glock(seq, gl);
1613	spin_unlock(&gl->gl_lockref.lock);
1614}
1615
1616static void dump_glock_func(struct gfs2_glock *gl)
1617{
1618	dump_glock(NULL, gl);
1619}
1620
1621/**
1622 * gfs2_gl_hash_clear - Empty out the glock hash table
1623 * @sdp: the filesystem
 1624 *
 1625 * Called when unmounting the filesystem.
 1626 *
1627 */
1628
1629void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1630{
1631	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1632	flush_workqueue(glock_workqueue);
1633	glock_hash_walk(clear_glock, sdp);
1634	flush_workqueue(glock_workqueue);
1635	wait_event_timeout(sdp->sd_glock_wait,
1636			   atomic_read(&sdp->sd_glock_disposal) == 0,
1637			   HZ * 600);
1638	glock_hash_walk(dump_glock_func, sdp);
1639}
1640
1641void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1642{
1643	struct gfs2_glock *gl = ip->i_gl;
1644	int ret;
1645
1646	ret = gfs2_truncatei_resume(ip);
1647	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1648
1649	spin_lock(&gl->gl_lockref.lock);
1650	clear_bit(GLF_LOCK, &gl->gl_flags);
1651	run_queue(gl, 1);
1652	spin_unlock(&gl->gl_lockref.lock);
1653}
1654
1655static const char *state2str(unsigned state)
1656{
1657	switch(state) {
1658	case LM_ST_UNLOCKED:
1659		return "UN";
1660	case LM_ST_SHARED:
1661		return "SH";
1662	case LM_ST_DEFERRED:
1663		return "DF";
1664	case LM_ST_EXCLUSIVE:
1665		return "EX";
1666	}
1667	return "??";
1668}
1669
1670static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
1671{
1672	char *p = buf;
1673	if (flags & LM_FLAG_TRY)
1674		*p++ = 't';
1675	if (flags & LM_FLAG_TRY_1CB)
1676		*p++ = 'T';
1677	if (flags & LM_FLAG_NOEXP)
1678		*p++ = 'e';
1679	if (flags & LM_FLAG_ANY)
1680		*p++ = 'A';
1681	if (flags & LM_FLAG_PRIORITY)
1682		*p++ = 'p';
1683	if (flags & GL_ASYNC)
1684		*p++ = 'a';
1685	if (flags & GL_EXACT)
1686		*p++ = 'E';
1687	if (flags & GL_NOCACHE)
1688		*p++ = 'c';
1689	if (test_bit(HIF_HOLDER, &iflags))
1690		*p++ = 'H';
1691	if (test_bit(HIF_WAIT, &iflags))
1692		*p++ = 'W';
1693	if (test_bit(HIF_FIRST, &iflags))
1694		*p++ = 'F';
1695	*p = 0;
1696	return buf;
1697}
1698
1699/**
1700 * dump_holder - print information about a glock holder
1701 * @seq: the seq_file struct
1702 * @gh: the glock holder
1703 *
1704 */
1705
1706static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1707{
1708	struct task_struct *gh_owner = NULL;
1709	char flags_buf[32];
1710
1711	rcu_read_lock();
1712	if (gh->gh_owner_pid)
1713		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1714	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1715		       state2str(gh->gh_state),
1716		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1717		       gh->gh_error,
1718		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1719		       gh_owner ? gh_owner->comm : "(ended)",
1720		       (void *)gh->gh_ip);
1721	rcu_read_unlock();
1722}
1723
1724static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1725{
1726	const unsigned long *gflags = &gl->gl_flags;
1727	char *p = buf;
1728
1729	if (test_bit(GLF_LOCK, gflags))
1730		*p++ = 'l';
1731	if (test_bit(GLF_DEMOTE, gflags))
1732		*p++ = 'D';
1733	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1734		*p++ = 'd';
1735	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1736		*p++ = 'p';
1737	if (test_bit(GLF_DIRTY, gflags))
1738		*p++ = 'y';
1739	if (test_bit(GLF_LFLUSH, gflags))
1740		*p++ = 'f';
1741	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1742		*p++ = 'i';
1743	if (test_bit(GLF_REPLY_PENDING, gflags))
1744		*p++ = 'r';
1745	if (test_bit(GLF_INITIAL, gflags))
1746		*p++ = 'I';
1747	if (test_bit(GLF_FROZEN, gflags))
1748		*p++ = 'F';
1749	if (test_bit(GLF_QUEUED, gflags))
1750		*p++ = 'q';
1751	if (test_bit(GLF_LRU, gflags))
1752		*p++ = 'L';
1753	if (gl->gl_object)
1754		*p++ = 'o';
1755	if (test_bit(GLF_BLOCKING, gflags))
1756		*p++ = 'b';
1757	*p = 0;
1758	return buf;
1759}
1760
1761/**
1762 * gfs2_dump_glock - print information about a glock
1763 * @seq: The seq_file struct
1764 * @gl: the glock
1765 *
1766 * The file format is as follows:
1767 * One line per object, capital letters are used to indicate objects
1768 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1769 * other objects are indented by a single space and follow the glock to
1770 * which they are related. Fields are indicated by lower case letters
1771 * followed by a colon and the field value, except for strings which are in
 1772 * [] so that it's possible to see if they are composed of spaces for
 1773 * example. The fields are n = number (id of the object), f = flags,
1774 * t = type, s = state, r = refcount, e = error, p = pid.
1775 *
1776 */
1777
1778void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1779{
1780	const struct gfs2_glock_operations *glops = gl->gl_ops;
1781	unsigned long long dtime;
1782	const struct gfs2_holder *gh;
1783	char gflags_buf[32];
1784
1785	dtime = jiffies - gl->gl_demote_time;
1786	dtime *= 1000000/HZ; /* demote time in uSec */
1787	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1788		dtime = 0;
1789	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1790		  state2str(gl->gl_state),
1791		  gl->gl_name.ln_type,
1792		  (unsigned long long)gl->gl_name.ln_number,
1793		  gflags2str(gflags_buf, gl),
1794		  state2str(gl->gl_target),
1795		  state2str(gl->gl_demote_state), dtime,
1796		  atomic_read(&gl->gl_ail_count),
1797		  atomic_read(&gl->gl_revokes),
1798		  (int)gl->gl_lockref.count, gl->gl_hold_time);
1799
1800	list_for_each_entry(gh, &gl->gl_holders, gh_list)
1801		dump_holder(seq, gh);
1802
1803	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1804		glops->go_dump(seq, gl);
1805}
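/*
 * Editor's illustration (not in the original source): a hypothetical dump
 * in the format described above - one G: line for the glock, followed by
 * one indented H: line per holder (all values invented):
 *
 *	G:  s:SH n:2/805f f:Iqo t:SH d:EX/0 a:0 v:0 r:3 m:200
 *	 H: s:SH f:H e:0 p:1568 [touch] gfs2_inode_lookup+0x90/0x2a0
 */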
1806
1807static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1808{
1809	struct gfs2_glock *gl = iter_ptr;
1810
1811	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1812		   gl->gl_name.ln_type,
1813		   (unsigned long long)gl->gl_name.ln_number,
1814		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1815		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1816		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1817		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1818		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1819		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1820		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1821		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1822	return 0;
1823}
1824
1825static const char *gfs2_gltype[] = {
1826	"type",
1827	"reserved",
1828	"nondisk",
1829	"inode",
1830	"rgrp",
1831	"meta",
1832	"iopen",
1833	"flock",
1834	"plock",
1835	"quota",
1836	"journal",
1837};
1838
1839static const char *gfs2_stype[] = {
1840	[GFS2_LKS_SRTT]		= "srtt",
1841	[GFS2_LKS_SRTTVAR]	= "srttvar",
1842	[GFS2_LKS_SRTTB]	= "srttb",
1843	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1844	[GFS2_LKS_SIRT]		= "sirt",
1845	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1846	[GFS2_LKS_DCOUNT]	= "dlm",
1847	[GFS2_LKS_QCOUNT]	= "queue",
1848};
1849
1850#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1851
1852static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1853{
1854	struct gfs2_sbd *sdp = seq->private;
1855	loff_t pos = *(loff_t *)iter_ptr;
1856	unsigned index = pos >> 3;
1857	unsigned subindex = pos & 0x07;
1858	int i;
1859
1860	if (index == 0 && subindex != 0)
1861		return 0;
1862
1863	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1864		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1865
1866	for_each_possible_cpu(i) {
 1867		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1868
1869		if (index == 0)
1870			seq_printf(seq, " %15u", i);
1871		else
1872			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1873				   lkstats[index - 1].stats[subindex]);
1874	}
1875	seq_putc(seq, '\n');
1876	return 0;
1877}
1878
1879int __init gfs2_glock_init(void)
1880{
1881	int i, ret;
1882
1883	ret = rhashtable_init(&gl_hash_table, &ht_parms);
1884	if (ret < 0)
1885		return ret;
1886
1887	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1888					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1889	if (!glock_workqueue) {
1890		rhashtable_destroy(&gl_hash_table);
1891		return -ENOMEM;
1892	}
1893	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1894						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1895						0);
1896	if (!gfs2_delete_workqueue) {
1897		destroy_workqueue(glock_workqueue);
1898		rhashtable_destroy(&gl_hash_table);
1899		return -ENOMEM;
1900	}
1901
1902	ret = register_shrinker(&glock_shrinker);
1903	if (ret) {
1904		destroy_workqueue(gfs2_delete_workqueue);
1905		destroy_workqueue(glock_workqueue);
1906		rhashtable_destroy(&gl_hash_table);
1907		return ret;
1908	}
1909
1910	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
1911		init_waitqueue_head(glock_wait_table + i);
1912
1913	return 0;
1914}
1915
1916void gfs2_glock_exit(void)
1917{
1918	unregister_shrinker(&glock_shrinker);
1919	rhashtable_destroy(&gl_hash_table);
1920	destroy_workqueue(glock_workqueue);
1921	destroy_workqueue(gfs2_delete_workqueue);
1922}
1923
1924static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
1925{
1926	struct gfs2_glock *gl = gi->gl;
1927
1928	if (gl) {
1929		if (n == 0)
1930			return;
1931		if (!lockref_put_not_zero(&gl->gl_lockref))
1932			gfs2_glock_queue_put(gl);
1933	}
1934	for (;;) {
1935		gl = rhashtable_walk_next(&gi->hti);
1936		if (IS_ERR_OR_NULL(gl)) {
1937			if (gl == ERR_PTR(-EAGAIN)) {
1938				n = 1;
1939				continue;
1940			}
1941			gl = NULL;
1942			break;
1943		}
1944		if (gl->gl_name.ln_sbd != gi->sdp)
1945			continue;
1946		if (n <= 1) {
1947			if (!lockref_get_not_dead(&gl->gl_lockref))
1948				continue;
1949			break;
1950		} else {
1951			if (__lockref_is_dead(&gl->gl_lockref))
1952				continue;
1953			n--;
1954		}
1955	}
1956	gi->gl = gl;
1957}
1958
1959static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1960	__acquires(RCU)
1961{
1962	struct gfs2_glock_iter *gi = seq->private;
1963	loff_t n;
1964
1965	/*
1966	 * We can either stay where we are, skip to the next hash table
1967	 * entry, or start from the beginning.
1968	 */
1969	if (*pos < gi->last_pos) {
1970		rhashtable_walk_exit(&gi->hti);
1971		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
1972		n = *pos + 1;
1973	} else {
1974		n = *pos - gi->last_pos;
1975	}
1976
1977	rhashtable_walk_start(&gi->hti);
1978
1979	gfs2_glock_iter_next(gi, n);
1980	gi->last_pos = *pos;
1981	return gi->gl;
1982}
1983
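    /*
     * A worked example of the resume logic above: suppose the last entry
     * handed out was at position 10 (gi->last_pos == 10).  A following
     * read at *pos == 11 takes the else branch with n = 1, advancing one
     * entry from where the walk stopped.  An lseek back to *pos == 3
     * instead restarts the rhashtable walk with n = *pos + 1 == 4, i.e.
     * it skips forward to the fourth entry from the start of the table.
     */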
1984static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1985				 loff_t *pos)
1986{
1987	struct gfs2_glock_iter *gi = seq->private;
1988
1989	(*pos)++;
1990	gi->last_pos = *pos;
1991	gfs2_glock_iter_next(gi, 1);
1992	return gi->gl;
1993}
1994
1995static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1996	__releases(RCU)
1997{
1998	struct gfs2_glock_iter *gi = seq->private;
1999
2000	rhashtable_walk_stop(&gi->hti);
2001}
2002
2003static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2004{
2005	dump_glock(seq, iter_ptr);
2006	return 0;
2007}
2008
2009static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2010{
2011	preempt_disable();
2012	if (*pos >= GFS2_NR_SBSTATS)
2013		return NULL;
2014	return pos;
2015}
2016
2017static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2018				   loff_t *pos)
2019{
2020	(*pos)++;
2021	if (*pos >= GFS2_NR_SBSTATS)
2022		return NULL;
2023	return pos;
2024}
2025
2026static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2027{
2028	preempt_enable();
2029}
2030
2031static const struct seq_operations gfs2_glock_seq_ops = {
2032	.start = gfs2_glock_seq_start,
2033	.next  = gfs2_glock_seq_next,
2034	.stop  = gfs2_glock_seq_stop,
2035	.show  = gfs2_glock_seq_show,
2036};
2037
2038static const struct seq_operations gfs2_glstats_seq_ops = {
2039	.start = gfs2_glock_seq_start,
2040	.next  = gfs2_glock_seq_next,
2041	.stop  = gfs2_glock_seq_stop,
2042	.show  = gfs2_glstats_seq_show,
2043};
2044
2045static const struct seq_operations gfs2_sbstats_seq_ops = {
2046	.start = gfs2_sbstats_seq_start,
2047	.next  = gfs2_sbstats_seq_next,
2048	.stop  = gfs2_sbstats_seq_stop,
2049	.show  = gfs2_sbstats_seq_show,
2050};
2051
2052#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2053
2054static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2055			      const struct seq_operations *ops)
2056{
2057	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
2058	if (ret == 0) {
2059		struct seq_file *seq = file->private_data;
2060		struct gfs2_glock_iter *gi = seq->private;
2061
2062		gi->sdp = inode->i_private;
2063		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2064		if (seq->buf)
2065			seq->size = GFS2_SEQ_GOODSIZE;
2066		/*
2067		 * Initially, we are "before" the first hash table entry; the
2068		 * first call to rhashtable_walk_next gets us the first entry.
2069		 */
2070		gi->last_pos = -1;
2071		gi->gl = NULL;
2072		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2073	}
2074	return ret;
2075}
2076
2077static int gfs2_glocks_open(struct inode *inode, struct file *file)
2078{
2079	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2080}
2081
2082static int gfs2_glocks_release(struct inode *inode, struct file *file)
2083{
2084	struct seq_file *seq = file->private_data;
2085	struct gfs2_glock_iter *gi = seq->private;
2086
2087	if (gi->gl)
2088		gfs2_glock_put(gi->gl);
2089	rhashtable_walk_exit(&gi->hti);
2090	return seq_release_private(inode, file);
2091}
2092
2093static int gfs2_glstats_open(struct inode *inode, struct file *file)
2094{
2095	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2096}
2097
2098static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2099{
2100	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
2101	if (ret == 0) {
2102		struct seq_file *seq = file->private_data;
2103		seq->private = inode->i_private;  /* sdp */
2104	}
2105	return ret;
2106}
2107
2108static const struct file_operations gfs2_glocks_fops = {
2109	.owner   = THIS_MODULE,
2110	.open    = gfs2_glocks_open,
2111	.read    = seq_read,
2112	.llseek  = seq_lseek,
2113	.release = gfs2_glocks_release,
2114};
2115
2116static const struct file_operations gfs2_glstats_fops = {
2117	.owner   = THIS_MODULE,
2118	.open    = gfs2_glstats_open,
2119	.read    = seq_read,
2120	.llseek  = seq_lseek,
2121	.release = gfs2_glocks_release,
2122};
2123
2124static const struct file_operations gfs2_sbstats_fops = {
2125	.owner   = THIS_MODULE,
2126	.open	 = gfs2_sbstats_open,
2127	.read    = seq_read,
2128	.llseek  = seq_lseek,
2129	.release = seq_release,
2130};
2131
2132int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2133{
2134	struct dentry *dent;
2135
2136	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2137	if (IS_ERR_OR_NULL(dent))
2138		goto fail;
2139	sdp->debugfs_dir = dent;
2140
2141	dent = debugfs_create_file("glocks",
2142				   S_IFREG | S_IRUGO,
2143				   sdp->debugfs_dir, sdp,
2144				   &gfs2_glocks_fops);
2145	if (IS_ERR_OR_NULL(dent))
2146		goto fail;
2147	sdp->debugfs_dentry_glocks = dent;
2148
2149	dent = debugfs_create_file("glstats",
2150				   S_IFREG | S_IRUGO,
2151				   sdp->debugfs_dir, sdp,
2152				   &gfs2_glstats_fops);
2153	if (IS_ERR_OR_NULL(dent))
2154		goto fail;
2155	sdp->debugfs_dentry_glstats = dent;
2156
2157	dent = debugfs_create_file("sbstats",
2158				   S_IFREG | S_IRUGO,
2159				   sdp->debugfs_dir, sdp,
2160				   &gfs2_sbstats_fops);
2161	if (IS_ERR_OR_NULL(dent))
2162		goto fail;
2163	sdp->debugfs_dentry_sbstats = dent;
2164
2165	return 0;
2166fail:
2167	gfs2_delete_debugfs_file(sdp);
2168	return dent ? PTR_ERR(dent) : -ENOMEM;
2169}
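    /*
     * For illustration only (userspace, not part of this file): a minimal
     * sketch of how the debugfs files created above can be consumed.  The
     * debugfs mount point /sys/kernel/debug and the file system table
     * name "myfs" are assumptions:
     *
     *	#include <stdio.h>
     *
     *	int main(void)
     *	{
     *		char line[512];
     *		FILE *f = fopen("/sys/kernel/debug/gfs2/myfs/glocks", "r");
     *
     *		if (!f)
     *			return 1;
     *		while (fgets(line, sizeof(line), f))
     *			if (line[0] == 'G')
     *				fputs(line, stdout);
     *		fclose(f);
     *		return 0;
     *	}
     */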
2170
2171void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2172{
2173	if (sdp->debugfs_dir) {
2174		if (sdp->debugfs_dentry_glocks) {
2175			debugfs_remove(sdp->debugfs_dentry_glocks);
2176			sdp->debugfs_dentry_glocks = NULL;
2177		}
2178		if (sdp->debugfs_dentry_glstats) {
2179			debugfs_remove(sdp->debugfs_dentry_glstats);
2180			sdp->debugfs_dentry_glstats = NULL;
2181		}
2182		if (sdp->debugfs_dentry_sbstats) {
2183			debugfs_remove(sdp->debugfs_dentry_sbstats);
2184			sdp->debugfs_dentry_sbstats = NULL;
2185		}
2186		debugfs_remove(sdp->debugfs_dir);
2187		sdp->debugfs_dir = NULL;
2188	}
2189}
2190
2191int gfs2_register_debugfs(void)
2192{
2193	gfs2_root = debugfs_create_dir("gfs2", NULL);
2194	if (IS_ERR(gfs2_root))
2195		return PTR_ERR(gfs2_root);
2196	return gfs2_root ? 0 : -ENOMEM;
2197}
2198
2199void gfs2_unregister_debugfs(void)
2200{
2201	debugfs_remove(gfs2_root);
2202	gfs2_root = NULL;
2203}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/sched.h>
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/buffer_head.h>
  13#include <linux/delay.h>
  14#include <linux/sort.h>
  15#include <linux/hash.h>
  16#include <linux/jhash.h>
  17#include <linux/kallsyms.h>
  18#include <linux/gfs2_ondisk.h>
  19#include <linux/list.h>
  20#include <linux/wait.h>
  21#include <linux/module.h>
  22#include <linux/uaccess.h>
  23#include <linux/seq_file.h>
  24#include <linux/debugfs.h>
  25#include <linux/kthread.h>
  26#include <linux/freezer.h>
  27#include <linux/workqueue.h>
  28#include <linux/jiffies.h>
  29#include <linux/rcupdate.h>
  30#include <linux/rculist_bl.h>
  31#include <linux/bit_spinlock.h>
  32#include <linux/percpu.h>
  33#include <linux/list_sort.h>
  34#include <linux/lockref.h>
  35#include <linux/rhashtable.h>
  36#include <linux/pid_namespace.h>
  37#include <linux/fdtable.h>
  38#include <linux/file.h>
  39
  40#include "gfs2.h"
  41#include "incore.h"
  42#include "glock.h"
  43#include "glops.h"
  44#include "inode.h"
  45#include "lops.h"
  46#include "meta_io.h"
  47#include "quota.h"
  48#include "super.h"
  49#include "util.h"
  50#include "bmap.h"
  51#define CREATE_TRACE_POINTS
  52#include "trace_gfs2.h"
  53
  54struct gfs2_glock_iter {
  55	struct gfs2_sbd *sdp;		/* incore superblock           */
  56	struct rhashtable_iter hti;	/* rhashtable iterator         */
  57	struct gfs2_glock *gl;		/* current glock struct        */
  58	loff_t last_pos;		/* last position               */
  59};
  60
  61typedef void (*glock_examiner) (struct gfs2_glock * gl);
  62
  63static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  64static void __gfs2_glock_dq(struct gfs2_holder *gh);
  65static void handle_callback(struct gfs2_glock *gl, unsigned int state,
  66			    unsigned long delay, bool remote);
  67
  68static struct dentry *gfs2_root;
  69static struct workqueue_struct *glock_workqueue;
  70struct workqueue_struct *gfs2_delete_workqueue;
  71static LIST_HEAD(lru_list);
  72static atomic_t lru_count = ATOMIC_INIT(0);
  73static DEFINE_SPINLOCK(lru_lock);
  74
  75#define GFS2_GL_HASH_SHIFT      15
  76#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
  77
  78static const struct rhashtable_params ht_parms = {
  79	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
  80	.key_len = offsetofend(struct lm_lockname, ln_type),
  81	.key_offset = offsetof(struct gfs2_glock, gl_name),
  82	.head_offset = offsetof(struct gfs2_glock, gl_node),
  83};
  84
  85static struct rhashtable gl_hash_table;
  86
  87#define GLOCK_WAIT_TABLE_BITS 12
  88#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
  89static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
  90
  91struct wait_glock_queue {
  92	struct lm_lockname *name;
  93	wait_queue_entry_t wait;
  94};
  95
  96static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
  97			       int sync, void *key)
  98{
  99	struct wait_glock_queue *wait_glock =
 100		container_of(wait, struct wait_glock_queue, wait);
 101	struct lm_lockname *wait_name = wait_glock->name;
 102	struct lm_lockname *wake_name = key;
 103
 104	if (wake_name->ln_sbd != wait_name->ln_sbd ||
 105	    wake_name->ln_number != wait_name->ln_number ||
 106	    wake_name->ln_type != wait_name->ln_type)
 107		return 0;
 108	return autoremove_wake_function(wait, mode, sync, key);
 109}
 110
 111static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
 112{
 113	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
 114
 115	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
 116}
 117
 118/**
 119 * wake_up_glock  -  Wake up waiters on a glock
 120 * @gl: the glock
 121 */
 122static void wake_up_glock(struct gfs2_glock *gl)
 123{
 124	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
 125
 126	if (waitqueue_active(wq))
 127		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
 128}
 129
 130static void gfs2_glock_dealloc(struct rcu_head *rcu)
 131{
 132	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 133
 134	kfree(gl->gl_lksb.sb_lvbptr);
 135	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 136		struct gfs2_glock_aspace *gla =
 137			container_of(gl, struct gfs2_glock_aspace, glock);
 138		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
 139	} else
 140		kmem_cache_free(gfs2_glock_cachep, gl);
 141}
 142
 143/**
 144 * glock_blocked_by_withdraw - determine if we can still use a glock
 145 * @gl: the glock
 146 *
 147 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 148 * when we're withdrawn. For example, to maintain metadata integrity, we should
 149 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
 150 * iopen or the transaction glocks, may be safely used because none of their
 151 * metadata goes through the journal. So in general, we should disallow all
 152 * glocks that are journaled, and allow all the others. One exception is:
 153 * we need to allow our active journal to be promoted and demoted so others
 154 * may recover it and we can reacquire it when they're done.
 155 */
 156static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
 157{
 158	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 159
 160	if (likely(!gfs2_withdrawn(sdp)))
 161		return false;
 162	if (gl->gl_ops->go_flags & GLOF_NONDISK)
 163		return false;
 164	if (!sdp->sd_jdesc ||
 165	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
 166		return false;
 167	return true;
 168}
 169
 170void gfs2_glock_free(struct gfs2_glock *gl)
 171{
 172	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 173
 174	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
 175	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 176	smp_mb();
 177	wake_up_glock(gl);
 178	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 179	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 180		wake_up(&sdp->sd_glock_wait);
 181}
 182
 183/**
 184 * gfs2_glock_hold() - increment reference count on glock
 185 * @gl: The glock to hold
 186 *
 187 */
 188
 189struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
 190{
 191	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 192	lockref_get(&gl->gl_lockref);
 193	return gl;
 194}
 195
 196/**
 197 * demote_ok - Check to see if it's ok to unlock a glock
 198 * @gl: the glock
 199 *
 200 * Returns: 1 if it's ok
 201 */
 202
 203static int demote_ok(const struct gfs2_glock *gl)
 204{
 205	const struct gfs2_glock_operations *glops = gl->gl_ops;
 206
 207	if (gl->gl_state == LM_ST_UNLOCKED)
 208		return 0;
 209	if (!list_empty(&gl->gl_holders))
 210		return 0;
 211	if (glops->go_demote_ok)
 212		return glops->go_demote_ok(gl);
 213	return 1;
 214}
 215
 216
 217void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 218{
 219	if (!(gl->gl_ops->go_flags & GLOF_LRU))
 220		return;
 221
 222	spin_lock(&lru_lock);
 223
 224	list_move_tail(&gl->gl_lru, &lru_list);
 225
 226	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
 227		set_bit(GLF_LRU, &gl->gl_flags);
 228		atomic_inc(&lru_count);
 229	}
 230
 231	spin_unlock(&lru_lock);
 232}
 233
 234static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 235{
 236	if (!(gl->gl_ops->go_flags & GLOF_LRU))
 237		return;
 238
 239	spin_lock(&lru_lock);
 240	if (test_bit(GLF_LRU, &gl->gl_flags)) {
 241		list_del_init(&gl->gl_lru);
 242		atomic_dec(&lru_count);
 243		clear_bit(GLF_LRU, &gl->gl_flags);
 244	}
 245	spin_unlock(&lru_lock);
 246}
 247
 248/*
 249 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 250 * work queue.
 251 */
 252static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 253	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 254		/*
 255		 * We are holding the lockref spinlock, and the work was still
 256		 * queued above.  The queued work (glock_work_func) takes that
 257		 * spinlock before dropping its glock reference(s), so it
 258		 * cannot have dropped them in the meantime.
 259		 */
 260		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
 261		gl->gl_lockref.count--;
 262	}
 263}
 264
 265static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 266	spin_lock(&gl->gl_lockref.lock);
 267	__gfs2_glock_queue_work(gl, delay);
 268	spin_unlock(&gl->gl_lockref.lock);
 269}
 270
 271static void __gfs2_glock_put(struct gfs2_glock *gl)
 272{
 273	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 274	struct address_space *mapping = gfs2_glock2aspace(gl);
 275
 276	lockref_mark_dead(&gl->gl_lockref);
 277
 278	gfs2_glock_remove_from_lru(gl);
 279	spin_unlock(&gl->gl_lockref.lock);
 280	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 281	if (mapping) {
 282		truncate_inode_pages_final(mapping);
 283		if (!gfs2_withdrawn(sdp))
 284			GLOCK_BUG_ON(gl, !mapping_empty(mapping));
 285	}
 286	trace_gfs2_glock_put(gl);
 287	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 288}
 289
 290/*
 291 * Cause the glock to be put in work queue context.
 292 */
 293void gfs2_glock_queue_put(struct gfs2_glock *gl)
 294{
 295	gfs2_glock_queue_work(gl, 0);
 296}
 297
 298/**
 299 * gfs2_glock_put() - Decrement reference count on glock
 300 * @gl: The glock to put
 301 *
 302 */
 303
 304void gfs2_glock_put(struct gfs2_glock *gl)
 305{
 306	if (lockref_put_or_lock(&gl->gl_lockref))
 307		return;
 308
 309	__gfs2_glock_put(gl);
 310}
 311
 312/**
 313 * may_grant - check if it's ok to grant a new lock
 314 * @gl: The glock
 315 * @current_gh: One of the current holders of @gl
 316 * @gh: The lock request which we wish to grant
 317 *
 318 * With our current compatibility rules, if a glock has one or more active
 319 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
 320 * @current_gh; they are all the same as far as compatibility with the new @gh
 321 * goes.
 322 *
 323 * Returns true if it's ok to grant the lock.
 324 */
 325
 326static inline bool may_grant(struct gfs2_glock *gl,
 327			     struct gfs2_holder *current_gh,
 328			     struct gfs2_holder *gh)
 329{
 330	if (current_gh) {
 331		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
 332
 333		switch(current_gh->gh_state) {
 334		case LM_ST_EXCLUSIVE:
 335			/*
 336			 * Here we make a special exception to grant holders
 337			 * who agree to share the EX lock with other holders
 338			 * who also have the bit set. If the original holder
 339			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
 340			 * holders with the bit set.
 341			 */
 342			return gh->gh_state == LM_ST_EXCLUSIVE &&
 343			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
 344			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);
 345
 346		case LM_ST_SHARED:
 347		case LM_ST_DEFERRED:
 348			return gh->gh_state == current_gh->gh_state;
 349
 350		default:
 351			return false;
 352		}
 353	}
 354
 355	if (gl->gl_state == gh->gh_state)
 356		return true;
 357	if (gh->gh_flags & GL_EXACT)
 358		return false;
 359	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 360		return gh->gh_state == LM_ST_SHARED ||
 361		       gh->gh_state == LM_ST_DEFERRED;
 362	}
 363	if (gh->gh_flags & LM_FLAG_ANY)
 364		return gl->gl_state != LM_ST_UNLOCKED;
 365	return false;
 366}
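    /*
     * Illustrative summary of the rules above for the no-current-holder
     * case; rows are the current glock state, columns the requested
     * state, and "yes" means may_grant() returns true for a request
     * carrying neither GL_EXACT nor LM_FLAG_ANY:
     *
     *           UN    SH    DF    EX
     *     UN   yes    no    no    no
     *     SH    no   yes    no    no
     *     DF    no    no   yes    no
     *     EX    no   yes   yes   yes
     *
     * GL_EXACT restricts this to the diagonal, and LM_FLAG_ANY widens it
     * to accept any current state other than UN.
     */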
 367
 368static void gfs2_holder_wake(struct gfs2_holder *gh)
 369{
 370	clear_bit(HIF_WAIT, &gh->gh_iflags);
 371	smp_mb__after_atomic();
 372	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 373	if (gh->gh_flags & GL_ASYNC) {
 374		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
 375
 376		wake_up(&sdp->sd_async_glock_wait);
 377	}
 378}
 379
 380/**
 381 * do_error - Something unexpected has happened during a lock request
 382 * @gl: The glock
 383 * @ret: The status from the DLM
 384 */
 385
 386static void do_error(struct gfs2_glock *gl, const int ret)
 387{
 388	struct gfs2_holder *gh, *tmp;
 389
 390	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 391		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 392			continue;
 393		if (ret & LM_OUT_ERROR)
 394			gh->gh_error = -EIO;
 395		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 396			gh->gh_error = GLR_TRYFAILED;
 397		else
 398			continue;
 399		list_del_init(&gh->gh_list);
 400		trace_gfs2_glock_queue(gh, 0);
 401		gfs2_holder_wake(gh);
 402	}
 403}
 404
 405/**
 406 * find_first_holder - find the first "holder" gh
 407 * @gl: the glock
 408 */
 409
 410static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 411{
 412	struct gfs2_holder *gh;
 413
 414	if (!list_empty(&gl->gl_holders)) {
 415		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
 416				      gh_list);
 417		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 418			return gh;
 419	}
 420	return NULL;
 421}
 422
 423/*
 424 * gfs2_instantiate - Call the glops instantiate function
 425 * @gh: The glock holder
 426 *
 427 * Returns: 0 if instantiate was successful, or error.
 428 */
 429int gfs2_instantiate(struct gfs2_holder *gh)
 430{
 431	struct gfs2_glock *gl = gh->gh_gl;
 432	const struct gfs2_glock_operations *glops = gl->gl_ops;
 433	int ret;
 434
 435again:
 436	if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
 437		goto done;
 438
 439	/*
 440	 * Since we unlock the lockref lock, we set a flag to indicate
 441	 * instantiate is in progress.
 442	 */
 443	if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
 444		wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
 445			    TASK_UNINTERRUPTIBLE);
 446		/*
 447		 * Here we just waited for a different instantiate to finish.
 448		 * But that may not have been successful, as when a process
 449		 * locks an inode glock _before_ it has an actual inode to
 450		 * instantiate into. So we check again. This process might
 451		 * have an inode to instantiate, so might be successful.
 452		 */
 453		goto again;
 454	}
 455
 456	ret = glops->go_instantiate(gl);
 457	if (!ret)
 458		clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
 459	clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
 460	if (ret)
 461		return ret;
 462
 463done:
 464	if (glops->go_held)
 465		return glops->go_held(gh);
 466	return 0;
 467}
 468
 469/**
 470 * do_promote - promote as many requests as possible on the current queue
 471 * @gl: The glock
 472 * 
 473 * Returns: 1 if there is a blocked holder at the head of the list
 474 */
 475
 476static int do_promote(struct gfs2_glock *gl)
 477{
 478	struct gfs2_holder *gh, *current_gh;
 479
 480	current_gh = find_first_holder(gl);
 481	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 482		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 483			continue;
 484		if (!may_grant(gl, current_gh, gh)) {
 485			/*
 486			 * If we get here, it means we may not grant this
 487			 * holder for some reason. If this holder is at the
 488			 * head of the list, it means we have a blocked holder
 489			 * at the head, so return 1.
 490			 */
 491			if (list_is_first(&gh->gh_list, &gl->gl_holders))
 492				return 1;
 493			do_error(gl, 0);
 494			break;
 495		}
 496		set_bit(HIF_HOLDER, &gh->gh_iflags);
 497		trace_gfs2_promote(gh);
 498		gfs2_holder_wake(gh);
 499		if (!current_gh)
 500			current_gh = gh;
 501	}
 502	return 0;
 503}
 504
 505/**
 506 * find_first_waiter - find the first gh that's waiting for the glock
 507 * @gl: the glock
 508 */
 509
 510static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 511{
 512	struct gfs2_holder *gh;
 513
 514	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 515		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 516			return gh;
 517	}
 518	return NULL;
 519}
 520
 521/**
 522 * state_change - record that the glock is now in a different state
 523 * @gl: the glock
 524 * @new_state: the new state
 525 */
 526
 527static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 528{
 529	int held1, held2;
 530
 531	held1 = (gl->gl_state != LM_ST_UNLOCKED);
 532	held2 = (new_state != LM_ST_UNLOCKED);
 533
 534	if (held1 != held2) {
 535		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 536		if (held2)
 537			gl->gl_lockref.count++;
 538		else
 539			gl->gl_lockref.count--;
 540	}
 541	if (new_state != gl->gl_target)
 542		/* shorten our minimum hold time */
 543		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 544				       GL_GLOCK_MIN_HOLD);
 545	gl->gl_state = new_state;
 546	gl->gl_tchange = jiffies;
 547}
 548
 549static void gfs2_set_demote(struct gfs2_glock *gl)
 550{
 551	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 552
 553	set_bit(GLF_DEMOTE, &gl->gl_flags);
 554	smp_mb();
 555	wake_up(&sdp->sd_async_glock_wait);
 556}
 557
 558static void gfs2_demote_wake(struct gfs2_glock *gl)
 559{
 560	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 561	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 562	smp_mb__after_atomic();
 563	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 564}
 565
 566/**
 567 * finish_xmote - The DLM has replied to one of our lock requests
 568 * @gl: The glock
 569 * @ret: The status from the DLM
 570 *
 571 */
 572
 573static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 574{
 575	const struct gfs2_glock_operations *glops = gl->gl_ops;
 576	struct gfs2_holder *gh;
 577	unsigned state = ret & LM_OUT_ST_MASK;
 578
 579	spin_lock(&gl->gl_lockref.lock);
 580	trace_gfs2_glock_state_change(gl, state);
 581	state_change(gl, state);
 582	gh = find_first_waiter(gl);
 583
 584	/* Demote to UN request arrived during demote to SH or DF */
 585	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 586	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 587		gl->gl_target = LM_ST_UNLOCKED;
 588
 589	/* Check for state != intended state */
 590	if (unlikely(state != gl->gl_target)) {
 591		if (gh && (ret & LM_OUT_CANCELED))
 592			gfs2_holder_wake(gh);
 593		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 594			/* move to back of queue and try next entry */
 595			if (ret & LM_OUT_CANCELED) {
 596				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 597					list_move_tail(&gh->gh_list, &gl->gl_holders);
 598				gh = find_first_waiter(gl);
 599				gl->gl_target = gh->gh_state;
 600				goto retry;
 601			}
 602			/* Some error or failed "try lock" - report it */
 603			if ((ret & LM_OUT_ERROR) ||
 604			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 605				gl->gl_target = gl->gl_state;
 606				do_error(gl, ret);
 607				goto out;
 608			}
 609		}
 610		switch(state) {
 611		/* Unlocked due to conversion deadlock, try again */
 612		case LM_ST_UNLOCKED:
 613retry:
 614			do_xmote(gl, gh, gl->gl_target);
 615			break;
 616		/* Conversion fails, unlock and try again */
 617		case LM_ST_SHARED:
 618		case LM_ST_DEFERRED:
 619			do_xmote(gl, gh, LM_ST_UNLOCKED);
 620			break;
 621		default: /* Everything else */
 622			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
 623			       gl->gl_target, state);
 624			GLOCK_BUG_ON(gl, 1);
 625		}
 626		spin_unlock(&gl->gl_lockref.lock);
 627		return;
 628	}
 629
 630	/* Fast path - we got what we asked for */
 631	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 632		gfs2_demote_wake(gl);
 633	if (state != LM_ST_UNLOCKED) {
 634		if (glops->go_xmote_bh) {
 635			int rv;
 636
 637			spin_unlock(&gl->gl_lockref.lock);
 638			rv = glops->go_xmote_bh(gl);
 639			spin_lock(&gl->gl_lockref.lock);
 640			if (rv) {
 641				do_error(gl, rv);
 642				goto out;
 643			}
 644		}
 645		do_promote(gl);
 646	}
 647out:
 648	clear_bit(GLF_LOCK, &gl->gl_flags);
 649	spin_unlock(&gl->gl_lockref.lock);
 650}
 651
 652static bool is_system_glock(struct gfs2_glock *gl)
 653{
 654	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 655	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 656
 657	if (gl == m_ip->i_gl)
 658		return true;
 659	return false;
 660}
 661
 662/**
 663 * do_xmote - Calls the DLM to change the state of a lock
 664 * @gl: The lock state
 665 * @gh: The holder (only for promotes)
 666 * @target: The target lock state
 667 *
 668 */
 669
 670static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
 671					 unsigned int target)
 672__releases(&gl->gl_lockref.lock)
 673__acquires(&gl->gl_lockref.lock)
 674{
 675	const struct gfs2_glock_operations *glops = gl->gl_ops;
 676	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 677	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 678	int ret;
 679
 680	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
 681	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 682		goto skip_inval;
 683
 684	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 685		      LM_FLAG_PRIORITY);
 686	GLOCK_BUG_ON(gl, gl->gl_state == target);
 687	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 688	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 689	    glops->go_inval) {
 690		/*
 691		 * If another process is already doing the invalidate, let that
 692		 * finish first.  The glock state machine will get back to this
 693		 * holder again later.
 694		 */
 695		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
 696				     &gl->gl_flags))
 697			return;
 698		do_error(gl, 0); /* Fail queued try locks */
 699	}
 700	gl->gl_req = target;
 701	set_bit(GLF_BLOCKING, &gl->gl_flags);
 702	if ((gl->gl_req == LM_ST_UNLOCKED) ||
 703	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 704	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 705		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 706	spin_unlock(&gl->gl_lockref.lock);
 707	if (glops->go_sync) {
 708		ret = glops->go_sync(gl);
 709		/* If we had a problem syncing (due to I/O errors or whatever),
 710		 * we should not invalidate the metadata or tell dlm to
 711		 * release the glock to other nodes.
 712		 */
 713		if (ret) {
 714			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
 715				fs_err(sdp, "Error %d syncing glock\n", ret);
 716				gfs2_dump_glock(NULL, gl, true);
 717			}
 718			goto skip_inval;
 719		}
 720	}
 721	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
 722		/*
 723		 * The call to go_sync should have cleared out the ail list.
 724		 * If there are still items, we have a problem. We ought to
 725		 * withdraw, but we can't because the withdraw code also uses
 726		 * glocks. Warn about the error, dump the glock, then fall
 727		 * through and wait for logd to do the withdraw for us.
 728		 */
 729		if ((atomic_read(&gl->gl_ail_count) != 0) &&
 730		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
 731			gfs2_glock_assert_warn(gl,
 732					       !atomic_read(&gl->gl_ail_count));
 733			gfs2_dump_glock(NULL, gl, true);
 734		}
 735		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 736		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 737	}
 738
 739skip_inval:
 740	gfs2_glock_hold(gl);
 741	/*
 742	 * Check for an error encountered since we called go_sync and go_inval.
 743	 * If so, we can't withdraw from the glock code because the withdraw
 744	 * code itself uses glocks (see function signal_our_withdraw) to
 745	 * change the mount to read-only. Most importantly, we must not call
 746	 * dlm to unlock the glock until the journal is in a known good state
 747	 * (after journal replay) otherwise other nodes may use the object
 748	 * (rgrp or dinode) and then later, journal replay will corrupt the
 749	 * file system. The best we can do here is wait for the logd daemon
 750	 * to see sd_log_error and withdraw, and in the meantime, requeue the
 751	 * work for later.
 752	 *
 753	 * We make a special exception for some system glocks, such as the
 754	 * system statfs inode glock, which needs to be granted before the
 755	 * gfs2_quotad daemon can exit, and that exit needs to finish before
 756	 * we can unmount the withdrawn file system.
 757	 *
 758	 * However, if we're just unlocking the lock (say, for unmount, when
 759	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
 760	 * then it's okay to tell dlm to unlock it.
 761	 */
 762	if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
 763		gfs2_withdraw_delayed(sdp);
 764	if (glock_blocked_by_withdraw(gl) &&
 765	    (target != LM_ST_UNLOCKED ||
 766	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
 767		if (!is_system_glock(gl)) {
 768			handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */
 769			/*
 770			 * Ordinarily, we would call dlm and its callback would call
 771			 * finish_xmote, which would call state_change() to the new state.
 772			 * Since we withdrew, we won't call dlm, so call state_change
 773			 * manually, but to the UNLOCKED state we desire.
 774			 */
 775			state_change(gl, LM_ST_UNLOCKED);
 776			/*
 777			 * We skip telling dlm to do the locking, so we won't get a
 778			 * reply that would otherwise clear GLF_LOCK. So we clear it here.
 779			 */
 780			clear_bit(GLF_LOCK, &gl->gl_flags);
 781			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 782			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 783			goto out;
 784		} else {
 785			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 786		}
 787	}
 788
 789	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 790		/* lock_dlm */
 791		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 792		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 793		    target == LM_ST_UNLOCKED &&
 794		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
 795			finish_xmote(gl, target);
 796			gfs2_glock_queue_work(gl, 0);
 797		} else if (ret) {
 798			fs_err(sdp, "lm_lock ret %d\n", ret);
 799			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
 800		}
 801	} else { /* lock_nolock */
 802		finish_xmote(gl, target);
 803		gfs2_glock_queue_work(gl, 0);
 804	}
 805out:
 806	spin_lock(&gl->gl_lockref.lock);
 807}
 808
 809/**
 810 * run_queue - do all outstanding tasks related to a glock
 811 * @gl: The glock in question
 812 * @nonblock: True if we must not block in run_queue
 813 *
 814 */
 815
 816static void run_queue(struct gfs2_glock *gl, const int nonblock)
 817__releases(&gl->gl_lockref.lock)
 818__acquires(&gl->gl_lockref.lock)
 819{
 820	struct gfs2_holder *gh = NULL;
 821
 822	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 823		return;
 824
 825	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 826
 827	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 828	    gl->gl_demote_state != gl->gl_state) {
 829		if (find_first_holder(gl))
 830			goto out_unlock;
 831		if (nonblock)
 832			goto out_sched;
 833		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 834		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 835		gl->gl_target = gl->gl_demote_state;
 836	} else {
 837		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 838			gfs2_demote_wake(gl);
 839		if (do_promote(gl) == 0)
 840			goto out_unlock;
 841		gh = find_first_waiter(gl);
 842		gl->gl_target = gh->gh_state;
 843		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 844			do_error(gl, 0); /* Fail queued try locks */
 845	}
 846	do_xmote(gl, gh, gl->gl_target);
 847	return;
 848
 849out_sched:
 850	clear_bit(GLF_LOCK, &gl->gl_flags);
 851	smp_mb__after_atomic();
 852	gl->gl_lockref.count++;
 853	__gfs2_glock_queue_work(gl, 0);
 854	return;
 855
 856out_unlock:
 857	clear_bit(GLF_LOCK, &gl->gl_flags);
 858	smp_mb__after_atomic();
 859	return;
 860}
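    /*
     * A rough sketch of the control flow around run_queue(), as far as it
     * can be followed within this file: gfs2_glock_nq() queues a holder
     * and calls run_queue(), which either grants waiting holders directly
     * via do_promote() or requests a state change from the lock manager
     * via do_xmote().  The manager's reply is processed by
     * glock_work_func(), which calls finish_xmote() and, through
     * run_queue(), repeats the cycle until the queue is drained.
     */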
 861
 862/**
 863 * glock_set_object - set the gl_object field of a glock
 864 * @gl: the glock
 865 * @object: the object
 866 */
 867void glock_set_object(struct gfs2_glock *gl, void *object)
 868{
 869	void *prev_object;
 870
 871	spin_lock(&gl->gl_lockref.lock);
 872	prev_object = gl->gl_object;
 873	gl->gl_object = object;
 874	spin_unlock(&gl->gl_lockref.lock);
 875	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
 876		pr_warn("glock=%u/%llx\n",
 877			gl->gl_name.ln_type,
 878			(unsigned long long)gl->gl_name.ln_number);
 879		gfs2_dump_glock(NULL, gl, true);
 880	}
 881}
 882
 883/**
 884 * glock_clear_object - clear the gl_object field of a glock
 885 * @gl: the glock
 886 */
 887void glock_clear_object(struct gfs2_glock *gl, void *object)
 888{
 889	void *prev_object;
 890
 891	spin_lock(&gl->gl_lockref.lock);
 892	prev_object = gl->gl_object;
 893	gl->gl_object = NULL;
 894	spin_unlock(&gl->gl_lockref.lock);
 895	if (gfs2_assert_warn(gl->gl_name.ln_sbd,
 896			     prev_object == object || prev_object == NULL)) {
 897		pr_warn("glock=%u/%llx\n",
 898			gl->gl_name.ln_type,
 899			(unsigned long long)gl->gl_name.ln_number);
 900		gfs2_dump_glock(NULL, gl, true);
 901	}
 902}
 903
 904void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
 905{
 906	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
 907
 908	if (ri->ri_magic == 0)
 909		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
 910	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
 911		ri->ri_generation_deleted = cpu_to_be64(generation);
 912}
 913
 914bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
 915{
 916	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
 917
 918	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
 919		return false;
 920	return generation <= be64_to_cpu(ri->ri_generation_deleted);
 921}
 922
 923static void gfs2_glock_poke(struct gfs2_glock *gl)
 924{
 925	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
 926	struct gfs2_holder gh;
 927	int error;
 928
 929	__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
 930	error = gfs2_glock_nq(&gh);
 931	if (!error)
 932		gfs2_glock_dq(&gh);
 933	gfs2_holder_uninit(&gh);
 934}
 935
 936static bool gfs2_try_evict(struct gfs2_glock *gl)
 937{
 938	struct gfs2_inode *ip;
 939	bool evicted = false;
 940
 941	/*
 942	 * If there is contention on the iopen glock and we have an inode, try
 943	 * to grab and release the inode so that it can be evicted.  This will
 944	 * allow the remote node to go ahead and delete the inode without us
 945	 * having to do it, which will avoid rgrp glock thrashing.
 946	 *
 947	 * The remote node is likely still holding the corresponding inode
 948	 * glock, so it will run before we get to verify that the delete has
 949	 * happened below.
 950	 */
 951	spin_lock(&gl->gl_lockref.lock);
 952	ip = gl->gl_object;
 953	if (ip && !igrab(&ip->i_inode))
 954		ip = NULL;
 955	spin_unlock(&gl->gl_lockref.lock);
 956	if (ip) {
 957		gl->gl_no_formal_ino = ip->i_no_formal_ino;
 958		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
 959		d_prune_aliases(&ip->i_inode);
 960		iput(&ip->i_inode);
 961
 962		/* If the inode was evicted, gl->gl_object will now be NULL. */
 963		spin_lock(&gl->gl_lockref.lock);
 964		ip = gl->gl_object;
 965		if (ip) {
 966			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
 967			if (!igrab(&ip->i_inode))
 968				ip = NULL;
 969		}
 970		spin_unlock(&gl->gl_lockref.lock);
 971		if (ip) {
 972			gfs2_glock_poke(ip->i_gl);
 973			iput(&ip->i_inode);
 974		}
 975		evicted = !ip;
 976	}
 977	return evicted;
 978}
 979
 980static void delete_work_func(struct work_struct *work)
 981{
 982	struct delayed_work *dwork = to_delayed_work(work);
 983	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
 984	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 985	struct inode *inode;
 986	u64 no_addr = gl->gl_name.ln_number;
 987
 988	spin_lock(&gl->gl_lockref.lock);
 989	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
 990	spin_unlock(&gl->gl_lockref.lock);
 991
 992	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
 993		/*
 994		 * If we can evict the inode, give the remote node trying to
 995		 * delete the inode some time before verifying that the delete
 996		 * has happened.  Otherwise, if we cause contention on the inode glock
 997		 * immediately, the remote node will think that we still have
 998		 * the inode in use, and so it will give up waiting.
 999		 *
1000		 * If we can't evict the inode, signal to the remote node that
1001		 * the inode is still in use.  We'll later try to delete the
1002		 * inode locally in gfs2_evict_inode.
1003		 *
1004		 * FIXME: We only need to verify that the remote node has
1005		 * deleted the inode because nodes before this remote delete
1006		 * rework won't cooperate.  At a later time, when we no longer
1007		 * care about compatibility with such nodes, we can skip this
1008		 * step entirely.
1009		 */
1010		if (gfs2_try_evict(gl)) {
1011			if (gfs2_queue_delete_work(gl, 5 * HZ))
1012				return;
1013		}
1014		goto out;
1015	}
1016
1017	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
1018				    GFS2_BLKST_UNLINKED);
1019	if (IS_ERR(inode)) {
1020		if (PTR_ERR(inode) == -EAGAIN &&
1021			(gfs2_queue_delete_work(gl, 5 * HZ)))
1022				return;
1023	} else {
1024		d_prune_aliases(inode);
1025		iput(inode);
1026	}
1027out:
1028	gfs2_glock_put(gl);
1029}
1030
1031static void glock_work_func(struct work_struct *work)
1032{
1033	unsigned long delay = 0;
1034	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
1035	unsigned int drop_refs = 1;
1036
1037	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
1038		finish_xmote(gl, gl->gl_reply);
1039		drop_refs++;
1040	}
1041	spin_lock(&gl->gl_lockref.lock);
1042	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1043	    gl->gl_state != LM_ST_UNLOCKED &&
1044	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
1045		unsigned long holdtime, now = jiffies;
1046
1047		holdtime = gl->gl_tchange + gl->gl_hold_time;
1048		if (time_before(now, holdtime))
1049			delay = holdtime - now;
1050
1051		if (!delay) {
1052			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1053			gfs2_set_demote(gl);
1054		}
1055	}
1056	run_queue(gl, 0);
1057	if (delay) {
1058		/* Keep one glock reference for the work we requeue. */
1059		drop_refs--;
1060		if (gl->gl_name.ln_type != LM_TYPE_INODE)
1061			delay = 0;
1062		__gfs2_glock_queue_work(gl, delay);
1063	}
1064
1065	/*
1066	 * Drop the remaining glock references manually here. (Mind that
 1067	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
1068	 * here as well.)
1069	 */
1070	gl->gl_lockref.count -= drop_refs;
1071	if (!gl->gl_lockref.count) {
1072		__gfs2_glock_put(gl);
1073		return;
1074	}
1075	spin_unlock(&gl->gl_lockref.lock);
1076}
1077
1078static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
1079					    struct gfs2_glock *new)
1080{
1081	struct wait_glock_queue wait;
1082	wait_queue_head_t *wq = glock_waitqueue(name);
1083	struct gfs2_glock *gl;
1084
1085	wait.name = name;
1086	init_wait(&wait.wait);
1087	wait.wait.func = glock_wake_function;
1088
1089again:
1090	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1091	rcu_read_lock();
1092	if (new) {
1093		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
1094			&new->gl_node, ht_parms);
1095		if (IS_ERR(gl))
1096			goto out;
1097	} else {
1098		gl = rhashtable_lookup_fast(&gl_hash_table,
1099			name, ht_parms);
1100	}
1101	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
1102		rcu_read_unlock();
1103		schedule();
1104		goto again;
1105	}
1106out:
1107	rcu_read_unlock();
1108	finish_wait(wq, &wait.wait);
1109	return gl;
1110}
1111
1112/**
1113 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
1114 * @sdp: The GFS2 superblock
1115 * @number: the lock number
1116 * @glops: The glock_operations to use
1117 * @create: If 0, don't create the glock if it doesn't exist
1118 * @glp: the glock is returned here
1119 *
1120 * This does not lock a glock, just finds/creates structures for one.
1121 *
1122 * Returns: errno
1123 */
1124
1125int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
1126		   const struct gfs2_glock_operations *glops, int create,
1127		   struct gfs2_glock **glp)
1128{
1129	struct super_block *s = sdp->sd_vfs;
1130	struct lm_lockname name = { .ln_number = number,
1131				    .ln_type = glops->go_type,
1132				    .ln_sbd = sdp };
1133	struct gfs2_glock *gl, *tmp;
1134	struct address_space *mapping;
1135	int ret = 0;
1136
1137	gl = find_insert_glock(&name, NULL);
1138	if (gl) {
1139		*glp = gl;
1140		return 0;
1141	}
1142	if (!create)
1143		return -ENOENT;
1144
1145	if (glops->go_flags & GLOF_ASPACE) {
1146		struct gfs2_glock_aspace *gla =
1147			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
1148		if (!gla)
1149			return -ENOMEM;
1150		gl = &gla->glock;
1151	} else {
1152		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
1153		if (!gl)
1154			return -ENOMEM;
1155	}
1156	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
1157	gl->gl_ops = glops;
1158
1159	if (glops->go_flags & GLOF_LVB) {
1160		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
1161		if (!gl->gl_lksb.sb_lvbptr) {
1162			gfs2_glock_dealloc(&gl->gl_rcu);
1163			return -ENOMEM;
1164		}
1165	}
1166
1167	atomic_inc(&sdp->sd_glock_disposal);
1168	gl->gl_node.next = NULL;
1169	gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0;
1170	gl->gl_name = name;
1171	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
1172	gl->gl_lockref.count = 1;
1173	gl->gl_state = LM_ST_UNLOCKED;
1174	gl->gl_target = LM_ST_UNLOCKED;
1175	gl->gl_demote_state = LM_ST_EXCLUSIVE;
1176	gl->gl_dstamp = 0;
1177	preempt_disable();
1178	/* We use the global stats to estimate the initial per-glock stats */
1179	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
1180	preempt_enable();
1181	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
1182	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
1183	gl->gl_tchange = jiffies;
1184	gl->gl_object = NULL;
1185	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
1186	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
1187	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
1188		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
1189
1190	mapping = gfs2_glock2aspace(gl);
1191	if (mapping) {
 1192		mapping->a_ops = &gfs2_meta_aops;
1193		mapping->host = s->s_bdev->bd_inode;
1194		mapping->flags = 0;
1195		mapping_set_gfp_mask(mapping, GFP_NOFS);
1196		mapping->private_data = NULL;
1197		mapping->writeback_index = 0;
1198	}
1199
1200	tmp = find_insert_glock(&name, gl);
1201	if (!tmp) {
1202		*glp = gl;
1203		goto out;
1204	}
1205	if (IS_ERR(tmp)) {
1206		ret = PTR_ERR(tmp);
1207		goto out_free;
1208	}
1209	*glp = tmp;
1210
1211out_free:
1212	gfs2_glock_dealloc(&gl->gl_rcu);
1213	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
1214		wake_up(&sdp->sd_glock_wait);
1215
1216out:
1217	return ret;
1218}
1219
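    /*
     * A minimal usage sketch (illustrative, not lifted from a real
     * caller) of the lookup/acquire/release pattern built from the
     * primitives in this file; sdp, number and glops stand for whatever
     * the caller has at hand, and CREATE (nonzero) asks for on-demand
     * creation.  gfs2_glock_nq_init() and gfs2_glock_dq_uninit() are the
     * helpers declared in glock.h:
     *
     *	struct gfs2_glock *gl;
     *	struct gfs2_holder gh;
     *	int error;
     *
     *	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
     *	if (error)
     *		return error;
     *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
     *	if (error == 0) {
     *		... access the object protected by the glock ...
     *		gfs2_glock_dq_uninit(&gh);
     *	}
     *	gfs2_glock_put(gl);
     */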
1220/**
1221 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
1222 * @gl: the glock
1223 * @state: the state we're requesting
1224 * @flags: the modifier flags
1225 * @gh: the holder structure
1226 *
1227 */
1228
1229void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
1230			struct gfs2_holder *gh, unsigned long ip)
1231{
1232	INIT_LIST_HEAD(&gh->gh_list);
1233	gh->gh_gl = gfs2_glock_hold(gl);
1234	gh->gh_ip = ip;
1235	gh->gh_owner_pid = get_pid(task_pid(current));
1236	gh->gh_state = state;
1237	gh->gh_flags = flags;
1238	gh->gh_iflags = 0;
1239}
1240
1241/**
1242 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
1243 * @state: the state we're requesting
1244 * @flags: the modifier flags
1245 * @gh: the holder structure
1246 *
1247 * Don't mess with the glock.
1248 *
1249 */
1250
1251void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
1252{
1253	gh->gh_state = state;
1254	gh->gh_flags = flags;
1255	gh->gh_iflags = 0;
1256	gh->gh_ip = _RET_IP_;
1257	put_pid(gh->gh_owner_pid);
1258	gh->gh_owner_pid = get_pid(task_pid(current));
1259}
1260
1261/**
1262 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
1263 * @gh: the holder structure
1264 *
1265 */
1266
1267void gfs2_holder_uninit(struct gfs2_holder *gh)
1268{
1269	put_pid(gh->gh_owner_pid);
1270	gfs2_glock_put(gh->gh_gl);
1271	gfs2_holder_mark_uninitialized(gh);
1272	gh->gh_ip = 0;
1273}
1274
1275static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
1276					unsigned long start_time)
1277{
 1278	/* Have we waited longer than a second? */
1279	if (time_after(jiffies, start_time + HZ)) {
1280		/* Lengthen the minimum hold time. */
1281		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
1282				       GL_GLOCK_MAX_HOLD);
1283	}
1284}
1285
1286/**
1287 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
1288 * @gh: the glock holder
1289 *
1290 * Called when a glock holder no longer needs to be waited for because it is
1291 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
1292 * failed (gh_error != 0).
1293 */
1294
1295int gfs2_glock_holder_ready(struct gfs2_holder *gh)
1296{
1297	if (gh->gh_error || (gh->gh_flags & GL_SKIP))
1298		return gh->gh_error;
1299	gh->gh_error = gfs2_instantiate(gh);
1300	if (gh->gh_error)
1301		gfs2_glock_dq(gh);
1302	return gh->gh_error;
1303}
1304
1305/**
1306 * gfs2_glock_wait - wait on a glock acquisition
1307 * @gh: the glock holder
1308 *
1309 * Returns: 0 on success
1310 */
1311
1312int gfs2_glock_wait(struct gfs2_holder *gh)
1313{
1314	unsigned long start_time = jiffies;
1315
1316	might_sleep();
1317	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1318	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
1319	return gfs2_glock_holder_ready(gh);
1320}
1321
1322static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
1323{
1324	int i;
1325
1326	for (i = 0; i < num_gh; i++)
1327		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
1328			return 1;
1329	return 0;
1330}
1331
1332/**
1333 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
1334 * @num_gh: the number of holders in the array
1335 * @ghs: the glock holder array
1336 *
1337 * Returns: 0 on success, meaning all glocks have been granted and are held.
1338 *          -ESTALE if the request timed out, meaning all glocks were released,
1339 *          and the caller should retry the operation.
1340 */
1341
1342int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
1343{
1344	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
1345	int i, ret = 0, timeout = 0;
1346	unsigned long start_time = jiffies;
1347
1348	might_sleep();
1349	/*
1350	 * Total up the (minimum hold time * 2) of all glocks and use that to
1351	 * determine the max amount of time we should wait.
1352	 */
1353	for (i = 0; i < num_gh; i++)
1354		timeout += ghs[i].gh_gl->gl_hold_time << 1;
1355
1356	if (!wait_event_timeout(sdp->sd_async_glock_wait,
1357				!glocks_pending(num_gh, ghs), timeout)) {
1358		ret = -ESTALE; /* request timed out. */
1359		goto out;
1360	}
1361
1362	for (i = 0; i < num_gh; i++) {
1363		struct gfs2_holder *gh = &ghs[i];
1364		int ret2;
1365
1366		if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1367			gfs2_glock_update_hold_time(gh->gh_gl,
1368						    start_time);
1369		}
1370		ret2 = gfs2_glock_holder_ready(gh);
1371		if (!ret)
1372			ret = ret2;
1373	}
1374
1375out:
1376	if (ret) {
1377		for (i = 0; i < num_gh; i++) {
1378			struct gfs2_holder *gh = &ghs[i];
1379
1380			gfs2_glock_dq(gh);
1381		}
1382	}
1383	return ret;
1384}
1385
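    /*
     * Hypothetical usage sketch for the async interface: queue several
     * holders with GL_ASYNC (gfs2_glock_nq() then returns without
     * waiting) and collect the results with gfs2_glock_async_wait().  On
     * success all holders are held and must eventually be dropped; on
     * -ESTALE they have already been released and the caller should
     * retry.  gfs2_holder_init() and gfs2_glock_dq_uninit() come from
     * glock.h:
     *
     *	struct gfs2_holder ghs[2];
     *	int error;
     *
     *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[0]);
     *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[1]);
     *	gfs2_glock_nq(&ghs[0]);
     *	gfs2_glock_nq(&ghs[1]);
     *	error = gfs2_glock_async_wait(2, ghs);
     *	if (error == 0) {
     *		... both glocks are held ...
     *		gfs2_glock_dq_uninit(&ghs[0]);
     *		gfs2_glock_dq_uninit(&ghs[1]);
     *	}
     */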
1386/**
1387 * handle_callback - process a demote request
1388 * @gl: the glock
1389 * @state: the state the caller wants us to change to
1390 * @delay: zero to demote immediately; otherwise pending demote
1391 * @remote: true if this came from a different cluster node
1392 *
1393 * There are only two requests that we are going to see in actual
 1394 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
1395 */
1396
1397static void handle_callback(struct gfs2_glock *gl, unsigned int state,
1398			    unsigned long delay, bool remote)
1399{
1400	if (delay)
1401		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1402	else
1403		gfs2_set_demote(gl);
1404	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
1405		gl->gl_demote_state = state;
1406		gl->gl_demote_time = jiffies;
1407	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
1408			gl->gl_demote_state != state) {
1409		gl->gl_demote_state = LM_ST_UNLOCKED;
1410	}
1411	if (gl->gl_ops->go_callback)
1412		gl->gl_ops->go_callback(gl, remote);
1413	trace_gfs2_demote_rq(gl, remote);
1414}
1415
1416void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
1417{
1418	struct va_format vaf;
1419	va_list args;
1420
1421	va_start(args, fmt);
1422
1423	if (seq) {
1424		seq_vprintf(seq, fmt, args);
1425	} else {
1426		vaf.fmt = fmt;
1427		vaf.va = &args;
1428
1429		pr_err("%pV", &vaf);
1430	}
1431
1432	va_end(args);
1433}
1434
1435static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
1436{
 1437	if (!(gh->gh_flags & GL_NOPID))
 1438		return true;
 1439	if (gh->gh_state == LM_ST_UNLOCKED)
 1440		return true;
 1441	return false;
1442}
1443
1444/**
1445 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1446 * @gh: the holder structure to add
1447 *
1448 * Eventually we should move the recursive locking trap to a
1449 * debugging option or something like that. This is the fast
1450 * path and needs to have the minimum number of distractions.
1451 * 
1452 */
1453
1454static inline void add_to_queue(struct gfs2_holder *gh)
1455__releases(&gl->gl_lockref.lock)
1456__acquires(&gl->gl_lockref.lock)
1457{
1458	struct gfs2_glock *gl = gh->gh_gl;
1459	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1460	struct list_head *insert_pt = NULL;
1461	struct gfs2_holder *gh2;
1462	int try_futile = 0;
1463
1464	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1465	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1466		GLOCK_BUG_ON(gl, true);
1467
1468	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1469		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
1470			struct gfs2_holder *current_gh;
1471
1472			current_gh = find_first_holder(gl);
1473			try_futile = !may_grant(gl, current_gh, gh);
1474		}
1475		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1476			goto fail;
1477	}
1478
1479	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1480		if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
1481			continue;
1482		if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
1483			continue;
1484		if (!pid_is_meaningful(gh2))
1485			continue;
1486		goto trap_recursive;
1487	}
1488	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1489		if (try_futile &&
1490		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1491fail:
1492			gh->gh_error = GLR_TRYFAILED;
1493			gfs2_holder_wake(gh);
1494			return;
1495		}
1496		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1497			continue;
1498		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1499			insert_pt = &gh2->gh_list;
1500	}
1501	trace_gfs2_glock_queue(gh, 1);
1502	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1503	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1504	if (likely(insert_pt == NULL)) {
1505		list_add_tail(&gh->gh_list, &gl->gl_holders);
1506		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1507			goto do_cancel;
1508		return;
1509	}
1510	list_add_tail(&gh->gh_list, insert_pt);
1511do_cancel:
1512	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
1513	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1514		spin_unlock(&gl->gl_lockref.lock);
1515		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1516			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1517		spin_lock(&gl->gl_lockref.lock);
1518	}
1519	return;
1520
1521trap_recursive:
1522	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
1523	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1524	fs_err(sdp, "lock type: %d req lock state: %d\n",
1525	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1526	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1527	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
1528	fs_err(sdp, "lock type: %d req lock state: %d\n",
1529	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1530	gfs2_dump_glock(NULL, gl, true);
1531	BUG();
1532}
1533
1534/**
1535 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1536 * @gh: the holder structure
1537 *
1538 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1539 *
1540 * Returns: 0, GLR_TRYFAILED, or errno on failure
1541 */
1542
1543int gfs2_glock_nq(struct gfs2_holder *gh)
1544{
1545	struct gfs2_glock *gl = gh->gh_gl;
1546	int error = 0;
1547
1548	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1549		return -EIO;
1550
1551	if (test_bit(GLF_LRU, &gl->gl_flags))
1552		gfs2_glock_remove_from_lru(gl);
1553
1554	gh->gh_error = 0;
1555	spin_lock(&gl->gl_lockref.lock);
1556	add_to_queue(gh);
1557	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1558		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1559		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1560		gl->gl_lockref.count++;
1561		__gfs2_glock_queue_work(gl, 0);
1562	}
1563	run_queue(gl, 1);
1564	spin_unlock(&gl->gl_lockref.lock);
1565
1566	if (!(gh->gh_flags & GL_ASYNC))
1567		error = gfs2_glock_wait(gh);
1568
1569	return error;
1570}
1571
1572/**
1573 * gfs2_glock_poll - poll to see if an async request has been completed
1574 * @gh: the holder
1575 *
1576 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1577 */
1578
1579int gfs2_glock_poll(struct gfs2_holder *gh)
1580{
1581	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1582}
1583
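/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might combine GL_ASYNC with gfs2_glock_poll() and gfs2_glock_wait().
 * The function name is hypothetical, and @gh is assumed to have been set
 * up with gfs2_holder_init(gl, state, flags | GL_ASYNC, gh).
 *
 *	static int example_async_nq(struct gfs2_holder *gh)
 *	{
 *		int error;
 *
 *		error = gfs2_glock_nq(gh);	// never fails for GL_ASYNC
 *		if (error)
 *			return error;
 *		while (!gfs2_glock_poll(gh))
 *			cond_resched();		// do other work between polls
 *		return gfs2_glock_wait(gh);	// 0, GLR_TRYFAILED, or errno
 *	}
 */
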
1584static inline bool needs_demote(struct gfs2_glock *gl)
1585{
1586	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
1587		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
1588}
1589
1590static void __gfs2_glock_dq(struct gfs2_holder *gh)
1591{
1592	struct gfs2_glock *gl = gh->gh_gl;
1593	unsigned delay = 0;
1594	int fast_path = 0;
1595
1596	/*
1597	 * This holder should not be cached, so mark it for demote.
1598	 * Note: this should be done before the check for needs_demote
1599	 * below.
1600	 */
1601	if (gh->gh_flags & GL_NOCACHE)
1602		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1603
1604	list_del_init(&gh->gh_list);
1605	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1606	trace_gfs2_glock_queue(gh, 0);
1607
1608	/*
1609	 * If there hasn't been a demote request we are done.
1610	 * (Let the remaining holders, if any, keep holding it.)
1611	 */
1612	if (!needs_demote(gl)) {
1613		if (list_empty(&gl->gl_holders))
1614			fast_path = 1;
1615	}
1616
1617	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
1618		gfs2_glock_add_to_lru(gl);
1619
1620	if (unlikely(!fast_path)) {
1621		gl->gl_lockref.count++;
1622		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1623		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1624		    gl->gl_name.ln_type == LM_TYPE_INODE)
1625			delay = gl->gl_hold_time;
1626		__gfs2_glock_queue_work(gl, delay);
1627	}
1628}
1629
1630/**
1631 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1632 * @gh: the glock holder
1633 *
1634 */
1635void gfs2_glock_dq(struct gfs2_holder *gh)
1636{
1637	struct gfs2_glock *gl = gh->gh_gl;
1638	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1639
1640	spin_lock(&gl->gl_lockref.lock);
1641	if (!gfs2_holder_queued(gh)) {
1642		/*
1643		 * May have already been dequeued because the locking request
1644		 * was GL_ASYNC and it has failed in the meantime.
1645		 */
1646		goto out;
1647	}
1648
1649	if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
1650	    !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1651		spin_unlock(&gl->gl_lockref.lock);
1652		gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
1653		wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1654		spin_lock(&gl->gl_lockref.lock);
1655	}
1656
1657	/*
1658	 * If we're in the process of file system withdraw, we cannot just
1659	 * dequeue any glocks until our journal is recovered, lest we introduce
1660	 * file system corruption. There are two exceptions to this rule: we
1661	 * must allow unlocking of nondisk glocks, and of the glock for our own
1662	 * journal, which needs recovery.
1663	 */
1664	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
1665	    glock_blocked_by_withdraw(gl) &&
1666	    gh->gh_gl != sdp->sd_jinode_gl) {
1667		sdp->sd_glock_dqs_held++;
1668		spin_unlock(&gl->gl_lockref.lock);
1669		might_sleep();
1670		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
1671			    TASK_UNINTERRUPTIBLE);
1672		spin_lock(&gl->gl_lockref.lock);
1673	}
1674
1675	__gfs2_glock_dq(gh);
1676out:
1677	spin_unlock(&gl->gl_lockref.lock);
1678}
1679
1680void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1681{
1682	struct gfs2_glock *gl = gh->gh_gl;
1683	gfs2_glock_dq(gh);
1684	might_sleep();
1685	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1686}
1687
1688/**
1689 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1690 * @gh: the holder structure
1691 *
1692 */
1693
1694void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1695{
1696	gfs2_glock_dq(gh);
1697	gfs2_holder_uninit(gh);
1698}
1699
1700/**
1701 * gfs2_glock_nq_num - acquire a glock based on lock number
1702 * @sdp: the filesystem
1703 * @number: the lock number
1704 * @glops: the glock operations for the type of glock
1705 * @state: the state to acquire the glock in
1706 * @flags: modifier flags for the acquisition
1707 * @gh: the struct gfs2_holder
1708 *
1709 * Returns: errno
1710 */
1711
1712int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1713		      const struct gfs2_glock_operations *glops,
1714		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1715{
1716	struct gfs2_glock *gl;
1717	int error;
1718
1719	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1720	if (!error) {
1721		error = gfs2_glock_nq_init(gl, state, flags, gh);
1722		gfs2_glock_put(gl);
1723	}
1724
1725	return error;
1726}
1727
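/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller taking and dropping a shared hold by lock number.  The choice of
 * &gfs2_meta_glops and the surrounding context are assumptions of the
 * example.
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_num(sdp, number, &gfs2_meta_glops,
 *				  LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	// ... read the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */
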
1728/**
1729 * glock_compare - Compare two struct gfs2_glock structures for sorting
1730 * @arg_a: the first structure
1731 * @arg_b: the second structure
1732 *
1733 */
1734
1735static int glock_compare(const void *arg_a, const void *arg_b)
1736{
1737	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1738	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1739	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1740	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1741
1742	if (a->ln_number > b->ln_number)
1743		return 1;
1744	if (a->ln_number < b->ln_number)
1745		return -1;
1746	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1747	return 0;
1748}
1749
1750/**
1751 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
1752 * @num_gh: the number of structures
1753 * @ghs: an array of struct gfs2_holder structures
1754 * @p: placeholder for the holder structure to pass back
1755 *
1756 * Returns: 0 on success (all glocks acquired),
1757 *          errno on failure (no glocks acquired)
1758 */
1759
1760static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1761		     struct gfs2_holder **p)
1762{
1763	unsigned int x;
1764	int error = 0;
1765
1766	for (x = 0; x < num_gh; x++)
1767		p[x] = &ghs[x];
1768
1769	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1770
1771	for (x = 0; x < num_gh; x++) {
1772		error = gfs2_glock_nq(p[x]);
1773		if (error) {
1774			while (x--)
1775				gfs2_glock_dq(p[x]);
1776			break;
1777		}
1778	}
1779
1780	return error;
1781}
1782
1783/**
1784 * gfs2_glock_nq_m - acquire multiple glocks
1785 * @num_gh: the number of structures
1786 * @ghs: an array of struct gfs2_holder structures
1787 *
1788 * Returns: 0 on success (all glocks acquired),
1789 *          errno on failure (no glocks acquired)
1790 */
1791
1792int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1793{
1794	struct gfs2_holder *tmp[4];
1795	struct gfs2_holder **pph = tmp;
1796	int error = 0;
1797
1798	switch (num_gh) {
1799	case 0:
1800		return 0;
1801	case 1:
1802		return gfs2_glock_nq(ghs);
1803	default:
1804		if (num_gh <= 4)
1805			break;
1806		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
1807				    GFP_NOFS);
1808		if (!pph)
1809			return -ENOMEM;
1810	}
1811
1812	error = nq_m_sync(num_gh, ghs, pph);
1813
1814	if (pph != tmp)
1815		kfree(pph);
1816
1817	return error;
1818}
1819
1820/**
1821 * gfs2_glock_dq_m - release multiple glocks
1822 * @num_gh: the number of structures
1823 * @ghs: an array of struct gfs2_holder structures
1824 *
1825 */
1826
1827void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1828{
1829	while (num_gh--)
1830		gfs2_glock_dq(&ghs[num_gh]);
1831}
1832
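/*
 * Illustrative sketch, not part of the original source: acquiring two
 * glocks in deadlock-free order via gfs2_glock_nq_m() and releasing them
 * together.  The glock pointers gl1 and gl2 are assumptions of the
 * example.
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	// 0 => both glocks held
 *	if (!error) {
 *		// ... operate on both locked resources ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */
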
1833void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1834{
1835	unsigned long delay = 0;
1836	unsigned long holdtime;
1837	unsigned long now = jiffies;
1838
1839	gfs2_glock_hold(gl);
1840	spin_lock(&gl->gl_lockref.lock);
1841	holdtime = gl->gl_tchange + gl->gl_hold_time;
1842	if (!list_empty(&gl->gl_holders) &&
1843	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1844		if (time_before(now, holdtime))
1845			delay = holdtime - now;
1846		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1847			delay = gl->gl_hold_time;
1848	}
1849	handle_callback(gl, state, delay, true);
1850	__gfs2_glock_queue_work(gl, delay);
1851	spin_unlock(&gl->gl_lockref.lock);
1852}
1853
1854/**
1855 * gfs2_should_freeze - Figure out if glock should be frozen
1856 * @gl: The glock in question
1857 *
1858 * Glocks are not frozen if (a) the result of the dlm operation is
1859 * an error, (b) the locking operation was an unlock operation, or
1860 * (c) there is a "noexp" flagged request anywhere in the queue.
1861 *
1862 * Returns: 1 if freezing should occur, 0 otherwise
1863 */
1864
1865static int gfs2_should_freeze(const struct gfs2_glock *gl)
1866{
1867	const struct gfs2_holder *gh;
1868
1869	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1870		return 0;
1871	if (gl->gl_target == LM_ST_UNLOCKED)
1872		return 0;
1873
1874	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1875		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1876			continue;
1877		if (LM_FLAG_NOEXP & gh->gh_flags)
1878			return 0;
1879	}
1880
1881	return 1;
1882}
1883
1884/**
1885 * gfs2_glock_complete - Callback used by locking
1886 * @gl: Pointer to the glock
1887 * @ret: The return value from the dlm
1888 *
1889 * The gl_reply field is protected by gl_lockref.lock so that it is ok
1890 * to use a bitfield shared with other glock state fields.
1891 */
1892
1893void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1894{
1895	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1896
1897	spin_lock(&gl->gl_lockref.lock);
1898	gl->gl_reply = ret;
1899
1900	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1901		if (gfs2_should_freeze(gl)) {
1902			set_bit(GLF_FROZEN, &gl->gl_flags);
1903			spin_unlock(&gl->gl_lockref.lock);
1904			return;
1905		}
1906	}
1907
1908	gl->gl_lockref.count++;
1909	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1910	__gfs2_glock_queue_work(gl, 0);
1911	spin_unlock(&gl->gl_lockref.lock);
1912}
1913
1914static int glock_cmp(void *priv, const struct list_head *a,
1915		     const struct list_head *b)
1916{
1917	struct gfs2_glock *gla, *glb;
1918
1919	gla = list_entry(a, struct gfs2_glock, gl_lru);
1920	glb = list_entry(b, struct gfs2_glock, gl_lru);
1921
1922	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1923		return 1;
1924	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1925		return -1;
1926
1927	return 0;
1928}
1929
1930/**
1931 * gfs2_dispose_glock_lru - Demote a list of glocks
1932 * @list: The list to dispose of
1933 *
1934 * Disposing of glocks may involve disk accesses, so here we sort
1935 * the glocks by number (i.e. disk location of the inodes) so that
1936 * any such accesses are sent in order (mostly).
1937 *
1938 * Must be called under the lru_lock, but may drop and retake this
1939 * lock. While the lru_lock is dropped, entries may vanish from the
1940 * list, but no new entries will appear on the list (since it is
1941 * private).
1942 */
1943
1944static void gfs2_dispose_glock_lru(struct list_head *list)
1945__releases(&lru_lock)
1946__acquires(&lru_lock)
1947{
1948	struct gfs2_glock *gl;
1949
1950	list_sort(NULL, list, glock_cmp);
1951
1952	while (!list_empty(list)) {
1953		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
1954		list_del_init(&gl->gl_lru);
1955		clear_bit(GLF_LRU, &gl->gl_flags);
1956		if (!spin_trylock(&gl->gl_lockref.lock)) {
1957add_back_to_lru:
1958			list_add(&gl->gl_lru, &lru_list);
1959			set_bit(GLF_LRU, &gl->gl_flags);
1960			atomic_inc(&lru_count);
1961			continue;
1962		}
1963		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1964			spin_unlock(&gl->gl_lockref.lock);
1965			goto add_back_to_lru;
1966		}
1967		gl->gl_lockref.count++;
1968		if (demote_ok(gl))
1969			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1970		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1971		__gfs2_glock_queue_work(gl, 0);
1972		spin_unlock(&gl->gl_lockref.lock);
1973		cond_resched_lock(&lru_lock);
1974	}
1975}
1976
1977/**
1978 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1979 * @nr: The number of entries to scan
1980 *
1981 * This function selects the entries on the LRU which are able to
1982 * be demoted, and then kicks off the process by calling
1983 * gfs2_dispose_glock_lru() above.
1984 */
1985
1986static long gfs2_scan_glock_lru(int nr)
1987{
1988	struct gfs2_glock *gl;
1989	LIST_HEAD(skipped);
1990	LIST_HEAD(dispose);
1991	long freed = 0;
1992
1993	spin_lock(&lru_lock);
1994	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1995		gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
1996
1997		/* Test for being demotable */
1998		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1999			list_move(&gl->gl_lru, &dispose);
2000			atomic_dec(&lru_count);
2001			freed++;
2002			continue;
2003		}
2004
2005		list_move(&gl->gl_lru, &skipped);
2006	}
2007	list_splice(&skipped, &lru_list);
2008	if (!list_empty(&dispose))
2009		gfs2_dispose_glock_lru(&dispose);
2010	spin_unlock(&lru_lock);
2011
2012	return freed;
2013}
2014
2015static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
2016					    struct shrink_control *sc)
2017{
2018	if (!(sc->gfp_mask & __GFP_FS))
2019		return SHRINK_STOP;
2020	return gfs2_scan_glock_lru(sc->nr_to_scan);
2021}
2022
2023static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
2024					     struct shrink_control *sc)
2025{
2026	return vfs_pressure_ratio(atomic_read(&lru_count));
2027}
2028
2029static struct shrinker glock_shrinker = {
2030	.seeks = DEFAULT_SEEKS,
2031	.count_objects = gfs2_glock_shrink_count,
2032	.scan_objects = gfs2_glock_shrink_scan,
2033};
2034
2035/**
2036 * glock_hash_walk - Call a function for each glock belonging to a filesystem
2037 * @examiner: the function
2038 * @sdp: the filesystem
2039 *
2040 * Note that the examiner may be called multiple times on the
2041 * same object, so the user must ensure that it is able to cope
2042 * with that.
2043 */
2044
2045static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
2046{
2047	struct gfs2_glock *gl;
2048	struct rhashtable_iter iter;
2049
2050	rhashtable_walk_enter(&gl_hash_table, &iter);
2051
2052	do {
2053		rhashtable_walk_start(&iter);
2054
2055		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
2056			if (gl->gl_name.ln_sbd == sdp)
2057				examiner(gl);
2058		}
2059
2060		rhashtable_walk_stop(&iter);
2061	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
2062
2063	rhashtable_walk_exit(&iter);
2064}
2065
2066bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
2067{
2068	bool queued;
2069
2070	spin_lock(&gl->gl_lockref.lock);
2071	queued = queue_delayed_work(gfs2_delete_workqueue,
2072				    &gl->gl_delete, delay);
2073	if (queued)
2074		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
2075	spin_unlock(&gl->gl_lockref.lock);
2076	return queued;
2077}
2078
2079void gfs2_cancel_delete_work(struct gfs2_glock *gl)
2080{
2081	if (cancel_delayed_work(&gl->gl_delete)) {
2082		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
2083		gfs2_glock_put(gl);
2084	}
2085}
2086
2087bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
2088{
2089	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
2090}
2091
2092static void flush_delete_work(struct gfs2_glock *gl)
2093{
2094	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
2095		if (cancel_delayed_work(&gl->gl_delete)) {
2096			queue_delayed_work(gfs2_delete_workqueue,
2097					   &gl->gl_delete, 0);
2098		}
2099	}
2100}
2101
2102void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
2103{
2104	glock_hash_walk(flush_delete_work, sdp);
2105	flush_workqueue(gfs2_delete_workqueue);
2106}
2107
2108/**
2109 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
2110 * @gl: The glock to thaw
2111 *
2112 */
2113
2114static void thaw_glock(struct gfs2_glock *gl)
2115{
2116	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
2117		return;
2118	if (!lockref_get_not_dead(&gl->gl_lockref))
2119		return;
2120	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
2121	gfs2_glock_queue_work(gl, 0);
2122}
2123
2124/**
2125 * clear_glock - look at a glock and see if we can free it from glock cache
2126 * @gl: the glock to look at
2127 *
2128 */
2129
2130static void clear_glock(struct gfs2_glock *gl)
2131{
2132	gfs2_glock_remove_from_lru(gl);
2133
2134	spin_lock(&gl->gl_lockref.lock);
2135	if (!__lockref_is_dead(&gl->gl_lockref)) {
2136		gl->gl_lockref.count++;
2137		if (gl->gl_state != LM_ST_UNLOCKED)
2138			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
2139		__gfs2_glock_queue_work(gl, 0);
2140	}
2141	spin_unlock(&gl->gl_lockref.lock);
2142}
2143
2144/**
2145 * gfs2_glock_thaw - Thaw any frozen glocks
2146 * @sdp: The super block
2147 *
2148 */
2149
2150void gfs2_glock_thaw(struct gfs2_sbd *sdp)
2151{
2152	glock_hash_walk(thaw_glock, sdp);
2153}
2154
2155static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2156{
2157	spin_lock(&gl->gl_lockref.lock);
2158	gfs2_dump_glock(seq, gl, fsid);
2159	spin_unlock(&gl->gl_lockref.lock);
2160}
2161
2162static void dump_glock_func(struct gfs2_glock *gl)
2163{
2164	dump_glock(NULL, gl, true);
2165}
2166
2167static void withdraw_dq(struct gfs2_glock *gl)
2168{
2169	spin_lock(&gl->gl_lockref.lock);
2170	if (!__lockref_is_dead(&gl->gl_lockref) &&
2171	    glock_blocked_by_withdraw(gl))
2172		do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
2173	spin_unlock(&gl->gl_lockref.lock);
2174}
2175
2176void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
2177{
2178	glock_hash_walk(withdraw_dq, sdp);
2179}
2180
2181/**
2182 * gfs2_gl_hash_clear - Empty out the glock hash table
2183 * @sdp: the filesystem
2184 *
2185 * Called when unmounting the filesystem.
2186 */
2187
2188void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
2189{
2190	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
2191	flush_workqueue(glock_workqueue);
2192	glock_hash_walk(clear_glock, sdp);
2193	flush_workqueue(glock_workqueue);
2194	wait_event_timeout(sdp->sd_glock_wait,
2195			   atomic_read(&sdp->sd_glock_disposal) == 0,
2196			   HZ * 600);
2197	glock_hash_walk(dump_glock_func, sdp);
2198}
2199
2200static const char *state2str(unsigned state)
2201{
2202	switch (state) {
2203	case LM_ST_UNLOCKED:
2204		return "UN";
2205	case LM_ST_SHARED:
2206		return "SH";
2207	case LM_ST_DEFERRED:
2208		return "DF";
2209	case LM_ST_EXCLUSIVE:
2210		return "EX";
2211	}
2212	return "??";
2213}
2214
2215static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
2216{
2217	char *p = buf;
2218	if (flags & LM_FLAG_TRY)
2219		*p++ = 't';
2220	if (flags & LM_FLAG_TRY_1CB)
2221		*p++ = 'T';
2222	if (flags & LM_FLAG_NOEXP)
2223		*p++ = 'e';
2224	if (flags & LM_FLAG_ANY)
2225		*p++ = 'A';
2226	if (flags & LM_FLAG_PRIORITY)
2227		*p++ = 'p';
2228	if (flags & LM_FLAG_NODE_SCOPE)
2229		*p++ = 'n';
2230	if (flags & GL_ASYNC)
2231		*p++ = 'a';
2232	if (flags & GL_EXACT)
2233		*p++ = 'E';
2234	if (flags & GL_NOCACHE)
2235		*p++ = 'c';
2236	if (test_bit(HIF_HOLDER, &iflags))
2237		*p++ = 'H';
2238	if (test_bit(HIF_WAIT, &iflags))
2239		*p++ = 'W';
2240	if (flags & GL_SKIP)
2241		*p++ = 's';
2242	*p = 0;
2243	return buf;
2244}
2245
2246/**
2247 * dump_holder - print information about a glock holder
2248 * @seq: the seq_file struct
2249 * @gh: the glock holder
2250 * @fs_id_buf: pointer to file system id (if requested)
2251 *
2252 */
2253
2254static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
2255			const char *fs_id_buf)
2256{
2257	const char *comm = "(none)";
2258	pid_t owner_pid = 0;
2259	char flags_buf[32];
2260
2261	rcu_read_lock();
2262	if (pid_is_meaningful(gh)) {
2263		struct task_struct *gh_owner;
2264
2265		comm = "(ended)";
2266		owner_pid = pid_nr(gh->gh_owner_pid);
2267		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
2268		if (gh_owner)
2269			comm = gh_owner->comm;
2270	}
2271	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
2272		       fs_id_buf, state2str(gh->gh_state),
2273		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
2274		       gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
2275	rcu_read_unlock();
2276}
2277
2278static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
2279{
2280	const unsigned long *gflags = &gl->gl_flags;
2281	char *p = buf;
2282
2283	if (test_bit(GLF_LOCK, gflags))
2284		*p++ = 'l';
2285	if (test_bit(GLF_DEMOTE, gflags))
2286		*p++ = 'D';
2287	if (test_bit(GLF_PENDING_DEMOTE, gflags))
2288		*p++ = 'd';
2289	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
2290		*p++ = 'p';
2291	if (test_bit(GLF_DIRTY, gflags))
2292		*p++ = 'y';
2293	if (test_bit(GLF_LFLUSH, gflags))
2294		*p++ = 'f';
2295	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
2296		*p++ = 'i';
2297	if (test_bit(GLF_REPLY_PENDING, gflags))
2298		*p++ = 'r';
2299	if (test_bit(GLF_INITIAL, gflags))
2300		*p++ = 'I';
2301	if (test_bit(GLF_FROZEN, gflags))
2302		*p++ = 'F';
2303	if (!list_empty(&gl->gl_holders))
2304		*p++ = 'q';
2305	if (test_bit(GLF_LRU, gflags))
2306		*p++ = 'L';
2307	if (gl->gl_object)
2308		*p++ = 'o';
2309	if (test_bit(GLF_BLOCKING, gflags))
2310		*p++ = 'b';
2311	if (test_bit(GLF_PENDING_DELETE, gflags))
2312		*p++ = 'P';
2313	if (test_bit(GLF_FREEING, gflags))
2314		*p++ = 'x';
2315	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
2316		*p++ = 'n';
2317	if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
2318		*p++ = 'N';
2319	*p = 0;
2320	return buf;
2321}
2322
2323/**
2324 * gfs2_dump_glock - print information about a glock
2325 * @seq: The seq_file struct
2326 * @gl: the glock
2327 * @fsid: If true, also dump the file system id
2328 *
2329 * The file format is as follows:
2330 * One line per object; capital letters indicate the object type:
2331 * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
2332 * other objects are indented by a single space and follow the glock to
2333 * which they are related. Fields are indicated by lower case letters
2334 * followed by a colon and the field value, except for strings, which are
2335 * in [] so that it's possible to see if they are composed of spaces, for
2336 * example. The fields are n = number (id of the object), f = flags,
2337 * t = type, s = state, r = refcount, e = error, p = pid.
2338 *
2339 */
2340
2341void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2342{
2343	const struct gfs2_glock_operations *glops = gl->gl_ops;
2344	unsigned long long dtime;
2345	const struct gfs2_holder *gh;
2346	char gflags_buf[32];
2347	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2348	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2349	unsigned long nrpages = 0;
2350
2351	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
2352		struct address_space *mapping = gfs2_glock2aspace(gl);
2353
2354		nrpages = mapping->nrpages;
2355	}
2356	memset(fs_id_buf, 0, sizeof(fs_id_buf));
2357	if (fsid && sdp) /* safety precaution */
2358		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2359	dtime = jiffies - gl->gl_demote_time;
2360	dtime *= 1000000/HZ; /* demote time in uSec */
2361	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
2362		dtime = 0;
2363	gfs2_print_dbg(seq, "%sG:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
2364		       "v:%d r:%d m:%ld p:%lu\n",
2365		       fs_id_buf, state2str(gl->gl_state),
2366		       gl->gl_name.ln_type,
2367		       (unsigned long long)gl->gl_name.ln_number,
2368		       gflags2str(gflags_buf, gl),
2369		       state2str(gl->gl_target),
2370		       state2str(gl->gl_demote_state), dtime,
2371		       atomic_read(&gl->gl_ail_count),
2372		       atomic_read(&gl->gl_revokes),
2373		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
2374
2375	list_for_each_entry(gh, &gl->gl_holders, gh_list)
2376		dump_holder(seq, gh, fs_id_buf);
2377
2378	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
2379		glops->go_dump(seq, gl, fs_id_buf);
2380}
2381
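/*
 * Illustrative sample, not part of the original source, of the output
 * format produced by gfs2_dump_glock() and dump_holder() above; all
 * values are made up:
 *
 *	G:  s:SH n:2/48e5 f:q t:SH d:EX/0 a:0 v:0 r:3 m:200 p:1
 *	 H: s:SH f:H e:0 p:1397 [cat] gfs2_inode_lookup+0x11a/0x2e0 [gfs2]
 */
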
2382static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
2383{
2384	struct gfs2_glock *gl = iter_ptr;
2385
2386	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
2387		   gl->gl_name.ln_type,
2388		   (unsigned long long)gl->gl_name.ln_number,
2389		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
2390		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
2391		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
2392		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
2393		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
2394		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
2395		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
2396		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
2397	return 0;
2398}
2399
2400static const char *gfs2_gltype[] = {
2401	"type",
2402	"reserved",
2403	"nondisk",
2404	"inode",
2405	"rgrp",
2406	"meta",
2407	"iopen",
2408	"flock",
2409	"plock",
2410	"quota",
2411	"journal",
2412};
2413
2414static const char *gfs2_stype[] = {
2415	[GFS2_LKS_SRTT]		= "srtt",
2416	[GFS2_LKS_SRTTVAR]	= "srttvar",
2417	[GFS2_LKS_SRTTB]	= "srttb",
2418	[GFS2_LKS_SRTTVARB]	= "srttvarb",
2419	[GFS2_LKS_SIRT]		= "sirt",
2420	[GFS2_LKS_SIRTVAR]	= "sirtvar",
2421	[GFS2_LKS_DCOUNT]	= "dlm",
2422	[GFS2_LKS_QCOUNT]	= "queue",
2423};
2424
2425#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
2426
2427static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
2428{
2429	struct gfs2_sbd *sdp = seq->private;
2430	loff_t pos = *(loff_t *)iter_ptr;
2431	unsigned index = pos >> 3;
2432	unsigned subindex = pos & 0x07;
2433	int i;
2434
2435	if (index == 0 && subindex != 0)
2436		return 0;
2437
2438	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
2439		   (index == 0) ? "cpu": gfs2_stype[subindex]);
2440
2441	for_each_possible_cpu(i) {
2442		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
2443
2444		if (index == 0)
2445			seq_printf(seq, " %15u", i);
2446		else
2447			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
2448				   lkstats[index - 1].stats[subindex]);
2449	}
2450	seq_putc(seq, '\n');
2451	return 0;
2452}
2453
2454int __init gfs2_glock_init(void)
2455{
2456	int i, ret;
2457
2458	ret = rhashtable_init(&gl_hash_table, &ht_parms);
2459	if (ret < 0)
2460		return ret;
2461
2462	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
2463					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
2464	if (!glock_workqueue) {
2465		rhashtable_destroy(&gl_hash_table);
2466		return -ENOMEM;
2467	}
2468	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
2469						WQ_MEM_RECLAIM | WQ_FREEZABLE,
2470						0);
2471	if (!gfs2_delete_workqueue) {
2472		destroy_workqueue(glock_workqueue);
2473		rhashtable_destroy(&gl_hash_table);
2474		return -ENOMEM;
2475	}
2476
2477	ret = register_shrinker(&glock_shrinker, "gfs2-glock");
2478	if (ret) {
2479		destroy_workqueue(gfs2_delete_workqueue);
2480		destroy_workqueue(glock_workqueue);
2481		rhashtable_destroy(&gl_hash_table);
2482		return ret;
2483	}
2484
2485	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
2486		init_waitqueue_head(glock_wait_table + i);
2487
2488	return 0;
2489}
2490
2491void gfs2_glock_exit(void)
2492{
2493	unregister_shrinker(&glock_shrinker);
2494	rhashtable_destroy(&gl_hash_table);
2495	destroy_workqueue(glock_workqueue);
2496	destroy_workqueue(gfs2_delete_workqueue);
2497}
2498
2499static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
2500{
2501	struct gfs2_glock *gl = gi->gl;
2502
2503	if (gl) {
2504		if (n == 0)
2505			return;
2506		if (!lockref_put_not_zero(&gl->gl_lockref))
2507			gfs2_glock_queue_put(gl);
2508	}
2509	for (;;) {
2510		gl = rhashtable_walk_next(&gi->hti);
2511		if (IS_ERR_OR_NULL(gl)) {
2512			if (gl == ERR_PTR(-EAGAIN)) {
2513				n = 1;
2514				continue;
2515			}
2516			gl = NULL;
2517			break;
2518		}
2519		if (gl->gl_name.ln_sbd != gi->sdp)
2520			continue;
2521		if (n <= 1) {
2522			if (!lockref_get_not_dead(&gl->gl_lockref))
2523				continue;
2524			break;
2525		} else {
2526			if (__lockref_is_dead(&gl->gl_lockref))
2527				continue;
2528			n--;
2529		}
2530	}
2531	gi->gl = gl;
2532}
2533
2534static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
2535	__acquires(RCU)
2536{
2537	struct gfs2_glock_iter *gi = seq->private;
2538	loff_t n;
2539
2540	/*
2541	 * We can either stay where we are, skip to the next hash table
2542	 * entry, or start from the beginning.
2543	 */
2544	if (*pos < gi->last_pos) {
2545		rhashtable_walk_exit(&gi->hti);
2546		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2547		n = *pos + 1;
2548	} else {
2549		n = *pos - gi->last_pos;
2550	}
2551
2552	rhashtable_walk_start(&gi->hti);
2553
2554	gfs2_glock_iter_next(gi, n);
2555	gi->last_pos = *pos;
2556	return gi->gl;
2557}
2558
2559static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
2560				 loff_t *pos)
2561{
2562	struct gfs2_glock_iter *gi = seq->private;
2563
2564	(*pos)++;
2565	gi->last_pos = *pos;
2566	gfs2_glock_iter_next(gi, 1);
2567	return gi->gl;
2568}
2569
2570static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
2571	__releases(RCU)
2572{
2573	struct gfs2_glock_iter *gi = seq->private;
2574
2575	rhashtable_walk_stop(&gi->hti);
2576}
2577
2578static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2579{
2580	dump_glock(seq, iter_ptr, false);
2581	return 0;
2582}
2583
2584static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2585{
2586	preempt_disable();
2587	if (*pos >= GFS2_NR_SBSTATS)
2588		return NULL;
2589	return pos;
2590}
2591
2592static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2593				   loff_t *pos)
2594{
2595	(*pos)++;
2596	if (*pos >= GFS2_NR_SBSTATS)
2597		return NULL;
2598	return pos;
2599}
2600
2601static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2602{
2603	preempt_enable();
2604}
2605
2606static const struct seq_operations gfs2_glock_seq_ops = {
2607	.start = gfs2_glock_seq_start,
2608	.next  = gfs2_glock_seq_next,
2609	.stop  = gfs2_glock_seq_stop,
2610	.show  = gfs2_glock_seq_show,
2611};
2612
2613static const struct seq_operations gfs2_glstats_seq_ops = {
2614	.start = gfs2_glock_seq_start,
2615	.next  = gfs2_glock_seq_next,
2616	.stop  = gfs2_glock_seq_stop,
2617	.show  = gfs2_glstats_seq_show,
2618};
2619
2620static const struct seq_operations gfs2_sbstats_sops = {
2621	.start = gfs2_sbstats_seq_start,
2622	.next  = gfs2_sbstats_seq_next,
2623	.stop  = gfs2_sbstats_seq_stop,
2624	.show  = gfs2_sbstats_seq_show,
2625};
2626
2627#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2628
2629static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2630			      const struct seq_operations *ops)
2631{
2632	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
2633	if (ret == 0) {
2634		struct seq_file *seq = file->private_data;
2635		struct gfs2_glock_iter *gi = seq->private;
2636
2637		gi->sdp = inode->i_private;
2638		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2639		if (seq->buf)
2640			seq->size = GFS2_SEQ_GOODSIZE;
2641		/*
2642		 * Initially, we are "before" the first hash table entry; the
2643		 * first call to rhashtable_walk_next gets us the first entry.
2644		 */
2645		gi->last_pos = -1;
2646		gi->gl = NULL;
2647		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2648	}
2649	return ret;
2650}
2651
2652static int gfs2_glocks_open(struct inode *inode, struct file *file)
2653{
2654	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2655}
2656
2657static int gfs2_glocks_release(struct inode *inode, struct file *file)
2658{
2659	struct seq_file *seq = file->private_data;
2660	struct gfs2_glock_iter *gi = seq->private;
2661
2662	if (gi->gl)
2663		gfs2_glock_put(gi->gl);
2664	rhashtable_walk_exit(&gi->hti);
2665	return seq_release_private(inode, file);
2666}
2667
2668static int gfs2_glstats_open(struct inode *inode, struct file *file)
2669{
2670	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2671}
2672
2673static const struct file_operations gfs2_glocks_fops = {
2674	.owner   = THIS_MODULE,
2675	.open    = gfs2_glocks_open,
2676	.read    = seq_read,
2677	.llseek  = seq_lseek,
2678	.release = gfs2_glocks_release,
2679};
2680
2681static const struct file_operations gfs2_glstats_fops = {
2682	.owner   = THIS_MODULE,
2683	.open    = gfs2_glstats_open,
2684	.read    = seq_read,
2685	.llseek  = seq_lseek,
2686	.release = gfs2_glocks_release,
2687};
2688
2689struct gfs2_glockfd_iter {
2690	struct super_block *sb;
2691	unsigned int tgid;
2692	struct task_struct *task;
2693	unsigned int fd;
2694	struct file *file;
2695};
2696
2697static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
2698{
2699	struct pid_namespace *ns = task_active_pid_ns(current);
2700	struct pid *pid;
2701
2702	if (i->task)
2703		put_task_struct(i->task);
2704
2705	rcu_read_lock();
2706retry:
2707	i->task = NULL;
2708	pid = find_ge_pid(i->tgid, ns);
2709	if (pid) {
2710		i->tgid = pid_nr_ns(pid, ns);
2711		i->task = pid_task(pid, PIDTYPE_TGID);
2712		if (!i->task) {
2713			i->tgid++;
2714			goto retry;
2715		}
2716		get_task_struct(i->task);
2717	}
2718	rcu_read_unlock();
2719	return i->task;
2720}
2721
2722static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
2723{
2724	if (i->file) {
2725		fput(i->file);
2726		i->file = NULL;
2727	}
2728
2729	rcu_read_lock();
2730	for (;; i->fd++) {
2731		struct inode *inode;
2732
2733		i->file = task_lookup_next_fd_rcu(i->task, &i->fd);
2734		if (!i->file) {
2735			i->fd = 0;
2736			break;
2737		}
2738		inode = file_inode(i->file);
2739		if (inode->i_sb != i->sb)
2740			continue;
2741		if (get_file_rcu(i->file))
2742			break;
2743	}
2744	rcu_read_unlock();
2745	return i->file;
2746}
2747
2748static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
2749{
2750	struct gfs2_glockfd_iter *i = seq->private;
2751
2752	if (*pos)
2753		return NULL;
2754	while (gfs2_glockfd_next_task(i)) {
2755		if (gfs2_glockfd_next_file(i))
2756			return i;
2757		i->tgid++;
2758	}
2759	return NULL;
2760}
2761
2762static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
2763				   loff_t *pos)
2764{
2765	struct gfs2_glockfd_iter *i = seq->private;
2766
2767	(*pos)++;
2768	i->fd++;
2769	do {
2770		if (gfs2_glockfd_next_file(i))
2771			return i;
2772		i->tgid++;
2773	} while (gfs2_glockfd_next_task(i));
2774	return NULL;
2775}
2776
2777static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
2778{
2779	struct gfs2_glockfd_iter *i = seq->private;
2780
2781	if (i->file)
2782		fput(i->file);
2783	if (i->task)
2784		put_task_struct(i->task);
2785}
2786
2787static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
2788					struct gfs2_glockfd_iter *i)
2789{
2790	struct gfs2_file *fp = i->file->private_data;
2791	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
2792	struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };
2793
2794	if (!READ_ONCE(fl_gh->gh_gl))
2795		return;
2796
2797	spin_lock(&i->file->f_lock);
2798	if (gfs2_holder_initialized(fl_gh))
2799		gl_name = fl_gh->gh_gl->gl_name;
2800	spin_unlock(&i->file->f_lock);
2801
2802	if (gl_name.ln_type != LM_TYPE_RESERVED) {
2803		seq_printf(seq, "%d %u %u/%llx\n",
2804			   i->tgid, i->fd, gl_name.ln_type,
2805			   (unsigned long long)gl_name.ln_number);
2806	}
2807}
2808
2809static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
2810{
2811	struct gfs2_glockfd_iter *i = seq->private;
2812	struct inode *inode = file_inode(i->file);
2813	struct gfs2_glock *gl;
2814
2815	inode_lock_shared(inode);
2816	gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
2817	if (gl) {
2818		seq_printf(seq, "%d %u %u/%llx\n",
2819			   i->tgid, i->fd, gl->gl_name.ln_type,
2820			   (unsigned long long)gl->gl_name.ln_number);
2821	}
2822	gfs2_glockfd_seq_show_flock(seq, i);
2823	inode_unlock_shared(inode);
2824	return 0;
2825}
2826
2827static const struct seq_operations gfs2_glockfd_seq_ops = {
2828	.start = gfs2_glockfd_seq_start,
2829	.next  = gfs2_glockfd_seq_next,
2830	.stop  = gfs2_glockfd_seq_stop,
2831	.show  = gfs2_glockfd_seq_show,
2832};
2833
2834static int gfs2_glockfd_open(struct inode *inode, struct file *file)
2835{
2836	struct gfs2_glockfd_iter *i;
2837	struct gfs2_sbd *sdp = inode->i_private;
2838
2839	i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
2840			       sizeof(struct gfs2_glockfd_iter));
2841	if (!i)
2842		return -ENOMEM;
2843	i->sb = sdp->sd_vfs;
2844	return 0;
2845}
2846
2847static const struct file_operations gfs2_glockfd_fops = {
2848	.owner   = THIS_MODULE,
2849	.open    = gfs2_glockfd_open,
2850	.read    = seq_read,
2851	.llseek  = seq_lseek,
2852	.release = seq_release_private,
2853};
2854
2855DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
2856
2857void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2858{
2859	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2860
2861	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2862			    &gfs2_glocks_fops);
2863
2864	debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2865			    &gfs2_glockfd_fops);
2866
2867	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2868			    &gfs2_glstats_fops);
2869
2870	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2871			    &gfs2_sbstats_fops);
2872}
2873
2874void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2875{
2876	debugfs_remove_recursive(sdp->debugfs_dir);
2877	sdp->debugfs_dir = NULL;
2878}
2879
2880void gfs2_register_debugfs(void)
2881{
2882	gfs2_root = debugfs_create_dir("gfs2", NULL);
2883}
2884
2885void gfs2_unregister_debugfs(void)
2886{
2887	debugfs_remove(gfs2_root);
2888	gfs2_root = NULL;
2889}