v3.1 (fs/gfs2/glock.c)
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/sched.h>
  11#include <linux/slab.h>
  12#include <linux/spinlock.h>
  13#include <linux/buffer_head.h>
  14#include <linux/delay.h>
  15#include <linux/sort.h>
  16#include <linux/jhash.h>
  17#include <linux/kallsyms.h>
  18#include <linux/gfs2_ondisk.h>
  19#include <linux/list.h>
  20#include <linux/wait.h>
  21#include <linux/module.h>
  22#include <asm/uaccess.h>
  23#include <linux/seq_file.h>
  24#include <linux/debugfs.h>
  25#include <linux/kthread.h>
  26#include <linux/freezer.h>
  27#include <linux/workqueue.h>
  28#include <linux/jiffies.h>
  29#include <linux/rcupdate.h>
  30#include <linux/rculist_bl.h>
  31#include <linux/bit_spinlock.h>
  32
  33#include "gfs2.h"
  34#include "incore.h"
  35#include "glock.h"
  36#include "glops.h"
  37#include "inode.h"
  38#include "lops.h"
  39#include "meta_io.h"
  40#include "quota.h"
  41#include "super.h"
  42#include "util.h"
  43#include "bmap.h"
  44#define CREATE_TRACE_POINTS
  45#include "trace_gfs2.h"
  46
  47struct gfs2_glock_iter {
  48	int hash;			/* hash bucket index         */
  49	struct gfs2_sbd *sdp;		/* incore superblock         */
  50	struct gfs2_glock *gl;		/* current glock struct      */
  51	char string[512];		/* scratch space             */
  52};
  53
  54typedef void (*glock_examiner) (struct gfs2_glock * gl);
  55
  56static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
  57#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
  58static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  59
  60static struct dentry *gfs2_root;
  61static struct workqueue_struct *glock_workqueue;
  62struct workqueue_struct *gfs2_delete_workqueue;
  63static LIST_HEAD(lru_list);
  64static atomic_t lru_count = ATOMIC_INIT(0);
  65static DEFINE_SPINLOCK(lru_lock);
  66
  67#define GFS2_GL_HASH_SHIFT      15
  68#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
  69#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
  70
  71static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
   72
  73
  74/**
  75 * gl_hash() - Turn glock number into hash bucket number
   76 * @sdp: The incore superblock
   77 * @name: The lock name
  78 * Returns: The number of the corresponding hash bucket
  79 */
  80
  81static unsigned int gl_hash(const struct gfs2_sbd *sdp,
  82			    const struct lm_lockname *name)
  83{
  84	unsigned int h;
  85
  86	h = jhash(&name->ln_number, sizeof(u64), 0);
  87	h = jhash(&name->ln_type, sizeof(unsigned int), h);
  88	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
  89	h &= GFS2_GL_HASH_MASK;
  90
  91	return h;
  92}
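/*
 * Editorial note: the glock hash table is a single global array shared
 * by every mounted GFS2 filesystem, which is why the superblock pointer
 * is mixed into the hash above and why search_bucket() below must also
 * compare gl_sbd. With GFS2_GL_HASH_SHIFT = 15 the table has 32768
 * buckets.
 */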
  93
  94static inline void spin_lock_bucket(unsigned int hash)
  95{
  96	hlist_bl_lock(&gl_hash_table[hash]);
  97}
  98
  99static inline void spin_unlock_bucket(unsigned int hash)
 100{
 101	hlist_bl_unlock(&gl_hash_table[hash]);
 102}
 103
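/*
 * (Comment added editorially.) RCU callback which returns the glock to
 * whichever slab cache it was allocated from; glock types that carry an
 * address space (GLOF_ASPACE) come from a larger cache.
 */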
 104static void gfs2_glock_dealloc(struct rcu_head *rcu)
 105{
 106	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 107
 108	if (gl->gl_ops->go_flags & GLOF_ASPACE)
 109		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
 110	else
 111		kmem_cache_free(gfs2_glock_cachep, gl);
 112}
 113
 114void gfs2_glock_free(struct gfs2_glock *gl)
 115{
 116	struct gfs2_sbd *sdp = gl->gl_sbd;
 117
 118	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 119	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 120		wake_up(&sdp->sd_glock_wait);
 121}
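/*
 * Editorial note: freeing is deferred through call_rcu() so that
 * lockless readers walking the hash chains under rcu_read_lock() (see
 * search_bucket()) can never touch freed memory. sd_glock_disposal
 * counts glocks awaiting disposal; gfs2_gl_hash_clear() waits for it
 * to reach zero at unmount time.
 */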
 122
 123/**
 124 * gfs2_glock_hold() - increment reference count on glock
 125 * @gl: The glock to hold
 126 *
 127 */
 128
 129void gfs2_glock_hold(struct gfs2_glock *gl)
 130{
 131	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
 132	atomic_inc(&gl->gl_ref);
 133}
 134
 135/**
 136 * demote_ok - Check to see if it's ok to unlock a glock
 137 * @gl: the glock
 138 *
 139 * Returns: 1 if it's ok
 140 */
 141
 142static int demote_ok(const struct gfs2_glock *gl)
 143{
 144	const struct gfs2_glock_operations *glops = gl->gl_ops;
 145
 146	if (gl->gl_state == LM_ST_UNLOCKED)
 147		return 0;
 148	if (!list_empty(&gl->gl_holders))
 149		return 0;
 150	if (glops->go_demote_ok)
 151		return glops->go_demote_ok(gl);
 152	return 1;
 153}
 154
 155
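/**
 * gfs2_glock_add_to_lru - Add/move a glock to the tail of the LRU list
 * @gl: the glock
 *
 * (Kernel-doc added editorially.) If the glock is already on the list
 * it is moved to the tail, so the LRU stays ordered by most recent
 * release.
 */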
 156void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 157{
 158	spin_lock(&lru_lock);
 159
 160	if (!list_empty(&gl->gl_lru))
 161		list_del_init(&gl->gl_lru);
 162	else
 163		atomic_inc(&lru_count);
 164
 165	list_add_tail(&gl->gl_lru, &lru_list);
 166	set_bit(GLF_LRU, &gl->gl_flags);
 167	spin_unlock(&lru_lock);
 168}
 169
 170static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 171{
 172	spin_lock(&lru_lock);
 173	if (!list_empty(&gl->gl_lru)) {
 174		list_del_init(&gl->gl_lru);
 175		atomic_dec(&lru_count);
 176		clear_bit(GLF_LRU, &gl->gl_flags);
 177	}
 178	spin_unlock(&lru_lock);
 179}
 180
 181/**
 182 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 183 * @gl: the glock
 184 *
 185 * If the glock is demotable, then we add it (or move it) to the end
 186 * of the glock LRU list.
 187 */
 188
 189static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 190{
 191	if (demote_ok(gl))
 192		gfs2_glock_add_to_lru(gl);
 193}
 194
 195/**
 196 * gfs2_glock_put_nolock() - Decrement reference count on glock
 197 * @gl: The glock to put
 198 *
 199 * This function should only be used if the caller has its own reference
 200 * to the glock, in addition to the one it is dropping.
 201 */
 202
 203void gfs2_glock_put_nolock(struct gfs2_glock *gl)
 204{
 205	if (atomic_dec_and_test(&gl->gl_ref))
 206		GLOCK_BUG_ON(gl, 1);
 207}
 208
 209/**
 210 * gfs2_glock_put() - Decrement reference count on glock
 211 * @gl: The glock to put
 212 *
 213 */
 214
 215void gfs2_glock_put(struct gfs2_glock *gl)
 216{
 217	struct gfs2_sbd *sdp = gl->gl_sbd;
 218	struct address_space *mapping = gfs2_glock2aspace(gl);
 219
 220	if (atomic_dec_and_test(&gl->gl_ref)) {
 221		spin_lock_bucket(gl->gl_hash);
 222		hlist_bl_del_rcu(&gl->gl_list);
 223		spin_unlock_bucket(gl->gl_hash);
 224		gfs2_glock_remove_from_lru(gl);
 225		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 226		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 227		trace_gfs2_glock_put(gl);
 228		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 229	}
 230}
 231
 232/**
 233 * search_bucket() - Find struct gfs2_glock by lock number
 234 * @bucket: the bucket to search
 235 * @name: The lock name
 236 *
 237 * Returns: NULL, or the struct gfs2_glock with the requested number
 238 */
 239
 240static struct gfs2_glock *search_bucket(unsigned int hash,
 241					const struct gfs2_sbd *sdp,
 242					const struct lm_lockname *name)
 243{
 244	struct gfs2_glock *gl;
 245	struct hlist_bl_node *h;
 246
 247	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
 248		if (!lm_name_equal(&gl->gl_name, name))
 249			continue;
 250		if (gl->gl_sbd != sdp)
 251			continue;
 252		if (atomic_inc_not_zero(&gl->gl_ref))
 253			return gl;
 254	}
 255
 256	return NULL;
 257}
 258
 259/**
  260 * may_grant - check if it's ok to grant a new lock
  261 * @gl: The glock
  262 * @gh: The lock request which we wish to grant
  263 *
  264 * Returns: true if it's ok to grant the lock
 265 */
 266
 267static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 268{
 269	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
 270	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 271	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 272		return 0;
 273	if (gl->gl_state == gh->gh_state)
 274		return 1;
 275	if (gh->gh_flags & GL_EXACT)
 276		return 0;
 277	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 278		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
 279			return 1;
 280		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
 281			return 1;
 282	}
 283	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
 284		return 1;
 285	return 0;
 286}
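/*
 * Editorial summary of may_grant(): if either the request or the queue
 * head wants LM_ST_EXCLUSIVE, only the head itself can be granted; a
 * request matching the current glock state is granted at once; GL_EXACT
 * rejects any other state; under an EX glock, SH or DF requests are
 * granted only while the queue head holds that same state; and
 * LM_FLAG_ANY accepts any state except unlocked.
 */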
 287
 288static void gfs2_holder_wake(struct gfs2_holder *gh)
 289{
 290	clear_bit(HIF_WAIT, &gh->gh_iflags);
 291	smp_mb__after_clear_bit();
 292	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 293}
 294
 295/**
 296 * do_error - Something unexpected has happened during a lock request
 297 *
 298 */
 299
 300static inline void do_error(struct gfs2_glock *gl, const int ret)
 301{
 302	struct gfs2_holder *gh, *tmp;
 303
 304	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 305		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 306			continue;
 307		if (ret & LM_OUT_ERROR)
 308			gh->gh_error = -EIO;
 309		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 310			gh->gh_error = GLR_TRYFAILED;
 311		else
 312			continue;
 313		list_del_init(&gh->gh_list);
 314		trace_gfs2_glock_queue(gh, 0);
 315		gfs2_holder_wake(gh);
 316	}
 317}
 318
 319/**
 320 * do_promote - promote as many requests as possible on the current queue
 321 * @gl: The glock
 322 * 
 323 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 324 *          if a type specific operation is underway.
 325 */
 326
 327static int do_promote(struct gfs2_glock *gl)
 328__releases(&gl->gl_spin)
 329__acquires(&gl->gl_spin)
 330{
 331	const struct gfs2_glock_operations *glops = gl->gl_ops;
 332	struct gfs2_holder *gh, *tmp;
 333	int ret;
 334
 335restart:
 336	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 337		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 338			continue;
 339		if (may_grant(gl, gh)) {
 340			if (gh->gh_list.prev == &gl->gl_holders &&
 341			    glops->go_lock) {
 342				spin_unlock(&gl->gl_spin);
 343				/* FIXME: eliminate this eventually */
 344				ret = glops->go_lock(gh);
 345				spin_lock(&gl->gl_spin);
 346				if (ret) {
 347					if (ret == 1)
 348						return 2;
 349					gh->gh_error = ret;
 350					list_del_init(&gh->gh_list);
 351					trace_gfs2_glock_queue(gh, 0);
 352					gfs2_holder_wake(gh);
 353					goto restart;
 354				}
 355				set_bit(HIF_HOLDER, &gh->gh_iflags);
 356				trace_gfs2_promote(gh, 1);
 357				gfs2_holder_wake(gh);
 358				goto restart;
 359			}
 360			set_bit(HIF_HOLDER, &gh->gh_iflags);
 361			trace_gfs2_promote(gh, 0);
 362			gfs2_holder_wake(gh);
 363			continue;
 364		}
 365		if (gh->gh_list.prev == &gl->gl_holders)
 366			return 1;
 367		do_error(gl, 0);
 368		break;
 369	}
 370	return 0;
 371}
 372
 373/**
 374 * find_first_waiter - find the first gh that's waiting for the glock
 375 * @gl: the glock
 376 */
 377
 378static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 379{
 380	struct gfs2_holder *gh;
 381
 382	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 383		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 384			return gh;
 385	}
 386	return NULL;
 387}
 388
 389/**
 390 * state_change - record that the glock is now in a different state
 391 * @gl: the glock
  392 * @new_state: the new state
 393 *
 394 */
 395
 396static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 397{
 398	int held1, held2;
 399
 400	held1 = (gl->gl_state != LM_ST_UNLOCKED);
 401	held2 = (new_state != LM_ST_UNLOCKED);
 402
 403	if (held1 != held2) {
 404		if (held2)
 405			gfs2_glock_hold(gl);
 406		else
 407			gfs2_glock_put_nolock(gl);
 408	}
 409	if (held1 && held2 && list_empty(&gl->gl_holders))
 410		clear_bit(GLF_QUEUED, &gl->gl_flags);
 411
 412	if (new_state != gl->gl_target)
 413		/* shorten our minimum hold time */
 414		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 415				       GL_GLOCK_MIN_HOLD);
 416	gl->gl_state = new_state;
 417	gl->gl_tchange = jiffies;
 418}
 419
 420static void gfs2_demote_wake(struct gfs2_glock *gl)
 421{
 422	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 423	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 424	smp_mb__after_clear_bit();
 425	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 426}
 427
 428/**
 429 * finish_xmote - The DLM has replied to one of our lock requests
 430 * @gl: The glock
 431 * @ret: The status from the DLM
 432 *
 433 */
 434
 435static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 436{
 437	const struct gfs2_glock_operations *glops = gl->gl_ops;
 438	struct gfs2_holder *gh;
 439	unsigned state = ret & LM_OUT_ST_MASK;
 440	int rv;
 441
 442	spin_lock(&gl->gl_spin);
 443	trace_gfs2_glock_state_change(gl, state);
 444	state_change(gl, state);
 445	gh = find_first_waiter(gl);
 446
 447	/* Demote to UN request arrived during demote to SH or DF */
 448	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 449	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 450		gl->gl_target = LM_ST_UNLOCKED;
 451
 452	/* Check for state != intended state */
 453	if (unlikely(state != gl->gl_target)) {
 454		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 455			/* move to back of queue and try next entry */
 456			if (ret & LM_OUT_CANCELED) {
 457				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 458					list_move_tail(&gh->gh_list, &gl->gl_holders);
 459				gh = find_first_waiter(gl);
 460				gl->gl_target = gh->gh_state;
 461				goto retry;
 462			}
 463			/* Some error or failed "try lock" - report it */
 464			if ((ret & LM_OUT_ERROR) ||
 465			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 466				gl->gl_target = gl->gl_state;
 467				do_error(gl, ret);
 468				goto out;
 469			}
 470		}
 471		switch(state) {
 472		/* Unlocked due to conversion deadlock, try again */
 473		case LM_ST_UNLOCKED:
 474retry:
 475			do_xmote(gl, gh, gl->gl_target);
 476			break;
 477		/* Conversion fails, unlock and try again */
 478		case LM_ST_SHARED:
 479		case LM_ST_DEFERRED:
 480			do_xmote(gl, gh, LM_ST_UNLOCKED);
 481			break;
 482		default: /* Everything else */
 483			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
 484			GLOCK_BUG_ON(gl, 1);
 485		}
 486		spin_unlock(&gl->gl_spin);
 487		return;
 488	}
 489
 490	/* Fast path - we got what we asked for */
 491	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 492		gfs2_demote_wake(gl);
 493	if (state != LM_ST_UNLOCKED) {
 494		if (glops->go_xmote_bh) {
 495			spin_unlock(&gl->gl_spin);
 496			rv = glops->go_xmote_bh(gl, gh);
 497			spin_lock(&gl->gl_spin);
 498			if (rv) {
 499				do_error(gl, rv);
 500				goto out;
 501			}
 502		}
 503		rv = do_promote(gl);
 504		if (rv == 2)
 505			goto out_locked;
 506	}
 507out:
 508	clear_bit(GLF_LOCK, &gl->gl_flags);
 509out_locked:
 510	spin_unlock(&gl->gl_spin);
 511}
 512
 513/**
 514 * do_xmote - Calls the DLM to change the state of a lock
 515 * @gl: The lock state
 516 * @gh: The holder (only for promotes)
 517 * @target: The target lock state
 518 *
 519 */
 520
 521static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
 522__releases(&gl->gl_spin)
 523__acquires(&gl->gl_spin)
 524{
 525	const struct gfs2_glock_operations *glops = gl->gl_ops;
 526	struct gfs2_sbd *sdp = gl->gl_sbd;
 527	unsigned int lck_flags = gh ? gh->gh_flags : 0;
 528	int ret;
 529
 530	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 531		      LM_FLAG_PRIORITY);
 532	GLOCK_BUG_ON(gl, gl->gl_state == target);
 533	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 534	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 535	    glops->go_inval) {
 536		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 537		do_error(gl, 0); /* Fail queued try locks */
 538	}
 539	gl->gl_req = target;
 540	spin_unlock(&gl->gl_spin);
 541	if (glops->go_xmote_th)
 542		glops->go_xmote_th(gl);
 543	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 544		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 545	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 546
 547	gfs2_glock_hold(gl);
 548	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 549		/* lock_dlm */
 550		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 551		GLOCK_BUG_ON(gl, ret);
 552	} else { /* lock_nolock */
 553		finish_xmote(gl, target);
 554		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 555			gfs2_glock_put(gl);
 556	}
 557
 558	spin_lock(&gl->gl_spin);
 559}
 560
 561/**
 562 * find_first_holder - find the first "holder" gh
 563 * @gl: the glock
 564 */
 565
 566static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 567{
 568	struct gfs2_holder *gh;
 569
 570	if (!list_empty(&gl->gl_holders)) {
 571		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 572		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 573			return gh;
 574	}
 575	return NULL;
 576}
 577
 578/**
 579 * run_queue - do all outstanding tasks related to a glock
 580 * @gl: The glock in question
 581 * @nonblock: True if we must not block in run_queue
 582 *
 583 */
 584
 585static void run_queue(struct gfs2_glock *gl, const int nonblock)
 586__releases(&gl->gl_spin)
 587__acquires(&gl->gl_spin)
 588{
 589	struct gfs2_holder *gh = NULL;
 590	int ret;
 591
 592	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 593		return;
 594
 595	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 596
 597	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 598	    gl->gl_demote_state != gl->gl_state) {
 599		if (find_first_holder(gl))
 600			goto out_unlock;
 601		if (nonblock)
 602			goto out_sched;
 603		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 604		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 605		gl->gl_target = gl->gl_demote_state;
 606	} else {
 607		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 608			gfs2_demote_wake(gl);
 609		ret = do_promote(gl);
 610		if (ret == 0)
 611			goto out_unlock;
 612		if (ret == 2)
 613			goto out;
 614		gh = find_first_waiter(gl);
 615		gl->gl_target = gh->gh_state;
 616		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 617			do_error(gl, 0); /* Fail queued try locks */
 618	}
 619	do_xmote(gl, gh, gl->gl_target);
 620out:
 621	return;
 622
 623out_sched:
 624	clear_bit(GLF_LOCK, &gl->gl_flags);
 625	smp_mb__after_clear_bit();
 626	gfs2_glock_hold(gl);
 627	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 628		gfs2_glock_put_nolock(gl);
 629	return;
 630
 631out_unlock:
 632	clear_bit(GLF_LOCK, &gl->gl_flags);
 633	smp_mb__after_clear_bit();
 634	return;
 635}
 636
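/*
 * (Comment added editorially.) Work item, apparently run off
 * gfs2_delete_workqueue: re-look up the inode behind this glock and
 * prune its dcache aliases so that the final iput() can take place;
 * used when disposing of unlinked inodes (note GFS2_BLKST_UNLINKED
 * below).
 */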
 637static void delete_work_func(struct work_struct *work)
 638{
 639	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
 640	struct gfs2_sbd *sdp = gl->gl_sbd;
 641	struct gfs2_inode *ip;
 642	struct inode *inode;
 643	u64 no_addr = gl->gl_name.ln_number;
 644
 645	ip = gl->gl_object;
 646	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
 647
 648	if (ip)
 649		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
 650	else
 651		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
 652	if (inode && !IS_ERR(inode)) {
 653		d_prune_aliases(inode);
 654		iput(inode);
 655	}
 656	gfs2_glock_put(gl);
 657}
 658
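/*
 * (Comment added editorially.) Main glock work item: process a pending
 * reply from the DLM, turn a pending demote into a real one once the
 * minimum hold time has expired, run the state machine, and requeue
 * itself if a delay is still outstanding.
 */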
 659static void glock_work_func(struct work_struct *work)
 660{
 661	unsigned long delay = 0;
 662	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 663	int drop_ref = 0;
 664
 665	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 666		finish_xmote(gl, gl->gl_reply);
 667		drop_ref = 1;
 668	}
 669	spin_lock(&gl->gl_spin);
 670	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 671	    gl->gl_state != LM_ST_UNLOCKED &&
 672	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
 673		unsigned long holdtime, now = jiffies;
 674
 675		holdtime = gl->gl_tchange + gl->gl_hold_time;
 676		if (time_before(now, holdtime))
 677			delay = holdtime - now;
 678
 679		if (!delay) {
 680			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
 681			set_bit(GLF_DEMOTE, &gl->gl_flags);
 682		}
 683	}
 684	run_queue(gl, 0);
 685	spin_unlock(&gl->gl_spin);
 686	if (!delay)
 687		gfs2_glock_put(gl);
 688	else {
 689		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 690			delay = 0;
 691		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 692			gfs2_glock_put(gl);
 693	}
 694	if (drop_ref)
 695		gfs2_glock_put(gl);
 696}
 697
 698/**
 699 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 700 * @sdp: The GFS2 superblock
 701 * @number: the lock number
 702 * @glops: The glock_operations to use
 703 * @create: If 0, don't create the glock if it doesn't exist
 704 * @glp: the glock is returned here
 705 *
 706 * This does not lock a glock, just finds/creates structures for one.
 707 *
 708 * Returns: errno
 709 */
 710
 711int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 712		   const struct gfs2_glock_operations *glops, int create,
 713		   struct gfs2_glock **glp)
 714{
 715	struct super_block *s = sdp->sd_vfs;
 716	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
 717	struct gfs2_glock *gl, *tmp;
 718	unsigned int hash = gl_hash(sdp, &name);
 719	struct address_space *mapping;
 720	struct kmem_cache *cachep;
 721
 722	rcu_read_lock();
 723	gl = search_bucket(hash, sdp, &name);
 724	rcu_read_unlock();
 725
 726	*glp = gl;
 727	if (gl)
 728		return 0;
 729	if (!create)
 730		return -ENOENT;
 731
 732	if (glops->go_flags & GLOF_ASPACE)
 733		cachep = gfs2_glock_aspace_cachep;
 734	else
 735		cachep = gfs2_glock_cachep;
 736	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
 737	if (!gl)
 738		return -ENOMEM;
 739
 740	atomic_inc(&sdp->sd_glock_disposal);
 741	gl->gl_flags = 0;
 742	gl->gl_name = name;
 743	atomic_set(&gl->gl_ref, 1);
 744	gl->gl_state = LM_ST_UNLOCKED;
 745	gl->gl_target = LM_ST_UNLOCKED;
 746	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 747	gl->gl_hash = hash;
 748	gl->gl_ops = glops;
 749	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
 750	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 751	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
 752	gl->gl_tchange = jiffies;
 753	gl->gl_object = NULL;
 754	gl->gl_sbd = sdp;
 755	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 756	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 757	INIT_WORK(&gl->gl_delete, delete_work_func);
 758
 759	mapping = gfs2_glock2aspace(gl);
 760	if (mapping) {
  761		mapping->a_ops = &gfs2_meta_aops;
 762		mapping->host = s->s_bdev->bd_inode;
 763		mapping->flags = 0;
 764		mapping_set_gfp_mask(mapping, GFP_NOFS);
 765		mapping->assoc_mapping = NULL;
 766		mapping->backing_dev_info = s->s_bdi;
 767		mapping->writeback_index = 0;
 768	}
 769
 770	spin_lock_bucket(hash);
 771	tmp = search_bucket(hash, sdp, &name);
 772	if (tmp) {
 773		spin_unlock_bucket(hash);
 774		kmem_cache_free(cachep, gl);
 775		atomic_dec(&sdp->sd_glock_disposal);
 776		gl = tmp;
 777	} else {
 778		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
 779		spin_unlock_bucket(hash);
 780	}
 781
 782	*glp = gl;
 783
 784	return 0;
 785}
 786
 787/**
 788 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 789 * @gl: the glock
 790 * @state: the state we're requesting
 791 * @flags: the modifier flags
 792 * @gh: the holder structure
 793 *
 794 */
 795
 796void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 797		      struct gfs2_holder *gh)
 798{
 799	INIT_LIST_HEAD(&gh->gh_list);
 800	gh->gh_gl = gl;
 801	gh->gh_ip = (unsigned long)__builtin_return_address(0);
 802	gh->gh_owner_pid = get_pid(task_pid(current));
 803	gh->gh_state = state;
 804	gh->gh_flags = flags;
 805	gh->gh_error = 0;
 806	gh->gh_iflags = 0;
 807	gfs2_glock_hold(gl);
 808}
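/*
 * Illustrative usage sketch (editorial, not part of the original file):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error == 0) {
 *		... access the object protected by the glock ...
 *		gfs2_glock_dq_uninit(&gh);	(dq + holder uninit)
 *	} else {
 *		gfs2_holder_uninit(&gh);
 *	}
 */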
 809
 810/**
 811 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 812 * @state: the state we're requesting
 813 * @flags: the modifier flags
 814 * @gh: the holder structure
 815 *
 816 * Don't mess with the glock.
 817 *
 818 */
 819
 820void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
 821{
 822	gh->gh_state = state;
 823	gh->gh_flags = flags;
 824	gh->gh_iflags = 0;
 825	gh->gh_ip = (unsigned long)__builtin_return_address(0);
 826	if (gh->gh_owner_pid)
 827		put_pid(gh->gh_owner_pid);
 828	gh->gh_owner_pid = get_pid(task_pid(current));
 829}
 830
 831/**
 832 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 833 * @gh: the holder structure
 834 *
 835 */
 836
 837void gfs2_holder_uninit(struct gfs2_holder *gh)
 838{
 839	put_pid(gh->gh_owner_pid);
 840	gfs2_glock_put(gh->gh_gl);
 841	gh->gh_gl = NULL;
 842	gh->gh_ip = 0;
 843}
 844
 845/**
 846 * gfs2_glock_holder_wait
 847 * @word: unused
 848 *
 849 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 850 * field. Thus I've separated these otherwise identical functions in
 851 * order to be more informative to the user.
 852 */
 853
 854static int gfs2_glock_holder_wait(void *word)
 855{
  856	schedule();
  857	return 0;
 858}
 859
 860static int gfs2_glock_demote_wait(void *word)
 861{
 862	schedule();
 863	return 0;
 864}
 865
 866static void wait_on_holder(struct gfs2_holder *gh)
 867{
 868	unsigned long time1 = jiffies;
 869
 870	might_sleep();
 871	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
 872	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
 873		/* Lengthen the minimum hold time. */
 874		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
 875					      GL_GLOCK_HOLD_INCR,
 876					      GL_GLOCK_MAX_HOLD);
 877}
 878
 879static void wait_on_demote(struct gfs2_glock *gl)
 880{
 881	might_sleep();
 882	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
 883}
 884
 885/**
 886 * handle_callback - process a demote request
 887 * @gl: the glock
 888 * @state: the state the caller wants us to change to
 889 *
 890 * There are only two requests that we are going to see in actual
  891 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 892 */
 893
 894static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 895			    unsigned long delay)
 896{
 897	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 898
 899	set_bit(bit, &gl->gl_flags);
 900	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 901		gl->gl_demote_state = state;
 902		gl->gl_demote_time = jiffies;
 903	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 904			gl->gl_demote_state != state) {
 905		gl->gl_demote_state = LM_ST_UNLOCKED;
 906	}
 907	if (gl->gl_ops->go_callback)
 908		gl->gl_ops->go_callback(gl);
 909	trace_gfs2_demote_rq(gl);
 910}
 911
 912/**
 913 * gfs2_glock_wait - wait on a glock acquisition
 914 * @gh: the glock holder
 915 *
 916 * Returns: 0 on success
 917 */
 918
 919int gfs2_glock_wait(struct gfs2_holder *gh)
 920{
 921	wait_on_holder(gh);
 922	return gh->gh_error;
 923}
 924
 925void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 926{
 927	struct va_format vaf;
 928	va_list args;
 929
 930	va_start(args, fmt);
 931
 932	if (seq) {
 933		struct gfs2_glock_iter *gi = seq->private;
 934		vsprintf(gi->string, fmt, args);
 935		seq_printf(seq, gi->string);
 936	} else {
 937		vaf.fmt = fmt;
 938		vaf.va = &args;
 939
 940		printk(KERN_ERR " %pV", &vaf);
 941	}
 942
 943	va_end(args);
 944}
 945
 946/**
 947 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 948 * @gh: the holder structure to add
 949 *
 950 * Eventually we should move the recursive locking trap to a
 951 * debugging option or something like that. This is the fast
 952 * path and needs to have the minimum number of distractions.
 953 * 
 954 */
 955
 956static inline void add_to_queue(struct gfs2_holder *gh)
 957__releases(&gl->gl_spin)
 958__acquires(&gl->gl_spin)
 959{
 960	struct gfs2_glock *gl = gh->gh_gl;
 961	struct gfs2_sbd *sdp = gl->gl_sbd;
 962	struct list_head *insert_pt = NULL;
 963	struct gfs2_holder *gh2;
 964	int try_lock = 0;
 965
 966	BUG_ON(gh->gh_owner_pid == NULL);
 967	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 968		BUG();
 969
 970	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 971		if (test_bit(GLF_LOCK, &gl->gl_flags))
 972			try_lock = 1;
 973		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 974			goto fail;
 975	}
 976
 977	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
 978		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
 979		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
 980			goto trap_recursive;
 981		if (try_lock &&
 982		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
 983		    !may_grant(gl, gh)) {
 984fail:
 985			gh->gh_error = GLR_TRYFAILED;
 986			gfs2_holder_wake(gh);
 987			return;
 988		}
 989		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 990			continue;
 991		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 992			insert_pt = &gh2->gh_list;
 993	}
 994	set_bit(GLF_QUEUED, &gl->gl_flags);
 995	trace_gfs2_glock_queue(gh, 1);
 996	if (likely(insert_pt == NULL)) {
 997		list_add_tail(&gh->gh_list, &gl->gl_holders);
 998		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
 999			goto do_cancel;
1000		return;
1001	}
1002	list_add_tail(&gh->gh_list, insert_pt);
1003do_cancel:
1004	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1005	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1006		spin_unlock(&gl->gl_spin);
1007		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1008			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1009		spin_lock(&gl->gl_spin);
1010	}
1011	return;
1012
1013trap_recursive:
1014	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1015	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1016	printk(KERN_ERR "lock type: %d req lock state : %d\n",
1017	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1018	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1019	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1020	printk(KERN_ERR "lock type: %d req lock state : %d\n",
1021	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1022	__dump_glock(NULL, gl);
1023	BUG();
1024}
1025
1026/**
1027 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1028 * @gh: the holder structure
1029 *
1030 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1031 *
1032 * Returns: 0, GLR_TRYFAILED, or errno on failure
1033 */
1034
1035int gfs2_glock_nq(struct gfs2_holder *gh)
1036{
1037	struct gfs2_glock *gl = gh->gh_gl;
1038	struct gfs2_sbd *sdp = gl->gl_sbd;
1039	int error = 0;
1040
1041	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1042		return -EIO;
1043
1044	if (test_bit(GLF_LRU, &gl->gl_flags))
1045		gfs2_glock_remove_from_lru(gl);
1046
1047	spin_lock(&gl->gl_spin);
1048	add_to_queue(gh);
1049	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1050	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1051		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1052	run_queue(gl, 1);
1053	spin_unlock(&gl->gl_spin);
1054
1055	if (!(gh->gh_flags & GL_ASYNC))
1056		error = gfs2_glock_wait(gh);
1057
1058	return error;
1059}
1060
1061/**
1062 * gfs2_glock_poll - poll to see if an async request has been completed
1063 * @gh: the holder
1064 *
1065 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1066 */
1067
1068int gfs2_glock_poll(struct gfs2_holder *gh)
1069{
1070	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1071}
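/*
 * Illustrative sketch (editorial): with GL_ASYNC the enqueue returns
 * without waiting and completion is collected later:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		(never returns an error here)
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */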
1072
1073/**
1074 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1075 * @gh: the glock holder
1076 *
1077 */
1078
1079void gfs2_glock_dq(struct gfs2_holder *gh)
1080{
1081	struct gfs2_glock *gl = gh->gh_gl;
1082	const struct gfs2_glock_operations *glops = gl->gl_ops;
1083	unsigned delay = 0;
1084	int fast_path = 0;
1085
1086	spin_lock(&gl->gl_spin);
1087	if (gh->gh_flags & GL_NOCACHE)
1088		handle_callback(gl, LM_ST_UNLOCKED, 0);
1089
1090	list_del_init(&gh->gh_list);
1091	if (find_first_holder(gl) == NULL) {
1092		if (glops->go_unlock) {
1093			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1094			spin_unlock(&gl->gl_spin);
1095			glops->go_unlock(gh);
1096			spin_lock(&gl->gl_spin);
1097			clear_bit(GLF_LOCK, &gl->gl_flags);
1098		}
1099		if (list_empty(&gl->gl_holders) &&
1100		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1101		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1102			fast_path = 1;
1103	}
1104	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1105		__gfs2_glock_schedule_for_reclaim(gl);
1106	trace_gfs2_glock_queue(gh, 0);
1107	spin_unlock(&gl->gl_spin);
1108	if (likely(fast_path))
1109		return;
1110
1111	gfs2_glock_hold(gl);
1112	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1113	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1114	    gl->gl_name.ln_type == LM_TYPE_INODE)
1115		delay = gl->gl_hold_time;
1116	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1117		gfs2_glock_put(gl);
1118}
1119
1120void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1121{
1122	struct gfs2_glock *gl = gh->gh_gl;
1123	gfs2_glock_dq(gh);
1124	wait_on_demote(gl);
1125}
1126
1127/**
1128 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1129 * @gh: the holder structure
1130 *
1131 */
1132
1133void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1134{
1135	gfs2_glock_dq(gh);
1136	gfs2_holder_uninit(gh);
1137}
1138
1139/**
1140 * gfs2_glock_nq_num - acquire a glock based on lock number
1141 * @sdp: the filesystem
1142 * @number: the lock number
1143 * @glops: the glock operations for the type of glock
1144 * @state: the state to acquire the glock in
1145 * @flags: modifier flags for the acquisition
1146 * @gh: the struct gfs2_holder
1147 *
1148 * Returns: errno
1149 */
1150
1151int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1152		      const struct gfs2_glock_operations *glops,
1153		      unsigned int state, int flags, struct gfs2_holder *gh)
1154{
1155	struct gfs2_glock *gl;
1156	int error;
1157
1158	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1159	if (!error) {
1160		error = gfs2_glock_nq_init(gl, state, flags, gh);
1161		gfs2_glock_put(gl);
1162	}
1163
1164	return error;
1165}
1166
1167/**
1168 * glock_compare - Compare two struct gfs2_glock structures for sorting
1169 * @arg_a: the first structure
1170 * @arg_b: the second structure
1171 *
1172 */
1173
1174static int glock_compare(const void *arg_a, const void *arg_b)
1175{
1176	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1177	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1178	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1179	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1180
1181	if (a->ln_number > b->ln_number)
1182		return 1;
1183	if (a->ln_number < b->ln_number)
1184		return -1;
1185	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1186	return 0;
1187}
1188
1189/**
 1190 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1191 * @num_gh: the number of structures
1192 * @ghs: an array of struct gfs2_holder structures
1193 *
1194 * Returns: 0 on success (all glocks acquired),
1195 *          errno on failure (no glocks acquired)
1196 */
1197
1198static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1199		     struct gfs2_holder **p)
1200{
1201	unsigned int x;
1202	int error = 0;
1203
1204	for (x = 0; x < num_gh; x++)
1205		p[x] = &ghs[x];
1206
1207	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1208
1209	for (x = 0; x < num_gh; x++) {
1210		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1211
1212		error = gfs2_glock_nq(p[x]);
1213		if (error) {
1214			while (x--)
1215				gfs2_glock_dq(p[x]);
1216			break;
1217		}
1218	}
1219
1220	return error;
1221}
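/*
 * Editorial note: sorting the holders by lock number via
 * glock_compare() before acquiring them means every caller takes
 * multiple glocks in the same global order, which is what makes
 * nq_m_sync() deadlock free.
 */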
1222
1223/**
1224 * gfs2_glock_nq_m - acquire multiple glocks
1225 * @num_gh: the number of structures
1226 * @ghs: an array of struct gfs2_holder structures
1227 *
1228 *
1229 * Returns: 0 on success (all glocks acquired),
1230 *          errno on failure (no glocks acquired)
1231 */
1232
1233int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1234{
1235	struct gfs2_holder *tmp[4];
1236	struct gfs2_holder **pph = tmp;
1237	int error = 0;
1238
1239	switch(num_gh) {
1240	case 0:
1241		return 0;
1242	case 1:
1243		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1244		return gfs2_glock_nq(ghs);
1245	default:
1246		if (num_gh <= 4)
1247			break;
1248		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1249		if (!pph)
1250			return -ENOMEM;
1251	}
1252
1253	error = nq_m_sync(num_gh, ghs, pph);
1254
1255	if (pph != tmp)
1256		kfree(pph);
1257
1258	return error;
1259}
1260
1261/**
1262 * gfs2_glock_dq_m - release multiple glocks
1263 * @num_gh: the number of structures
1264 * @ghs: an array of struct gfs2_holder structures
1265 *
1266 */
1267
1268void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1269{
1270	while (num_gh--)
1271		gfs2_glock_dq(&ghs[num_gh]);
1272}
1273
1274/**
1275 * gfs2_glock_dq_uninit_m - release multiple glocks
1276 * @num_gh: the number of structures
1277 * @ghs: an array of struct gfs2_holder structures
1278 *
1279 */
1280
1281void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1282{
1283	while (num_gh--)
1284		gfs2_glock_dq_uninit(&ghs[num_gh]);
1285}
1286
1287void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1288{
1289	unsigned long delay = 0;
1290	unsigned long holdtime;
1291	unsigned long now = jiffies;
1292
1293	gfs2_glock_hold(gl);
1294	holdtime = gl->gl_tchange + gl->gl_hold_time;
1295	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1296	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1297		if (time_before(now, holdtime))
1298			delay = holdtime - now;
1299		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1300			delay = gl->gl_hold_time;
1301	}
1302
1303	spin_lock(&gl->gl_spin);
1304	handle_callback(gl, state, delay);
1305	spin_unlock(&gl->gl_spin);
1306	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1307		gfs2_glock_put(gl);
1308}
1309
1310/**
1311 * gfs2_should_freeze - Figure out if glock should be frozen
1312 * @gl: The glock in question
1313 *
1314 * Glocks are not frozen if (a) the result of the dlm operation is
1315 * an error, (b) the locking operation was an unlock operation or
1316 * (c) if there is a "noexp" flagged request anywhere in the queue
1317 *
1318 * Returns: 1 if freezing should occur, 0 otherwise
1319 */
1320
1321static int gfs2_should_freeze(const struct gfs2_glock *gl)
1322{
1323	const struct gfs2_holder *gh;
1324
1325	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1326		return 0;
1327	if (gl->gl_target == LM_ST_UNLOCKED)
1328		return 0;
1329
1330	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1331		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1332			continue;
1333		if (LM_FLAG_NOEXP & gh->gh_flags)
1334			return 0;
1335	}
1336
1337	return 1;
1338}
1339
1340/**
1341 * gfs2_glock_complete - Callback used by locking
1342 * @gl: Pointer to the glock
1343 * @ret: The return value from the dlm
1344 *
1345 * The gl_reply field is under the gl_spin lock so that it is ok
1346 * to use a bitfield shared with other glock state fields.
1347 */
1348
1349void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1350{
1351	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1352
1353	spin_lock(&gl->gl_spin);
1354	gl->gl_reply = ret;
1355
1356	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
1357		if (gfs2_should_freeze(gl)) {
1358			set_bit(GLF_FROZEN, &gl->gl_flags);
1359			spin_unlock(&gl->gl_spin);
1360			return;
1361		}
1362	}
1363
1364	spin_unlock(&gl->gl_spin);
1365	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1366	smp_wmb();
1367	gfs2_glock_hold(gl);
1368	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1369		gfs2_glock_put(gl);
1370}
1371
1372
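/*
 * (Comment added editorially.) Shrinker callback: walk the glock LRU,
 * demote the glocks that demote_ok() approves, splice the skipped ones
 * back onto the list, and report the remaining LRU size scaled by
 * sysctl_vfs_cache_pressure.
 */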
1373static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1374				    struct shrink_control *sc)
1375{
1376	struct gfs2_glock *gl;
1377	int may_demote;
1378	int nr_skipped = 0;
1379	int nr = sc->nr_to_scan;
1380	gfp_t gfp_mask = sc->gfp_mask;
1381	LIST_HEAD(skipped);
1382
1383	if (nr == 0)
1384		goto out;
1385
1386	if (!(gfp_mask & __GFP_FS))
1387		return -1;
1388
1389	spin_lock(&lru_lock);
1390	while(nr && !list_empty(&lru_list)) {
1391		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1392		list_del_init(&gl->gl_lru);
1393		clear_bit(GLF_LRU, &gl->gl_flags);
1394		atomic_dec(&lru_count);
1395
1396		/* Test for being demotable */
1397		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1398			gfs2_glock_hold(gl);
1399			spin_unlock(&lru_lock);
1400			spin_lock(&gl->gl_spin);
1401			may_demote = demote_ok(gl);
1402			if (may_demote) {
1403				handle_callback(gl, LM_ST_UNLOCKED, 0);
1404				nr--;
1405			}
1406			clear_bit(GLF_LOCK, &gl->gl_flags);
1407			smp_mb__after_clear_bit();
1408			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1409				gfs2_glock_put_nolock(gl);
1410			spin_unlock(&gl->gl_spin);
1411			spin_lock(&lru_lock);
1412			continue;
1413		}
1414		nr_skipped++;
1415		list_add(&gl->gl_lru, &skipped);
1416		set_bit(GLF_LRU, &gl->gl_flags);
1417	}
1418	list_splice(&skipped, &lru_list);
1419	atomic_add(nr_skipped, &lru_count);
1420	spin_unlock(&lru_lock);
1421out:
1422	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1423}
1424
1425static struct shrinker glock_shrinker = {
1426	.shrink = gfs2_shrink_glock_memory,
1427	.seeks = DEFAULT_SEEKS,
1428};
1429
1430/**
 1431 * examine_bucket - Call a function for each glock in a hash bucket
1432 * @examiner: the function
1433 * @sdp: the filesystem
1434 * @bucket: the bucket
1435 *
1436 */
1437
1438static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1439			  unsigned int hash)
1440{
1441	struct gfs2_glock *gl;
1442	struct hlist_bl_head *head = &gl_hash_table[hash];
1443	struct hlist_bl_node *pos;
1444
1445	rcu_read_lock();
1446	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1447		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1448			examiner(gl);
1449	}
1450	rcu_read_unlock();
1451	cond_resched();
1452}
1453
1454static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1455{
1456	unsigned x;
1457
1458	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1459		examine_bucket(examiner, sdp, x);
1460}
1461
1462
1463/**
1464 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1465 * @gl: The glock to thaw
1466 *
1467 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1468 * so this has to result in the ref count being dropped by one.
1469 */
1470
1471static void thaw_glock(struct gfs2_glock *gl)
1472{
1473	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1474		return;
1475	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1476	gfs2_glock_hold(gl);
1477	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1478		gfs2_glock_put(gl);
1479}
1480
1481/**
1482 * clear_glock - look at a glock and see if we can free it from glock cache
1483 * @gl: the glock to look at
1484 *
1485 */
1486
1487static void clear_glock(struct gfs2_glock *gl)
1488{
1489	gfs2_glock_remove_from_lru(gl);
1490
1491	spin_lock(&gl->gl_spin);
1492	if (gl->gl_state != LM_ST_UNLOCKED)
1493		handle_callback(gl, LM_ST_UNLOCKED, 0);
1494	spin_unlock(&gl->gl_spin);
1495	gfs2_glock_hold(gl);
1496	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1497		gfs2_glock_put(gl);
1498}
1499
1500/**
1501 * gfs2_glock_thaw - Thaw any frozen glocks
1502 * @sdp: The super block
1503 *
1504 */
1505
1506void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1507{
1508	glock_hash_walk(thaw_glock, sdp);
1509}
1510
1511static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1512{
1513	int ret;
1514	spin_lock(&gl->gl_spin);
1515	ret = __dump_glock(seq, gl);
1516	spin_unlock(&gl->gl_spin);
1517	return ret;
1518}
1519
1520static void dump_glock_func(struct gfs2_glock *gl)
1521{
1522	dump_glock(NULL, gl);
1523}
1524
1525/**
1526 * gfs2_gl_hash_clear - Empty out the glock hash table
1527 * @sdp: the filesystem
 1528 *
1530 * Called when unmounting the filesystem.
1531 */
1532
1533void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1534{
1535	glock_hash_walk(clear_glock, sdp);
1536	flush_workqueue(glock_workqueue);
1537	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1538	glock_hash_walk(dump_glock_func, sdp);
1539}
1540
1541void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1542{
1543	struct gfs2_glock *gl = ip->i_gl;
1544	int ret;
1545
1546	ret = gfs2_truncatei_resume(ip);
1547	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1548
1549	spin_lock(&gl->gl_spin);
1550	clear_bit(GLF_LOCK, &gl->gl_flags);
1551	run_queue(gl, 1);
1552	spin_unlock(&gl->gl_spin);
1553}
1554
1555static const char *state2str(unsigned state)
1556{
1557	switch(state) {
1558	case LM_ST_UNLOCKED:
1559		return "UN";
1560	case LM_ST_SHARED:
1561		return "SH";
1562	case LM_ST_DEFERRED:
1563		return "DF";
1564	case LM_ST_EXCLUSIVE:
1565		return "EX";
1566	}
1567	return "??";
1568}
1569
1570static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1571{
1572	char *p = buf;
1573	if (flags & LM_FLAG_TRY)
1574		*p++ = 't';
1575	if (flags & LM_FLAG_TRY_1CB)
1576		*p++ = 'T';
1577	if (flags & LM_FLAG_NOEXP)
1578		*p++ = 'e';
1579	if (flags & LM_FLAG_ANY)
1580		*p++ = 'A';
1581	if (flags & LM_FLAG_PRIORITY)
1582		*p++ = 'p';
1583	if (flags & GL_ASYNC)
1584		*p++ = 'a';
1585	if (flags & GL_EXACT)
1586		*p++ = 'E';
1587	if (flags & GL_NOCACHE)
1588		*p++ = 'c';
1589	if (test_bit(HIF_HOLDER, &iflags))
1590		*p++ = 'H';
1591	if (test_bit(HIF_WAIT, &iflags))
1592		*p++ = 'W';
1593	if (test_bit(HIF_FIRST, &iflags))
1594		*p++ = 'F';
1595	*p = 0;
1596	return buf;
1597}
1598
1599/**
1600 * dump_holder - print information about a glock holder
1601 * @seq: the seq_file struct
1602 * @gh: the glock holder
1603 *
1604 * Returns: 0 on success, -ENOBUFS when we run out of space
1605 */
1606
1607static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1608{
1609	struct task_struct *gh_owner = NULL;
1610	char flags_buf[32];
1611
1612	if (gh->gh_owner_pid)
1613		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1614	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1615		       state2str(gh->gh_state),
1616		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1617		       gh->gh_error,
1618		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1619		       gh_owner ? gh_owner->comm : "(ended)",
1620		       (void *)gh->gh_ip);
1621	return 0;
1622}
1623
1624static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1625{
1626	const unsigned long *gflags = &gl->gl_flags;
1627	char *p = buf;
1628
1629	if (test_bit(GLF_LOCK, gflags))
1630		*p++ = 'l';
1631	if (test_bit(GLF_DEMOTE, gflags))
1632		*p++ = 'D';
1633	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1634		*p++ = 'd';
1635	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1636		*p++ = 'p';
1637	if (test_bit(GLF_DIRTY, gflags))
1638		*p++ = 'y';
1639	if (test_bit(GLF_LFLUSH, gflags))
1640		*p++ = 'f';
1641	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1642		*p++ = 'i';
1643	if (test_bit(GLF_REPLY_PENDING, gflags))
1644		*p++ = 'r';
1645	if (test_bit(GLF_INITIAL, gflags))
1646		*p++ = 'I';
1647	if (test_bit(GLF_FROZEN, gflags))
1648		*p++ = 'F';
1649	if (test_bit(GLF_QUEUED, gflags))
1650		*p++ = 'q';
1651	if (test_bit(GLF_LRU, gflags))
1652		*p++ = 'L';
1653	if (gl->gl_object)
1654		*p++ = 'o';
1655	*p = 0;
1656	return buf;
1657}
1658
1659/**
1660 * __dump_glock - print information about a glock
1661 * @seq: The seq_file struct
1662 * @gl: the glock
1663 *
1664 * The file format is as follows:
1665 * One line per object, capital letters are used to indicate objects
1666 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1667 * other objects are indented by a single space and follow the glock to
1668 * which they are related. Fields are indicated by lower case letters
1669 * followed by a colon and the field value, except for strings which are in
 1670 * [] so that it's possible to see if they are composed of spaces for
 1671 * example. The fields are n = number (id of the object), f = flags,
1672 * t = type, s = state, r = refcount, e = error, p = pid.
1673 *
1674 * Returns: 0 on success, -ENOBUFS when we run out of space
1675 */
1676
1677static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1678{
1679	const struct gfs2_glock_operations *glops = gl->gl_ops;
1680	unsigned long long dtime;
1681	const struct gfs2_holder *gh;
1682	char gflags_buf[32];
1683	int error = 0;
1684
1685	dtime = jiffies - gl->gl_demote_time;
1686	dtime *= 1000000/HZ; /* demote time in uSec */
1687	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1688		dtime = 0;
1689	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1690		  state2str(gl->gl_state),
1691		  gl->gl_name.ln_type,
1692		  (unsigned long long)gl->gl_name.ln_number,
1693		  gflags2str(gflags_buf, gl),
1694		  state2str(gl->gl_target),
1695		  state2str(gl->gl_demote_state), dtime,
1696		  atomic_read(&gl->gl_ail_count),
1697		  atomic_read(&gl->gl_revokes),
1698		  atomic_read(&gl->gl_ref), gl->gl_hold_time);
1699
1700	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1701		error = dump_holder(seq, gh);
1702		if (error)
1703			goto out;
1704	}
1705	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1706		error = glops->go_dump(seq, gl);
1707out:
1708	return error;
1709}
1710
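/*
 * Editorial example of the format documented above (all values are
 * invented for illustration):
 *
 *	G:  s:SH n:2/27bc f:lq t:SH d:EX/0 a:0 v:0 r:3 m:10
 *	 H: s:SH f:H e:0 p:1232 [cat] gfs2_inode_lookup+0x11c/0x1e0
 */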
1714int __init gfs2_glock_init(void)
1715{
1716	unsigned i;
1717	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1718		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1719	}
1720
1721	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1722					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1723	if (IS_ERR(glock_workqueue))
1724		return PTR_ERR(glock_workqueue);
1725	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1726						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1727						0);
1728	if (IS_ERR(gfs2_delete_workqueue)) {
1729		destroy_workqueue(glock_workqueue);
1730		return PTR_ERR(gfs2_delete_workqueue);
1731	}
1732
1733	register_shrinker(&glock_shrinker);
1734
1735	return 0;
1736}
1737
1738void gfs2_glock_exit(void)
1739{
1740	unregister_shrinker(&glock_shrinker);
1741	destroy_workqueue(glock_workqueue);
1742	destroy_workqueue(gfs2_delete_workqueue);
1743}
1744
1745static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1746{
1747	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1748			      struct gfs2_glock, gl_list);
1749}
1750
1751static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1752{
1753	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1754			      struct gfs2_glock, gl_list);
1755}
1756
1757static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1758{
1759	struct gfs2_glock *gl;
1760
1761	do {
1762		gl = gi->gl;
1763		if (gl) {
1764			gi->gl = glock_hash_next(gl);
1765		} else {
1766			gi->gl = glock_hash_chain(gi->hash);
1767		}
1768		while (gi->gl == NULL) {
1769			gi->hash++;
1770			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1771				rcu_read_unlock();
1772				return 1;
1773			}
1774			gi->gl = glock_hash_chain(gi->hash);
1775		}
1776	/* Skip entries for other sb and dead entries */
1777	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1778
1779	return 0;
1780}
1781
1782static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1783{
1784	struct gfs2_glock_iter *gi = seq->private;
1785	loff_t n = *pos;
1786
1787	gi->hash = 0;
1788	rcu_read_lock();
1789
1790	do {
1791		if (gfs2_glock_iter_next(gi))
1792			return NULL;
1793	} while (n--);
1794
1795	return gi->gl;
1796}
1797
1798static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1799				 loff_t *pos)
1800{
1801	struct gfs2_glock_iter *gi = seq->private;
1802
1803	(*pos)++;
1804
1805	if (gfs2_glock_iter_next(gi))
1806		return NULL;
1807
1808	return gi->gl;
1809}
1810
1811static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1812{
1813	struct gfs2_glock_iter *gi = seq->private;
1814
1815	if (gi->gl)
1816		rcu_read_unlock();
1817	gi->gl = NULL;
1818}
1819
1820static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1821{
1822	return dump_glock(seq, iter_ptr);
1823}
1824
1825static const struct seq_operations gfs2_glock_seq_ops = {
1826	.start = gfs2_glock_seq_start,
1827	.next  = gfs2_glock_seq_next,
1828	.stop  = gfs2_glock_seq_stop,
1829	.show  = gfs2_glock_seq_show,
1830};
1831
1832static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1833{
1834	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1835				   sizeof(struct gfs2_glock_iter));
1836	if (ret == 0) {
1837		struct seq_file *seq = file->private_data;
1838		struct gfs2_glock_iter *gi = seq->private;
1839		gi->sdp = inode->i_private;
1840	}
1841	return ret;
1842}
1843
1844static const struct file_operations gfs2_debug_fops = {
1845	.owner   = THIS_MODULE,
1846	.open    = gfs2_debugfs_open,
1847	.read    = seq_read,
1848	.llseek  = seq_lseek,
1849	.release = seq_release_private,
1850};
1851
1852int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1853{
1854	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1855	if (!sdp->debugfs_dir)
1856		return -ENOMEM;
1857	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1858							 S_IFREG | S_IRUGO,
1859							 sdp->debugfs_dir, sdp,
1860							 &gfs2_debug_fops);
1861	if (!sdp->debugfs_dentry_glocks)
1862		return -ENOMEM;
1863
1864	return 0;
1865}
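/*
 * Editorial note: assuming debugfs is mounted at the usual location,
 * the file created above appears as
 * /sys/kernel/debug/gfs2/<table_name>/glocks and can simply be read
 * with cat(1); each line is produced by __dump_glock() via the
 * seq_file iterator above.
 */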
1866
1867void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1868{
1869	if (sdp && sdp->debugfs_dir) {
1870		if (sdp->debugfs_dentry_glocks) {
1871			debugfs_remove(sdp->debugfs_dentry_glocks);
1872			sdp->debugfs_dentry_glocks = NULL;
1873		}
1874		debugfs_remove(sdp->debugfs_dir);
1875		sdp->debugfs_dir = NULL;
1876	}
1877}
1878
1879int gfs2_register_debugfs(void)
1880{
1881	gfs2_root = debugfs_create_dir("gfs2", NULL);
1882	return gfs2_root ? 0 : -ENOMEM;
1883}
1884
1885void gfs2_unregister_debugfs(void)
1886{
1887	debugfs_remove(gfs2_root);
1888	gfs2_root = NULL;
1889}
v4.10.11 (fs/gfs2/glock.c)
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/sched.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/buffer_head.h>
  16#include <linux/delay.h>
  17#include <linux/sort.h>
  18#include <linux/jhash.h>
  19#include <linux/kallsyms.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/list.h>
  22#include <linux/wait.h>
  23#include <linux/module.h>
  24#include <linux/uaccess.h>
  25#include <linux/seq_file.h>
  26#include <linux/debugfs.h>
  27#include <linux/kthread.h>
  28#include <linux/freezer.h>
  29#include <linux/workqueue.h>
  30#include <linux/jiffies.h>
  31#include <linux/rcupdate.h>
  32#include <linux/rculist_bl.h>
  33#include <linux/bit_spinlock.h>
  34#include <linux/percpu.h>
  35#include <linux/list_sort.h>
  36#include <linux/lockref.h>
  37#include <linux/rhashtable.h>
  38
  39#include "gfs2.h"
  40#include "incore.h"
  41#include "glock.h"
  42#include "glops.h"
  43#include "inode.h"
  44#include "lops.h"
  45#include "meta_io.h"
  46#include "quota.h"
  47#include "super.h"
  48#include "util.h"
  49#include "bmap.h"
  50#define CREATE_TRACE_POINTS
  51#include "trace_gfs2.h"
  52
  53struct gfs2_glock_iter {
  54	struct gfs2_sbd *sdp;		/* incore superblock           */
  55	struct rhashtable_iter hti;	/* rhashtable iterator         */
  56	struct gfs2_glock *gl;		/* current glock struct        */
  57	loff_t last_pos;		/* last position               */
  58};
  59
  60typedef void (*glock_examiner) (struct gfs2_glock * gl);
  61
  62static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  63
  64static struct dentry *gfs2_root;
  65static struct workqueue_struct *glock_workqueue;
  66struct workqueue_struct *gfs2_delete_workqueue;
  67static LIST_HEAD(lru_list);
  68static atomic_t lru_count = ATOMIC_INIT(0);
  69static DEFINE_SPINLOCK(lru_lock);
  70
  71#define GFS2_GL_HASH_SHIFT      15
  72#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
  73
  74static struct rhashtable_params ht_parms = {
  75	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
  76	.key_len = sizeof(struct lm_lockname),
  77	.key_offset = offsetof(struct gfs2_glock, gl_name),
  78	.head_offset = offsetof(struct gfs2_glock, gl_node),
  79};
  80
  81static struct rhashtable gl_hash_table;
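/*
 * Added note: glocks are keyed on the whole struct lm_lockname (lock
 * number, lock type and superblock pointer, per ht_parms.key_len above),
 * so this single table serves every mounted gfs2 filesystem; walkers
 * filter on gl_name.ln_sbd.
 */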
  82
  83void gfs2_glock_free(struct gfs2_glock *gl)
  84{
  85	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
  86
  87	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
  88		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
  89	} else {
  90		kfree(gl->gl_lksb.sb_lvbptr);
  91		kmem_cache_free(gfs2_glock_cachep, gl);
  92	}
  93	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
  94		wake_up(&sdp->sd_glock_wait);
  95}
  96
  97/**
  98 * gfs2_glock_hold() - increment reference count on glock
  99 * @gl: The glock to hold
 100 *
 101 */
 102
 103static void gfs2_glock_hold(struct gfs2_glock *gl)
 104{
 105	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 106	lockref_get(&gl->gl_lockref);
 107}
 108
 109/**
 110 * demote_ok - Check to see if it's ok to unlock a glock
 111 * @gl: the glock
 112 *
 113 * Returns: 1 if it's ok
 114 */
 115
 116static int demote_ok(const struct gfs2_glock *gl)
 117{
 118	const struct gfs2_glock_operations *glops = gl->gl_ops;
 119
 120	if (gl->gl_state == LM_ST_UNLOCKED)
 121		return 0;
 122	if (!list_empty(&gl->gl_holders))
 123		return 0;
 124	if (glops->go_demote_ok)
 125		return glops->go_demote_ok(gl);
 126	return 1;
 127}
 128
 129
 130void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 131{
 132	spin_lock(&lru_lock);
 133
 134	if (!list_empty(&gl->gl_lru))
 135		list_del_init(&gl->gl_lru);
 136	else
 137		atomic_inc(&lru_count);
 138
 139	list_add_tail(&gl->gl_lru, &lru_list);
 140	set_bit(GLF_LRU, &gl->gl_flags);
 141	spin_unlock(&lru_lock);
 142}
 143
 144static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 145{
 146	spin_lock(&lru_lock);
 147	if (!list_empty(&gl->gl_lru)) {
 148		list_del_init(&gl->gl_lru);
 149		atomic_dec(&lru_count);
 150		clear_bit(GLF_LRU, &gl->gl_flags);
 151	}
 152	spin_unlock(&lru_lock);
 153}
 154
 155/**
 156 * gfs2_glock_put() - Decrement reference count on glock
 157 * @gl: The glock to put
 158 *
 159 */
 160
 161void gfs2_glock_put(struct gfs2_glock *gl)
 162{
 163	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 164	struct address_space *mapping = gfs2_glock2aspace(gl);
 165
 166	if (lockref_put_or_lock(&gl->gl_lockref))
 167		return;
 168
 169	lockref_mark_dead(&gl->gl_lockref);
 170
 171	gfs2_glock_remove_from_lru(gl);
 172	spin_unlock(&gl->gl_lockref.lock);
 173	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 174	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 175	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 176	trace_gfs2_glock_put(gl);
 177	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 178}
 179
 180/**
 181 * may_grant - check if it's ok to grant a new lock
 182 * @gl: The glock
 183 * @gh: The lock request which we wish to grant
 184 *
 185 * Returns: true if it's ok to grant the lock
 186 */
 187
 188static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 189{
 190	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
 191	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 192	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 193		return 0;
 194	if (gl->gl_state == gh->gh_state)
 195		return 1;
 196	if (gh->gh_flags & GL_EXACT)
 197		return 0;
 198	if (gl->gl_state == LM_ST_EXCLUSIVE) {
 199		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
 200			return 1;
 201		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
 202			return 1;
 203	}
 204	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
 205		return 1;
 206	return 0;
 207}
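/*
 * Worked example of the rules above (added commentary, not in the
 * original source):
 *   glock EX, head holder EX, new SH request -> not granted (EX excludes)
 *   glock EX, head holder SH, new SH request -> granted (shared readers)
 *   glock SH, new SH request                 -> granted (states match)
 *   glock SH, new EX request                 -> not granted (first test)
 *   glock DF, new SH + LM_FLAG_ANY           -> granted (any locked state)
 */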
 208
 209static void gfs2_holder_wake(struct gfs2_holder *gh)
 210{
 211	clear_bit(HIF_WAIT, &gh->gh_iflags);
 212	smp_mb__after_atomic();
 213	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 214}
 215
 216/**
 217 * do_error - Something unexpected has happened during a lock request
 218 *
 219 */
 220
 221static void do_error(struct gfs2_glock *gl, const int ret)
 222{
 223	struct gfs2_holder *gh, *tmp;
 224
 225	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 226		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 227			continue;
 228		if (ret & LM_OUT_ERROR)
 229			gh->gh_error = -EIO;
 230		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 231			gh->gh_error = GLR_TRYFAILED;
 232		else
 233			continue;
 234		list_del_init(&gh->gh_list);
 235		trace_gfs2_glock_queue(gh, 0);
 236		gfs2_holder_wake(gh);
 237	}
 238}
 239
 240/**
 241 * do_promote - promote as many requests as possible on the current queue
 242 * @gl: The glock
 243 * 
 244 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 245 *          if a type specific operation is underway.
 246 */
 247
 248static int do_promote(struct gfs2_glock *gl)
 249__releases(&gl->gl_lockref.lock)
 250__acquires(&gl->gl_lockref.lock)
 251{
 252	const struct gfs2_glock_operations *glops = gl->gl_ops;
 253	struct gfs2_holder *gh, *tmp;
 254	int ret;
 255
 256restart:
 257	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 258		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 259			continue;
 260		if (may_grant(gl, gh)) {
 261			if (gh->gh_list.prev == &gl->gl_holders &&
 262			    glops->go_lock) {
 263				spin_unlock(&gl->gl_lockref.lock);
 264				/* FIXME: eliminate this eventually */
 265				ret = glops->go_lock(gh);
 266				spin_lock(&gl->gl_lockref.lock);
 267				if (ret) {
 268					if (ret == 1)
 269						return 2;
 270					gh->gh_error = ret;
 271					list_del_init(&gh->gh_list);
 272					trace_gfs2_glock_queue(gh, 0);
 273					gfs2_holder_wake(gh);
 274					goto restart;
 275				}
 276				set_bit(HIF_HOLDER, &gh->gh_iflags);
 277				trace_gfs2_promote(gh, 1);
 278				gfs2_holder_wake(gh);
 279				goto restart;
 280			}
 281			set_bit(HIF_HOLDER, &gh->gh_iflags);
 282			trace_gfs2_promote(gh, 0);
 283			gfs2_holder_wake(gh);
 284			continue;
 285		}
 286		if (gh->gh_list.prev == &gl->gl_holders)
 287			return 1;
 288		do_error(gl, 0);
 289		break;
 290	}
 291	return 0;
 292}
 293
 294/**
 295 * find_first_waiter - find the first gh that's waiting for the glock
 296 * @gl: the glock
 297 */
 298
 299static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 300{
 301	struct gfs2_holder *gh;
 302
 303	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 304		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 305			return gh;
 306	}
 307	return NULL;
 308}
 309
 310/**
 311 * state_change - record that the glock is now in a different state
 312 * @gl: the glock
 313 * @new_state: the new state
 314 *
 315 */
 316
 317static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 318{
 319	int held1, held2;
 320
 321	held1 = (gl->gl_state != LM_ST_UNLOCKED);
 322	held2 = (new_state != LM_ST_UNLOCKED);
 323
 324	if (held1 != held2) {
 325		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 326		if (held2)
 327			gl->gl_lockref.count++;
 328		else
 329			gl->gl_lockref.count--;
 330	}
 331	if (held1 && held2 && list_empty(&gl->gl_holders))
 332		clear_bit(GLF_QUEUED, &gl->gl_flags);
 333
 334	if (new_state != gl->gl_target)
 335		/* shorten our minimum hold time */
 336		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 337				       GL_GLOCK_MIN_HOLD);
 338	gl->gl_state = new_state;
 339	gl->gl_tchange = jiffies;
 340}
 341
 342static void gfs2_demote_wake(struct gfs2_glock *gl)
 343{
 344	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 345	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 346	smp_mb__after_atomic();
 347	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 348}
 349
 350/**
 351 * finish_xmote - The DLM has replied to one of our lock requests
 352 * @gl: The glock
 353 * @ret: The status from the DLM
 354 *
 355 */
 356
 357static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 358{
 359	const struct gfs2_glock_operations *glops = gl->gl_ops;
 360	struct gfs2_holder *gh;
 361	unsigned state = ret & LM_OUT_ST_MASK;
 362	int rv;
 363
 364	spin_lock(&gl->gl_lockref.lock);
 365	trace_gfs2_glock_state_change(gl, state);
 366	state_change(gl, state);
 367	gh = find_first_waiter(gl);
 368
 369	/* Demote to UN request arrived during demote to SH or DF */
 370	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 371	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 372		gl->gl_target = LM_ST_UNLOCKED;
 373
 374	/* Check for state != intended state */
 375	if (unlikely(state != gl->gl_target)) {
 376		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 377			/* move to back of queue and try next entry */
 378			if (ret & LM_OUT_CANCELED) {
 379				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 380					list_move_tail(&gh->gh_list, &gl->gl_holders);
 381				gh = find_first_waiter(gl);
 382				gl->gl_target = gh->gh_state;
 383				goto retry;
 384			}
 385			/* Some error or failed "try lock" - report it */
 386			if ((ret & LM_OUT_ERROR) ||
 387			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 388				gl->gl_target = gl->gl_state;
 389				do_error(gl, ret);
 390				goto out;
 391			}
 392		}
 393		switch(state) {
 394		/* Unlocked due to conversion deadlock, try again */
 395		case LM_ST_UNLOCKED:
 396retry:
 397			do_xmote(gl, gh, gl->gl_target);
 398			break;
 399		/* Conversion fails, unlock and try again */
 400		case LM_ST_SHARED:
 401		case LM_ST_DEFERRED:
 402			do_xmote(gl, gh, LM_ST_UNLOCKED);
 403			break;
 404		default: /* Everything else */
 405			pr_err("wanted %u got %u\n", gl->gl_target, state);
 406			GLOCK_BUG_ON(gl, 1);
 407		}
 408		spin_unlock(&gl->gl_lockref.lock);
 409		return;
 410	}
 411
 412	/* Fast path - we got what we asked for */
 413	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 414		gfs2_demote_wake(gl);
 415	if (state != LM_ST_UNLOCKED) {
 416		if (glops->go_xmote_bh) {
 417			spin_unlock(&gl->gl_lockref.lock);
 418			rv = glops->go_xmote_bh(gl, gh);
 419			spin_lock(&gl->gl_lockref.lock);
 420			if (rv) {
 421				do_error(gl, rv);
 422				goto out;
 423			}
 424		}
 425		rv = do_promote(gl);
 426		if (rv == 2)
 427			goto out_locked;
 428	}
 429out:
 430	clear_bit(GLF_LOCK, &gl->gl_flags);
 431out_locked:
 432	spin_unlock(&gl->gl_lockref.lock);
 433}
 434
 435/**
 436 * do_xmote - Calls the DLM to change the state of a lock
 437 * @gl: The glock
 438 * @gh: The holder (only for promotes)
 439 * @target: The target lock state
 440 *
 441 */
 442
 443static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
 444__releases(&gl->gl_lockref.lock)
 445__acquires(&gl->gl_lockref.lock)
 446{
 447	const struct gfs2_glock_operations *glops = gl->gl_ops;
 448	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 449	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 450	int ret;
 451
 452	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 453		      LM_FLAG_PRIORITY);
 454	GLOCK_BUG_ON(gl, gl->gl_state == target);
 455	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 456	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 457	    glops->go_inval) {
 458		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 459		do_error(gl, 0); /* Fail queued try locks */
 460	}
 461	gl->gl_req = target;
 462	set_bit(GLF_BLOCKING, &gl->gl_flags);
 463	if ((gl->gl_req == LM_ST_UNLOCKED) ||
 464	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 465	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 466		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 467	spin_unlock(&gl->gl_lockref.lock);
 468	if (glops->go_sync)
 469		glops->go_sync(gl);
 470	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 471		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 472	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 473
 474	gfs2_glock_hold(gl);
 475	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 476		/* lock_dlm */
 477		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 478		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 479		    target == LM_ST_UNLOCKED &&
 480		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
 481			finish_xmote(gl, target);
 482			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 483				gfs2_glock_put(gl);
 484		}
 485		else if (ret) {
 486			pr_err("lm_lock ret %d\n", ret);
 487			GLOCK_BUG_ON(gl, 1);
 488		}
 489	} else { /* lock_nolock */
 490		finish_xmote(gl, target);
 491		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 492			gfs2_glock_put(gl);
 493	}
 494
 495	spin_lock(&gl->gl_lockref.lock);
 496}
 497
 498/**
 499 * find_first_holder - find the first "holder" gh
 500 * @gl: the glock
 501 */
 502
 503static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 504{
 505	struct gfs2_holder *gh;
 506
 507	if (!list_empty(&gl->gl_holders)) {
 508		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 509		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 510			return gh;
 511	}
 512	return NULL;
 513}
 514
 515/**
 516 * run_queue - do all outstanding tasks related to a glock
 517 * @gl: The glock in question
 518 * @nonblock: True if we must not block in run_queue
 519 *
 520 */
 521
 522static void run_queue(struct gfs2_glock *gl, const int nonblock)
 523__releases(&gl->gl_lockref.lock)
 524__acquires(&gl->gl_lockref.lock)
 525{
 526	struct gfs2_holder *gh = NULL;
 527	int ret;
 528
 529	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 530		return;
 531
 532	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 533
 534	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 535	    gl->gl_demote_state != gl->gl_state) {
 536		if (find_first_holder(gl))
 537			goto out_unlock;
 538		if (nonblock)
 539			goto out_sched;
 540		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 541		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 542		gl->gl_target = gl->gl_demote_state;
 543	} else {
 544		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 545			gfs2_demote_wake(gl);
 546		ret = do_promote(gl);
 547		if (ret == 0)
 548			goto out_unlock;
 549		if (ret == 2)
 550			goto out;
 551		gh = find_first_waiter(gl);
 552		gl->gl_target = gh->gh_state;
 553		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 554			do_error(gl, 0); /* Fail queued try locks */
 555	}
 556	do_xmote(gl, gh, gl->gl_target);
 557out:
 558	return;
 559
 560out_sched:
 561	clear_bit(GLF_LOCK, &gl->gl_flags);
 562	smp_mb__after_atomic();
 563	gl->gl_lockref.count++;
 564	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 565		gl->gl_lockref.count--;
 566	return;
 567
 568out_unlock:
 569	clear_bit(GLF_LOCK, &gl->gl_flags);
 570	smp_mb__after_atomic();
 571	return;
 572}
 573
 574static void delete_work_func(struct work_struct *work)
 575{
 576	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
 577	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 578	struct inode *inode;
 579	u64 no_addr = gl->gl_name.ln_number;
 580
 581	/* If someone's using this glock to create a new dinode, the block must
 582	   have been freed by another node, then re-used, in which case our
 583	   iopen callback is too late after the fact. Ignore it. */
 584	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
 585		goto out;
 586
 587	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
 588	if (inode && !IS_ERR(inode)) {
 589		d_prune_aliases(inode);
 590		iput(inode);
 591	}
 592out:
 593	gfs2_glock_put(gl);
 594}
 595
 596static void glock_work_func(struct work_struct *work)
 597{
 598	unsigned long delay = 0;
 599	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 600	int drop_ref = 0;
 601
 602	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 603		finish_xmote(gl, gl->gl_reply);
 604		drop_ref = 1;
 605	}
 606	spin_lock(&gl->gl_lockref.lock);
 607	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 608	    gl->gl_state != LM_ST_UNLOCKED &&
 609	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
 610		unsigned long holdtime, now = jiffies;
 611
 612		holdtime = gl->gl_tchange + gl->gl_hold_time;
 613		if (time_before(now, holdtime))
 614			delay = holdtime - now;
 615
 616		if (!delay) {
 617			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
 618			set_bit(GLF_DEMOTE, &gl->gl_flags);
 619		}
 620	}
 621	run_queue(gl, 0);
 622	spin_unlock(&gl->gl_lockref.lock);
 623	if (!delay)
 624		gfs2_glock_put(gl);
 625	else {
 626		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 627			delay = 0;
 628		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 629			gfs2_glock_put(gl);
 630	}
 631	if (drop_ref)
 632		gfs2_glock_put(gl);
 633}
 634
 635/**
 636 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 637 * @sdp: The GFS2 superblock
 638 * @number: the lock number
 639 * @glops: The glock_operations to use
 640 * @create: If 0, don't create the glock if it doesn't exist
 641 * @glp: the glock is returned here
 642 *
 643 * This does not lock a glock, just finds/creates structures for one.
 644 *
 645 * Returns: errno
 646 */
 647
 648int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 649		   const struct gfs2_glock_operations *glops, int create,
 650		   struct gfs2_glock **glp)
 651{
 652	struct super_block *s = sdp->sd_vfs;
 653	struct lm_lockname name = { .ln_number = number,
 654				    .ln_type = glops->go_type,
 655				    .ln_sbd = sdp };
 656	struct gfs2_glock *gl, *tmp = NULL;
 657	struct address_space *mapping;
 658	struct kmem_cache *cachep;
 659	int ret, tries = 0;
 660
 661	rcu_read_lock();
 662	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 663	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
 664		gl = NULL;
 665	rcu_read_unlock();
 666
 667	*glp = gl;
 668	if (gl)
 669		return 0;
 670	if (!create)
 671		return -ENOENT;
 672
 673	if (glops->go_flags & GLOF_ASPACE)
 674		cachep = gfs2_glock_aspace_cachep;
 675	else
 676		cachep = gfs2_glock_cachep;
 677	gl = kmem_cache_alloc(cachep, GFP_NOFS);
 678	if (!gl)
 679		return -ENOMEM;
 680
 681	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 682
 683	if (glops->go_flags & GLOF_LVB) {
 684		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
 685		if (!gl->gl_lksb.sb_lvbptr) {
 686			kmem_cache_free(cachep, gl);
 687			return -ENOMEM;
 688		}
 689	}
 690
 691	atomic_inc(&sdp->sd_glock_disposal);
 692	gl->gl_node.next = NULL;
 693	gl->gl_flags = 0;
 694	gl->gl_name = name;
 695	gl->gl_lockref.count = 1;
 696	gl->gl_state = LM_ST_UNLOCKED;
 697	gl->gl_target = LM_ST_UNLOCKED;
 698	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 699	gl->gl_ops = glops;
 700	gl->gl_dstamp = 0;
 701	preempt_disable();
 702	/* We use the global stats to estimate the initial per-glock stats */
 703	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
 704	preempt_enable();
 705	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
 706	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
 707	gl->gl_tchange = jiffies;
 708	gl->gl_object = NULL;
 709	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 710	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 711	INIT_WORK(&gl->gl_delete, delete_work_func);
 712
 713	mapping = gfs2_glock2aspace(gl);
 714	if (mapping) {
 715		mapping->a_ops = &gfs2_meta_aops;
 716		mapping->host = s->s_bdev->bd_inode;
 717		mapping->flags = 0;
 718		mapping_set_gfp_mask(mapping, GFP_NOFS);
 719		mapping->private_data = NULL;
 720		mapping->writeback_index = 0;
 721	}
 722
 723again:
 724	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
 725					    ht_parms);
 726	if (ret == 0) {
 727		*glp = gl;
 728		return 0;
 729	}
 730
 731	if (ret == -EEXIST) {
 732		ret = 0;
 733		rcu_read_lock();
 734		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 735		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
 736			if (++tries < 100) {
 737				rcu_read_unlock();
 738				cond_resched();
 739				goto again;
 740			}
 741			tmp = NULL;
 742			ret = -ENOMEM;
 743		}
 744		rcu_read_unlock();
 745	} else {
 746		WARN_ON_ONCE(ret);
 747	}
 748	kfree(gl->gl_lksb.sb_lvbptr);
 749	kmem_cache_free(cachep, gl);
 750	atomic_dec(&sdp->sd_glock_disposal);
 751	*glp = tmp;
 752
 753	return ret;
 754}
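/*
 * Illustrative sketch (not from the original file): a typical caller
 * pairs gfs2_glock_get() with gfs2_glock_put(). The use of the inode
 * glops and the helper name are assumptions for the example only.
 */
static int example_glock_get_put(struct gfs2_sbd *sdp, u64 number)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
	if (error)
		return error;
	/* ... queue holders against gl here ... */
	gfs2_glock_put(gl);	/* drop the reference gfs2_glock_get() took */
	return 0;
}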
 755
 756/**
 757 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 758 * @gl: the glock
 759 * @state: the state we're requesting
 760 * @flags: the modifier flags
 761 * @gh: the holder structure
 762 *
 763 */
 764
 765void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
 766		      struct gfs2_holder *gh)
 767{
 768	INIT_LIST_HEAD(&gh->gh_list);
 769	gh->gh_gl = gl;
 770	gh->gh_ip = _RET_IP_;
 771	gh->gh_owner_pid = get_pid(task_pid(current));
 772	gh->gh_state = state;
 773	gh->gh_flags = flags;
 774	gh->gh_error = 0;
 775	gh->gh_iflags = 0;
 776	gfs2_glock_hold(gl);
 777}
 778
 779/**
 780 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 781 * @state: the state we're requesting
 782 * @flags: the modifier flags
 783 * @gh: the holder structure
 784 *
 785 * Don't mess with the glock.
 786 *
 787 */
 788
 789void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
 790{
 791	gh->gh_state = state;
 792	gh->gh_flags = flags;
 793	gh->gh_iflags = 0;
 794	gh->gh_ip = _RET_IP_;
 795	put_pid(gh->gh_owner_pid);
 796	gh->gh_owner_pid = get_pid(task_pid(current));
 797}
 798
 799/**
 800 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 801 * @gh: the holder structure
 802 *
 803 */
 804
 805void gfs2_holder_uninit(struct gfs2_holder *gh)
 806{
 807	put_pid(gh->gh_owner_pid);
 808	gfs2_glock_put(gh->gh_gl);
 809	gfs2_holder_mark_uninitialized(gh);
 810	gh->gh_ip = 0;
 811}
 812
 813/**
 814 * gfs2_glock_wait - wait on a glock acquisition
 815 * @gh: the glock holder
 816 *
 817 * Returns: 0 on success
 818 */
 819
 820int gfs2_glock_wait(struct gfs2_holder *gh)
 821{
 822	unsigned long time1 = jiffies;
 823
 824	might_sleep();
 825	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
 826	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
 827		/* Lengthen the minimum hold time. */
 828		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
 829					      GL_GLOCK_HOLD_INCR,
 830					      GL_GLOCK_MAX_HOLD);
 831	return gh->gh_error;
 832}
 833
 834/**
 835 * handle_callback - process a demote request
 836 * @gl: the glock
 837 * @state: the state the caller wants us to change to
 838 *
 839 * There are only two requests that we are going to see in actual
 840 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 841 */
 842
 843static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 844			    unsigned long delay, bool remote)
 845{
 846	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 847
 848	set_bit(bit, &gl->gl_flags);
 849	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 850		gl->gl_demote_state = state;
 851		gl->gl_demote_time = jiffies;
 852	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 853			gl->gl_demote_state != state) {
 854		gl->gl_demote_state = LM_ST_UNLOCKED;
 855	}
 856	if (gl->gl_ops->go_callback)
 857		gl->gl_ops->go_callback(gl, remote);
 858	trace_gfs2_demote_rq(gl, remote);
 859}
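/*
 * Added example: if a remote node first asks for a demote to SH and,
 * before that completes, another asks for DF, the two targets conflict,
 * so the code above falls back to LM_ST_UNLOCKED, which satisfies both
 * requesters.
 */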
 860
 861void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 862{
 863	struct va_format vaf;
 864	va_list args;
 865
 866	va_start(args, fmt);
 867
 868	if (seq) {
 869		seq_vprintf(seq, fmt, args);
 870	} else {
 871		vaf.fmt = fmt;
 872		vaf.va = &args;
 873
 874		pr_err("%pV", &vaf);
 875	}
 876
 877	va_end(args);
 878}
 879
 880/**
 881 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 882 * @gh: the holder structure to add
 883 *
 884 * Eventually we should move the recursive locking trap to a
 885 * debugging option or something like that. This is the fast
 886 * path and needs to have the minimum number of distractions.
 887 * 
 888 */
 889
 890static inline void add_to_queue(struct gfs2_holder *gh)
 891__releases(&gl->gl_lockref.lock)
 892__acquires(&gl->gl_lockref.lock)
 893{
 894	struct gfs2_glock *gl = gh->gh_gl;
 895	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 896	struct list_head *insert_pt = NULL;
 897	struct gfs2_holder *gh2;
 898	int try_futile = 0;
 899
 900	BUG_ON(gh->gh_owner_pid == NULL);
 901	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 902		BUG();
 903
 904	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 905		if (test_bit(GLF_LOCK, &gl->gl_flags))
 906			try_futile = !may_grant(gl, gh);
 907		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 908			goto fail;
 909	}
 910
 911	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
 912		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
 913		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
 914			goto trap_recursive;
 915		if (try_futile &&
 916		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 917fail:
 918			gh->gh_error = GLR_TRYFAILED;
 919			gfs2_holder_wake(gh);
 920			return;
 921		}
 922		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 923			continue;
 924		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 925			insert_pt = &gh2->gh_list;
 926	}
 927	set_bit(GLF_QUEUED, &gl->gl_flags);
 928	trace_gfs2_glock_queue(gh, 1);
 929	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 930	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
 931	if (likely(insert_pt == NULL)) {
 932		list_add_tail(&gh->gh_list, &gl->gl_holders);
 933		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
 934			goto do_cancel;
 935		return;
 936	}
 937	list_add_tail(&gh->gh_list, insert_pt);
 938do_cancel:
 939	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 940	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
 941		spin_unlock(&gl->gl_lockref.lock);
 942		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
 943			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
 944		spin_lock(&gl->gl_lockref.lock);
 945	}
 946	return;
 947
 948trap_recursive:
 949	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
 950	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
 951	pr_err("lock type: %d req lock state : %d\n",
 952	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
 953	pr_err("new: %pSR\n", (void *)gh->gh_ip);
 954	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
 955	pr_err("lock type: %d req lock state : %d\n",
 956	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
 957	gfs2_dump_glock(NULL, gl);
 958	BUG();
 959}
 960
 961/**
 962 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 963 * @gh: the holder structure
 964 *
 965 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 966 *
 967 * Returns: 0, GLR_TRYFAILED, or errno on failure
 968 */
 969
 970int gfs2_glock_nq(struct gfs2_holder *gh)
 971{
 972	struct gfs2_glock *gl = gh->gh_gl;
 973	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 974	int error = 0;
 975
 976	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 977		return -EIO;
 978
 979	if (test_bit(GLF_LRU, &gl->gl_flags))
 980		gfs2_glock_remove_from_lru(gl);
 981
 982	spin_lock(&gl->gl_lockref.lock);
 983	add_to_queue(gh);
 984	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
 985		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 986		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 987		gl->gl_lockref.count++;
 988		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 989			gl->gl_lockref.count--;
 990	}
 991	run_queue(gl, 1);
 992	spin_unlock(&gl->gl_lockref.lock);
 993
 994	if (!(gh->gh_flags & GL_ASYNC))
 995		error = gfs2_glock_wait(gh);
 996
 997	return error;
 998}
 999
1000/**
1001 * gfs2_glock_poll - poll to see if an async request has been completed
1002 * @gh: the holder
1003 *
1004 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1005 */
1006
1007int gfs2_glock_poll(struct gfs2_holder *gh)
1008{
1009	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1010}
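/*
 * Illustrative sketch (assumed usage, not from the original file): the
 * GL_ASYNC pattern queues the request without blocking, polls, then
 * collects the result with gfs2_glock_wait().
 */
static int example_async_acquire(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, gh);
	gfs2_glock_nq(gh);		/* returns immediately with GL_ASYNC */
	while (!gfs2_glock_poll(gh))
		cond_resched();		/* room to do other work meanwhile */
	return gfs2_glock_wait(gh);	/* 0 on success */
}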
1011
1012/**
1013 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1014 * @gh: the glock holder
1015 *
1016 */
1017
1018void gfs2_glock_dq(struct gfs2_holder *gh)
1019{
1020	struct gfs2_glock *gl = gh->gh_gl;
1021	const struct gfs2_glock_operations *glops = gl->gl_ops;
1022	unsigned delay = 0;
1023	int fast_path = 0;
1024
1025	spin_lock(&gl->gl_lockref.lock);
1026	if (gh->gh_flags & GL_NOCACHE)
1027		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1028
1029	list_del_init(&gh->gh_list);
1030	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1031	if (find_first_holder(gl) == NULL) {
1032		if (glops->go_unlock) {
1033			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1034			spin_unlock(&gl->gl_lockref.lock);
1035			glops->go_unlock(gh);
1036			spin_lock(&gl->gl_lockref.lock);
1037			clear_bit(GLF_LOCK, &gl->gl_flags);
1038		}
1039		if (list_empty(&gl->gl_holders) &&
1040		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1041		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1042			fast_path = 1;
1043	}
1044	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
1045	    (glops->go_flags & GLOF_LRU))
1046		gfs2_glock_add_to_lru(gl);
1047
1048	trace_gfs2_glock_queue(gh, 0);
1049	spin_unlock(&gl->gl_lockref.lock);
1050	if (likely(fast_path))
1051		return;
1052
1053	gfs2_glock_hold(gl);
1054	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1055	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1056	    gl->gl_name.ln_type == LM_TYPE_INODE)
1057		delay = gl->gl_hold_time;
1058	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1059		gfs2_glock_put(gl);
1060}
1061
1062void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1063{
1064	struct gfs2_glock *gl = gh->gh_gl;
1065	gfs2_glock_dq(gh);
1066	might_sleep();
1067	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1068}
1069
1070/**
1071 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1072 * @gh: the holder structure
1073 *
1074 */
1075
1076void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1077{
1078	gfs2_glock_dq(gh);
1079	gfs2_holder_uninit(gh);
1080}
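/*
 * Illustrative sketch (not from the original file): the common
 * synchronous pattern uses a stack holder; gfs2_glock_dq_uninit() both
 * releases the lock and drops the reference gfs2_holder_init() took.
 */
static int example_shared_section(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);	/* blocks until the lock is granted */
	if (error) {
		gfs2_holder_uninit(&gh);
		return error;
	}
	/* ... read data protected by the glock ... */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}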
1081
1082/**
1083 * gfs2_glock_nq_num - acquire a glock based on lock number
1084 * @sdp: the filesystem
1085 * @number: the lock number
1086 * @glops: the glock operations for the type of glock
1087 * @state: the state to acquire the glock in
1088 * @flags: modifier flags for the acquisition
1089 * @gh: the struct gfs2_holder
1090 *
1091 * Returns: errno
1092 */
1093
1094int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1095		      const struct gfs2_glock_operations *glops,
1096		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1097{
1098	struct gfs2_glock *gl;
1099	int error;
1100
1101	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1102	if (!error) {
1103		error = gfs2_glock_nq_init(gl, state, flags, gh);
1104		gfs2_glock_put(gl);
1105	}
1106
1107	return error;
1108}
1109
1110/**
1111 * glock_compare - Compare two struct gfs2_glock structures for sorting
1112 * @arg_a: the first structure
1113 * @arg_b: the second structure
1114 *
1115 */
1116
1117static int glock_compare(const void *arg_a, const void *arg_b)
1118{
1119	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1120	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1121	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1122	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1123
1124	if (a->ln_number > b->ln_number)
1125		return 1;
1126	if (a->ln_number < b->ln_number)
1127		return -1;
1128	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1129	return 0;
1130}
1131
1132/**
1133 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1134 * @num_gh: the number of structures
1135 * @ghs: an array of struct gfs2_holder structures
1136 *
1137 * Returns: 0 on success (all glocks acquired),
1138 *          errno on failure (no glocks acquired)
1139 */
1140
1141static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1142		     struct gfs2_holder **p)
1143{
1144	unsigned int x;
1145	int error = 0;
1146
1147	for (x = 0; x < num_gh; x++)
1148		p[x] = &ghs[x];
1149
1150	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1151
1152	for (x = 0; x < num_gh; x++) {
1153		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1154
1155		error = gfs2_glock_nq(p[x]);
1156		if (error) {
1157			while (x--)
1158				gfs2_glock_dq(p[x]);
1159			break;
1160		}
1161	}
1162
1163	return error;
1164}
1165
1166/**
1167 * gfs2_glock_nq_m - acquire multiple glocks
1168 * @num_gh: the number of structures
1169 * @ghs: an array of struct gfs2_holder structures
1170 *
1171 *
1172 * Returns: 0 on success (all glocks acquired),
1173 *          errno on failure (no glocks acquired)
1174 */
1175
1176int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1177{
1178	struct gfs2_holder *tmp[4];
1179	struct gfs2_holder **pph = tmp;
1180	int error = 0;
1181
1182	switch(num_gh) {
1183	case 0:
1184		return 0;
1185	case 1:
1186		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1187		return gfs2_glock_nq(ghs);
1188	default:
1189		if (num_gh <= 4)
1190			break;
1191		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1192		if (!pph)
1193			return -ENOMEM;
1194	}
1195
1196	error = nq_m_sync(num_gh, ghs, pph);
1197
1198	if (pph != tmp)
1199		kfree(pph);
1200
1201	return error;
1202}
1203
1204/**
1205 * gfs2_glock_dq_m - release multiple glocks
1206 * @num_gh: the number of structures
1207 * @ghs: an array of struct gfs2_holder structures
1208 *
1209 */
1210
1211void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1212{
1213	while (num_gh--)
1214		gfs2_glock_dq(&ghs[num_gh]);
1215}
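/*
 * Illustrative sketch (assumed usage): taking two glocks at once.
 * gfs2_glock_nq_m() sorts the holders by lock number internally, so the
 * caller need not worry about ordering, and it acquires all or none.
 */
static int example_lock_two(struct gfs2_glock *a, struct gfs2_glock *b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
	error = gfs2_glock_nq_m(2, ghs);
	if (!error)
		gfs2_glock_dq_m(2, ghs);	/* release both */
	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}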
1216
1217void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1218{
1219	unsigned long delay = 0;
1220	unsigned long holdtime;
1221	unsigned long now = jiffies;
1222
1223	gfs2_glock_hold(gl);
1224	holdtime = gl->gl_tchange + gl->gl_hold_time;
1225	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1226	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1227		if (time_before(now, holdtime))
1228			delay = holdtime - now;
1229		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1230			delay = gl->gl_hold_time;
1231	}
1232
1233	spin_lock(&gl->gl_lockref.lock);
1234	handle_callback(gl, state, delay, true);
1235	spin_unlock(&gl->gl_lockref.lock);
1236	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1237		gfs2_glock_put(gl);
1238}
1239
1240/**
1241 * gfs2_should_freeze - Figure out if glock should be frozen
1242 * @gl: The glock in question
1243 *
1244 * Glocks are not frozen if (a) the result of the dlm operation is
1245 * an error, (b) the locking operation was an unlock operation or
1246 * (c) there is a "noexp" flagged request anywhere in the queue
1247 *
1248 * Returns: 1 if freezing should occur, 0 otherwise
1249 */
1250
1251static int gfs2_should_freeze(const struct gfs2_glock *gl)
1252{
1253	const struct gfs2_holder *gh;
1254
1255	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1256		return 0;
1257	if (gl->gl_target == LM_ST_UNLOCKED)
1258		return 0;
1259
1260	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1261		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1262			continue;
1263		if (LM_FLAG_NOEXP & gh->gh_flags)
1264			return 0;
1265	}
1266
1267	return 1;
1268}
1269
1270/**
1271 * gfs2_glock_complete - Callback used by locking
1272 * @gl: Pointer to the glock
1273 * @ret: The return value from the dlm
1274 *
1275 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
1276 * to use a bitfield shared with other glock state fields.
1277 */
1278
1279void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1280{
1281	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1282
1283	spin_lock(&gl->gl_lockref.lock);
1284	gl->gl_reply = ret;
1285
1286	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1287		if (gfs2_should_freeze(gl)) {
1288			set_bit(GLF_FROZEN, &gl->gl_flags);
1289			spin_unlock(&gl->gl_lockref.lock);
1290			return;
1291		}
1292	}
1293
1294	gl->gl_lockref.count++;
1295	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1296	spin_unlock(&gl->gl_lockref.lock);
1297
1298	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1299		gfs2_glock_put(gl);
1300}
1301
1302static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1303{
1304	struct gfs2_glock *gla, *glb;
1305
1306	gla = list_entry(a, struct gfs2_glock, gl_lru);
1307	glb = list_entry(b, struct gfs2_glock, gl_lru);
1308
1309	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1310		return 1;
1311	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1312		return -1;
1313
1314	return 0;
1315}
1316
1317/**
1318 * gfs2_dispose_glock_lru - Demote a list of glocks
1319 * @list: The list to dispose of
1320 *
1321 * Disposing of glocks may involve disk accesses, so we sort the glocks
1322 * by number (i.e. disk location of the inodes) so that any such
1323 * accesses are issued mostly in ascending order.
1324 *
1325 * Must be called under the lru_lock, but may drop and retake this
1326 * lock. While the lru_lock is dropped, entries may vanish from the
1327 * list, but no new entries will appear on the list (since it is
1328 * private)
1329 */
1330
1331static void gfs2_dispose_glock_lru(struct list_head *list)
1332__releases(&lru_lock)
1333__acquires(&lru_lock)
1334{
1335	struct gfs2_glock *gl;
1336
1337	list_sort(NULL, list, glock_cmp);
1338
1339	while(!list_empty(list)) {
1340		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1341		list_del_init(&gl->gl_lru);
1342		if (!spin_trylock(&gl->gl_lockref.lock)) {
1343add_back_to_lru:
1344			list_add(&gl->gl_lru, &lru_list);
1345			atomic_inc(&lru_count);
1346			continue;
1347		}
1348		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1349			spin_unlock(&gl->gl_lockref.lock);
1350			goto add_back_to_lru;
1351		}
1352		clear_bit(GLF_LRU, &gl->gl_flags);
1353		gl->gl_lockref.count++;
1354		if (demote_ok(gl))
1355			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1356		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1357		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1358			gl->gl_lockref.count--;
1359		spin_unlock(&gl->gl_lockref.lock);
1360		cond_resched_lock(&lru_lock);
1361	}
1362}
1363
1364/**
1365 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1366 * @nr: The number of entries to scan
1367 *
1368 * This function selects the entries on the LRU which are able to
1369 * be demoted, and then kicks off the process by calling
1370 * gfs2_dispose_glock_lru() above.
1371 */
1372
1373static long gfs2_scan_glock_lru(int nr)
1374{
1375	struct gfs2_glock *gl;
1376	LIST_HEAD(skipped);
1377	LIST_HEAD(dispose);
1378	long freed = 0;
1379
1380	spin_lock(&lru_lock);
1381	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1382		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1383
1384		/* Test for being demotable */
1385		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1386			list_move(&gl->gl_lru, &dispose);
1387			atomic_dec(&lru_count);
1388			freed++;
1389			continue;
1390		}
1391
1392		list_move(&gl->gl_lru, &skipped);
1393	}
1394	list_splice(&skipped, &lru_list);
1395	if (!list_empty(&dispose))
1396		gfs2_dispose_glock_lru(&dispose);
1397	spin_unlock(&lru_lock);
1398
1399	return freed;
1400}
1401
1402static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1403					    struct shrink_control *sc)
1404{
1405	if (!(sc->gfp_mask & __GFP_FS))
1406		return SHRINK_STOP;
1407	return gfs2_scan_glock_lru(sc->nr_to_scan);
1408}
1409
1410static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1411					     struct shrink_control *sc)
1412{
1413	return vfs_pressure_ratio(atomic_read(&lru_count));
1414}
1415
1416static struct shrinker glock_shrinker = {
1417	.seeks = DEFAULT_SEEKS,
1418	.count_objects = gfs2_glock_shrink_count,
1419	.scan_objects = gfs2_glock_shrink_scan,
1420};
1421
1422/**
1423 * glock_hash_walk - Call a function for each glock in the hash table
1424 * @examiner: the function to call for each glock
1425 * @sdp: the filesystem
1426 * Glocks belonging to other superblocks are skipped.
1427 *
1428 */
1429
1430static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1431{
1432	struct gfs2_glock *gl;
1433	struct rhash_head *pos;
1434	const struct bucket_table *tbl;
1435	int i;
1436
1437	rcu_read_lock();
1438	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
1439	for (i = 0; i < tbl->size; i++) {
1440		rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
1441			if ((gl->gl_name.ln_sbd == sdp) &&
1442			    lockref_get_not_dead(&gl->gl_lockref))
1443				examiner(gl);
1444		}
1445	}
1446	rcu_read_unlock();
1447	cond_resched();
1448}
1449
1450/**
1451 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1452 * @gl: The glock to thaw
1453 *
1454 */
1455
1456static void thaw_glock(struct gfs2_glock *gl)
1457{
1458	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1459		goto out;
1460	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1461	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
1462out:
1463		gfs2_glock_put(gl);
1464	}
1465}
1466
1467/**
1468 * clear_glock - look at a glock and see if we can free it from glock cache
1469 * @gl: the glock to look at
1470 *
1471 */
1472
1473static void clear_glock(struct gfs2_glock *gl)
1474{
1475	gfs2_glock_remove_from_lru(gl);
1476
1477	spin_lock(&gl->gl_lockref.lock);
1478	if (gl->gl_state != LM_ST_UNLOCKED)
1479		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1480	spin_unlock(&gl->gl_lockref.lock);
1481	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1482		gfs2_glock_put(gl);
1483}
1484
1485/**
1486 * gfs2_glock_thaw - Thaw any frozen glocks
1487 * @sdp: The super block
1488 *
1489 */
1490
1491void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1492{
1493	glock_hash_walk(thaw_glock, sdp);
1494}
1495
1496static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1497{
1498	spin_lock(&gl->gl_lockref.lock);
1499	gfs2_dump_glock(seq, gl);
1500	spin_unlock(&gl->gl_lockref.lock);
1501}
1502
1503static void dump_glock_func(struct gfs2_glock *gl)
1504{
1505	dump_glock(NULL, gl);
1506}
1507
1508/**
1509 * gfs2_gl_hash_clear - Empty out the glock hash table
1510 * @sdp: the filesystem
1511 * @wait: wait until it's all gone
1512 *
1513 * Called when unmounting the filesystem.
1514 */
1515
1516void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1517{
1518	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1519	flush_workqueue(glock_workqueue);
1520	glock_hash_walk(clear_glock, sdp);
1521	flush_workqueue(glock_workqueue);
1522	wait_event_timeout(sdp->sd_glock_wait,
1523			   atomic_read(&sdp->sd_glock_disposal) == 0,
1524			   HZ * 600);
1525	glock_hash_walk(dump_glock_func, sdp);
1526}
1527
1528void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1529{
1530	struct gfs2_glock *gl = ip->i_gl;
1531	int ret;
1532
1533	ret = gfs2_truncatei_resume(ip);
1534	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1535
1536	spin_lock(&gl->gl_lockref.lock);
1537	clear_bit(GLF_LOCK, &gl->gl_flags);
1538	run_queue(gl, 1);
1539	spin_unlock(&gl->gl_lockref.lock);
1540}
1541
1542static const char *state2str(unsigned state)
1543{
1544	switch(state) {
1545	case LM_ST_UNLOCKED:
1546		return "UN";
1547	case LM_ST_SHARED:
1548		return "SH";
1549	case LM_ST_DEFERRED:
1550		return "DF";
1551	case LM_ST_EXCLUSIVE:
1552		return "EX";
1553	}
1554	return "??";
1555}
1556
1557static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
1558{
1559	char *p = buf;
1560	if (flags & LM_FLAG_TRY)
1561		*p++ = 't';
1562	if (flags & LM_FLAG_TRY_1CB)
1563		*p++ = 'T';
1564	if (flags & LM_FLAG_NOEXP)
1565		*p++ = 'e';
1566	if (flags & LM_FLAG_ANY)
1567		*p++ = 'A';
1568	if (flags & LM_FLAG_PRIORITY)
1569		*p++ = 'p';
1570	if (flags & GL_ASYNC)
1571		*p++ = 'a';
1572	if (flags & GL_EXACT)
1573		*p++ = 'E';
1574	if (flags & GL_NOCACHE)
1575		*p++ = 'c';
1576	if (test_bit(HIF_HOLDER, &iflags))
1577		*p++ = 'H';
1578	if (test_bit(HIF_WAIT, &iflags))
1579		*p++ = 'W';
1580	if (test_bit(HIF_FIRST, &iflags))
1581		*p++ = 'F';
1582	*p = 0;
1583	return buf;
1584}
1585
1586/**
1587 * dump_holder - print information about a glock holder
1588 * @seq: the seq_file struct
1589 * @gh: the glock holder
1590 *
1591 */
1592
1593static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1594{
1595	struct task_struct *gh_owner = NULL;
1596	char flags_buf[32];
1597
1598	rcu_read_lock();
1599	if (gh->gh_owner_pid)
1600		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1601	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1602		       state2str(gh->gh_state),
1603		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1604		       gh->gh_error,
1605		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1606		       gh_owner ? gh_owner->comm : "(ended)",
1607		       (void *)gh->gh_ip);
1608	rcu_read_unlock();
1609}
1610
1611static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1612{
1613	const unsigned long *gflags = &gl->gl_flags;
1614	char *p = buf;
1615
1616	if (test_bit(GLF_LOCK, gflags))
1617		*p++ = 'l';
1618	if (test_bit(GLF_DEMOTE, gflags))
1619		*p++ = 'D';
1620	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1621		*p++ = 'd';
1622	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1623		*p++ = 'p';
1624	if (test_bit(GLF_DIRTY, gflags))
1625		*p++ = 'y';
1626	if (test_bit(GLF_LFLUSH, gflags))
1627		*p++ = 'f';
1628	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1629		*p++ = 'i';
1630	if (test_bit(GLF_REPLY_PENDING, gflags))
1631		*p++ = 'r';
1632	if (test_bit(GLF_INITIAL, gflags))
1633		*p++ = 'I';
1634	if (test_bit(GLF_FROZEN, gflags))
1635		*p++ = 'F';
1636	if (test_bit(GLF_QUEUED, gflags))
1637		*p++ = 'q';
1638	if (test_bit(GLF_LRU, gflags))
1639		*p++ = 'L';
1640	if (gl->gl_object)
1641		*p++ = 'o';
1642	if (test_bit(GLF_BLOCKING, gflags))
1643		*p++ = 'b';
1644	*p = 0;
1645	return buf;
1646}
1647
1648/**
1649 * gfs2_dump_glock - print information about a glock
1650 * @seq: The seq_file struct
1651 * @gl: the glock
1652 *
1653 * The file format is as follows:
1654 * One line per object, capital letters are used to indicate objects
1655 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1656 * other objects are indented by a single space and follow the glock to
1657 * which they are related. Fields are indicated by lower case letters
1658 * followed by a colon and the field value, except for strings which are in
1659 * [] so that it's possible to see if they are composed of spaces for
1660 * example. The fields are n = number (id of the object), f = flags,
1661 * t = type, s = state, r = refcount, e = error, p = pid.
1662 *
1663 */
1664
1665void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1666{
1667	const struct gfs2_glock_operations *glops = gl->gl_ops;
1668	unsigned long long dtime;
1669	const struct gfs2_holder *gh;
1670	char gflags_buf[32];
1671
1672	dtime = jiffies - gl->gl_demote_time;
1673	dtime *= 1000000/HZ; /* demote time in uSec */
1674	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1675		dtime = 0;
1676	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1677		  state2str(gl->gl_state),
1678		  gl->gl_name.ln_type,
1679		  (unsigned long long)gl->gl_name.ln_number,
1680		  gflags2str(gflags_buf, gl),
1681		  state2str(gl->gl_target),
1682		  state2str(gl->gl_demote_state), dtime,
1683		  atomic_read(&gl->gl_ail_count),
1684		  atomic_read(&gl->gl_revokes),
1685		  (int)gl->gl_lockref.count, gl->gl_hold_time);
1686
1687	list_for_each_entry(gh, &gl->gl_holders, gh_list)
1688		dump_holder(seq, gh);
1689
1690	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1691		glops->go_dump(seq, gl);
1692}
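/*
 * Example of the resulting output (added; field values invented):
 *
 *   G:  s:SH n:2/27bb f:lq t:SH d:EX/0 a:0 v:0 r:3 m:10
 *    H: s:SH f:H e:0 p:1250 [gfs2_quotad] gfs2_quota_init+0x8b/0x3a0
 *
 * i.e. a shared inode glock (type 2) with one granted holder, per the
 * format description above.
 */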
1693
1694static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1695{
1696	struct gfs2_glock *gl = iter_ptr;
1697
1698	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1699		   gl->gl_name.ln_type,
1700		   (unsigned long long)gl->gl_name.ln_number,
1701		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1702		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1703		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1704		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1705		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1706		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1707		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1708		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1709	return 0;
1710}
1711
1712static const char *gfs2_gltype[] = {
1713	"type",
1714	"reserved",
1715	"nondisk",
1716	"inode",
1717	"rgrp",
1718	"meta",
1719	"iopen",
1720	"flock",
1721	"plock",
1722	"quota",
1723	"journal",
1724};
1725
1726static const char *gfs2_stype[] = {
1727	[GFS2_LKS_SRTT]		= "srtt",
1728	[GFS2_LKS_SRTTVAR]	= "srttvar",
1729	[GFS2_LKS_SRTTB]	= "srttb",
1730	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1731	[GFS2_LKS_SIRT]		= "sirt",
1732	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1733	[GFS2_LKS_DCOUNT]	= "dlm",
1734	[GFS2_LKS_QCOUNT]	= "queue",
1735};
1736
1737#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1738
1739static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1740{
1741	struct gfs2_sbd *sdp = seq->private;
1742	loff_t pos = *(loff_t *)iter_ptr;
1743	unsigned index = pos >> 3;
1744	unsigned subindex = pos & 0x07;
1745	int i;
1746
1747	if (index == 0 && subindex != 0)
1748		return 0;
1749
1750	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1751		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1752
1753	for_each_possible_cpu(i) {
1754		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1755
1756		if (index == 0)
1757			seq_printf(seq, " %15u", i);
1758		else
1759			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1760				   lkstats[index - 1].stats[subindex]);
1761	}
1762	seq_putc(seq, '\n');
1763	return 0;
1764}
1765
1766int __init gfs2_glock_init(void)
1767{
1768	int ret;
1769
1770	ret = rhashtable_init(&gl_hash_table, &ht_parms);
1771	if (ret < 0)
1772		return ret;
1773
1774	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1775					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1776	if (!glock_workqueue) {
1777		rhashtable_destroy(&gl_hash_table);
1778		return -ENOMEM;
1779	}
1780	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1781						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1782						0);
1783	if (!gfs2_delete_workqueue) {
1784		destroy_workqueue(glock_workqueue);
1785		rhashtable_destroy(&gl_hash_table);
1786		return -ENOMEM;
1787	}
1788
1789	ret = register_shrinker(&glock_shrinker);
1790	if (ret) {
1791		destroy_workqueue(gfs2_delete_workqueue);
1792		destroy_workqueue(glock_workqueue);
1793		rhashtable_destroy(&gl_hash_table);
1794		return ret;
1795	}
1796
1797	return 0;
1798}
1799
1800void gfs2_glock_exit(void)
1801{
1802	unregister_shrinker(&glock_shrinker);
1803	rhashtable_destroy(&gl_hash_table);
1804	destroy_workqueue(glock_workqueue);
1805	destroy_workqueue(gfs2_delete_workqueue);
1806}
1807
1808static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1809{
1810	do {
1811		gi->gl = rhashtable_walk_next(&gi->hti);
1812		if (IS_ERR(gi->gl)) {
1813			if (PTR_ERR(gi->gl) == -EAGAIN)
1814				continue;
1815			gi->gl = NULL;
1816		}
1817	/* Skip entries for other sb and dead entries */
1818	} while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
1819			      __lockref_is_dead(&gi->gl->gl_lockref)));
1820}
1821
1822static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1823{
1824	struct gfs2_glock_iter *gi = seq->private;
1825	loff_t n = *pos;
1826	int ret;
1827
1828	if (gi->last_pos <= *pos)
1829		n = (*pos - gi->last_pos);
1830
1831	ret = rhashtable_walk_start(&gi->hti);
1832	if (ret)
1833		return NULL;
1834
1835	do {
1836		gfs2_glock_iter_next(gi);
1837	} while (gi->gl && n--);
1838
1839	gi->last_pos = *pos;
1840	return gi->gl;
1841}
1842
1843static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1844				 loff_t *pos)
1845{
1846	struct gfs2_glock_iter *gi = seq->private;
1847
1848	(*pos)++;
1849	gi->last_pos = *pos;
1850	gfs2_glock_iter_next(gi);
1851	return gi->gl;
1852}
1853
1854static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1855{
1856	struct gfs2_glock_iter *gi = seq->private;
1857
1858	gi->gl = NULL;
1859	rhashtable_walk_stop(&gi->hti);
1860}
1861
1862static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1863{
1864	dump_glock(seq, iter_ptr);
1865	return 0;
1866}
1867
1868static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1869{
1870	preempt_disable();
1871	if (*pos >= GFS2_NR_SBSTATS)
1872		return NULL;
1873	return pos;
1874}
1875
1876static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1877				   loff_t *pos)
1878{
1879	(*pos)++;
1880	if (*pos >= GFS2_NR_SBSTATS)
1881		return NULL;
1882	return pos;
1883}
1884
1885static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1886{
1887	preempt_enable();
1888}
1889
1890static const struct seq_operations gfs2_glock_seq_ops = {
1891	.start = gfs2_glock_seq_start,
1892	.next  = gfs2_glock_seq_next,
1893	.stop  = gfs2_glock_seq_stop,
1894	.show  = gfs2_glock_seq_show,
1895};
1896
1897static const struct seq_operations gfs2_glstats_seq_ops = {
1898	.start = gfs2_glock_seq_start,
1899	.next  = gfs2_glock_seq_next,
1900	.stop  = gfs2_glock_seq_stop,
1901	.show  = gfs2_glstats_seq_show,
1902};
1903
1904static const struct seq_operations gfs2_sbstats_seq_ops = {
1905	.start = gfs2_sbstats_seq_start,
1906	.next  = gfs2_sbstats_seq_next,
1907	.stop  = gfs2_sbstats_seq_stop,
1908	.show  = gfs2_sbstats_seq_show,
1909};
1910
1911#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
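/*
 * Added note: pre-sizing the seq_file buffer (up to 64k when the
 * allocation succeeds) lets large glock dumps be formatted in fewer
 * passes; if the kmalloc below fails, seq_read() quietly falls back to
 * its default single-page buffer.
 */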
1912
1913static int gfs2_glocks_open(struct inode *inode, struct file *file)
1914{
1915	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1916				   sizeof(struct gfs2_glock_iter));
1917	if (ret == 0) {
1918		struct seq_file *seq = file->private_data;
1919		struct gfs2_glock_iter *gi = seq->private;
1920
1921		gi->sdp = inode->i_private;
1922		gi->last_pos = 0;
1923		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1924		if (seq->buf)
1925			seq->size = GFS2_SEQ_GOODSIZE;
1926		gi->gl = NULL;
1927		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
1928	}
1929	return ret;
1930}
1931
1932static int gfs2_glocks_release(struct inode *inode, struct file *file)
1933{
1934	struct seq_file *seq = file->private_data;
1935	struct gfs2_glock_iter *gi = seq->private;
1936
1937	gi->gl = NULL;
1938	rhashtable_walk_exit(&gi->hti);
1939	return seq_release_private(inode, file);
1940}
1941
1942static int gfs2_glstats_open(struct inode *inode, struct file *file)
1943{
1944	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
1945				   sizeof(struct gfs2_glock_iter));
1946	if (ret == 0) {
1947		struct seq_file *seq = file->private_data;
1948		struct gfs2_glock_iter *gi = seq->private;
1949		gi->sdp = inode->i_private;
1950		gi->last_pos = 0;
1951		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1952		if (seq->buf)
1953			seq->size = GFS2_SEQ_GOODSIZE;
1954		gi->gl = NULL;
1955		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
1956	}
1957	return ret;
1958}
1959
1960static int gfs2_sbstats_open(struct inode *inode, struct file *file)
1961{
1962	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
1963	if (ret == 0) {
1964		struct seq_file *seq = file->private_data;
1965		seq->private = inode->i_private;  /* sdp */
1966	}
1967	return ret;
1968}
1969
1970static const struct file_operations gfs2_glocks_fops = {
1971	.owner   = THIS_MODULE,
1972	.open    = gfs2_glocks_open,
1973	.read    = seq_read,
1974	.llseek  = seq_lseek,
1975	.release = gfs2_glocks_release,
1976};
1977
1978static const struct file_operations gfs2_glstats_fops = {
1979	.owner   = THIS_MODULE,
1980	.open    = gfs2_glstats_open,
1981	.read    = seq_read,
1982	.llseek  = seq_lseek,
1983	.release = gfs2_glocks_release,
1984};
1985
1986static const struct file_operations gfs2_sbstats_fops = {
1987	.owner   = THIS_MODULE,
1988	.open	 = gfs2_sbstats_open,
1989	.read    = seq_read,
1990	.llseek  = seq_lseek,
1991	.release = seq_release,
1992};
1993
1994int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1995{
1996	struct dentry *dent;
1997
1998	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1999	if (IS_ERR_OR_NULL(dent))
2000		goto fail;
2001	sdp->debugfs_dir = dent;
2002
2003	dent = debugfs_create_file("glocks",
2004				   S_IFREG | S_IRUGO,
2005				   sdp->debugfs_dir, sdp,
2006				   &gfs2_glocks_fops);
2007	if (IS_ERR_OR_NULL(dent))
2008		goto fail;
2009	sdp->debugfs_dentry_glocks = dent;
2010
2011	dent = debugfs_create_file("glstats",
2012				   S_IFREG | S_IRUGO,
2013				   sdp->debugfs_dir, sdp,
2014				   &gfs2_glstats_fops);
2015	if (IS_ERR_OR_NULL(dent))
2016		goto fail;
2017	sdp->debugfs_dentry_glstats = dent;
2018
2019	dent = debugfs_create_file("sbstats",
2020				   S_IFREG | S_IRUGO,
2021				   sdp->debugfs_dir, sdp,
2022				   &gfs2_sbstats_fops);
2023	if (IS_ERR_OR_NULL(dent))
2024		goto fail;
2025	sdp->debugfs_dentry_sbstats = dent;
2026
2027	return 0;
2028fail:
2029	gfs2_delete_debugfs_file(sdp);
2030	return dent ? PTR_ERR(dent) : -ENOMEM;
2031}
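/*
 * Added note: with debugfs mounted in the usual place, this creates
 *
 *   /sys/kernel/debug/gfs2/<table_name>/glocks
 *   /sys/kernel/debug/gfs2/<table_name>/glstats
 *   /sys/kernel/debug/gfs2/<table_name>/sbstats
 *
 * all read-only (S_IRUGO) regular files backed by the seq_file ops above.
 */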
2032
2033void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2034{
2035	if (sdp->debugfs_dir) {
2036		if (sdp->debugfs_dentry_glocks) {
2037			debugfs_remove(sdp->debugfs_dentry_glocks);
2038			sdp->debugfs_dentry_glocks = NULL;
2039		}
2040		if (sdp->debugfs_dentry_glstats) {
2041			debugfs_remove(sdp->debugfs_dentry_glstats);
2042			sdp->debugfs_dentry_glstats = NULL;
2043		}
2044		if (sdp->debugfs_dentry_sbstats) {
2045			debugfs_remove(sdp->debugfs_dentry_sbstats);
2046			sdp->debugfs_dentry_sbstats = NULL;
2047		}
2048		debugfs_remove(sdp->debugfs_dir);
2049		sdp->debugfs_dir = NULL;
2050	}
2051}
2052
2053int gfs2_register_debugfs(void)
2054{
2055	gfs2_root = debugfs_create_dir("gfs2", NULL);
2056	if (IS_ERR(gfs2_root))
2057		return PTR_ERR(gfs2_root);
2058	return gfs2_root ? 0 : -ENOMEM;
2059}
2060
2061void gfs2_unregister_debugfs(void)
2062{
2063	debugfs_remove(gfs2_root);
2064	gfs2_root = NULL;
2065}