   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* -*- mode: c; c-basic-offset: 8; -*-
   3 * vim: noexpandtab sw=8 ts=8 sts=0:
   4 *
   5 * dlmmaster.c
   6 *
   7 * standalone DLM module
   8 *
   9 * Copyright (C) 2004 Oracle.  All rights reserved.
  10 */
  11
  12
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/types.h>
  16#include <linux/slab.h>
  17#include <linux/highmem.h>
  18#include <linux/init.h>
  19#include <linux/sysctl.h>
  20#include <linux/random.h>
  21#include <linux/blkdev.h>
  22#include <linux/socket.h>
  23#include <linux/inet.h>
  24#include <linux/spinlock.h>
  25#include <linux/delay.h>
  26
  27
  28#include "cluster/heartbeat.h"
  29#include "cluster/nodemanager.h"
  30#include "cluster/tcp.h"
  31
  32#include "dlmapi.h"
  33#include "dlmcommon.h"
  34#include "dlmdomain.h"
  35#include "dlmdebug.h"
  36
  37#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
  38#include "cluster/masklog.h"
  39
  40static void dlm_mle_node_down(struct dlm_ctxt *dlm,
  41			      struct dlm_master_list_entry *mle,
  42			      struct o2nm_node *node,
  43			      int idx);
  44static void dlm_mle_node_up(struct dlm_ctxt *dlm,
  45			    struct dlm_master_list_entry *mle,
  46			    struct o2nm_node *node,
  47			    int idx);
  48
  49static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
  50static int dlm_do_assert_master(struct dlm_ctxt *dlm,
  51				struct dlm_lock_resource *res,
  52				void *nodemap, u32 flags);
  53static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
  54
  55static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
  56				struct dlm_master_list_entry *mle,
  57				const char *name,
  58				unsigned int namelen)
  59{
  60	if (dlm != mle->dlm)
  61		return 0;
  62
  63	if (namelen != mle->mnamelen ||
  64	    memcmp(name, mle->mname, namelen) != 0)
  65		return 0;
  66
  67	return 1;
  68}
  69
  70static struct kmem_cache *dlm_lockres_cache;
  71static struct kmem_cache *dlm_lockname_cache;
  72static struct kmem_cache *dlm_mle_cache;
  73
  74static void dlm_mle_release(struct kref *kref);
  75static void dlm_init_mle(struct dlm_master_list_entry *mle,
  76			enum dlm_mle_type type,
  77			struct dlm_ctxt *dlm,
  78			struct dlm_lock_resource *res,
  79			const char *name,
  80			unsigned int namelen);
  81static void dlm_put_mle(struct dlm_master_list_entry *mle);
  82static void __dlm_put_mle(struct dlm_master_list_entry *mle);
  83static int dlm_find_mle(struct dlm_ctxt *dlm,
  84			struct dlm_master_list_entry **mle,
  85			char *name, unsigned int namelen);
  86
  87static int dlm_do_master_request(struct dlm_lock_resource *res,
  88				 struct dlm_master_list_entry *mle, int to);
  89
  90
  91static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
  92				     struct dlm_lock_resource *res,
  93				     struct dlm_master_list_entry *mle,
  94				     int *blocked);
  95static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
  96				    struct dlm_lock_resource *res,
  97				    struct dlm_master_list_entry *mle,
  98				    int blocked);
  99static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
 100				 struct dlm_lock_resource *res,
 101				 struct dlm_master_list_entry *mle,
 102				 struct dlm_master_list_entry **oldmle,
 103				 const char *name, unsigned int namelen,
 104				 u8 new_master, u8 master);
 105
 106static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
 107				    struct dlm_lock_resource *res);
 108static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
 109				      struct dlm_lock_resource *res);
 110static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 111				       struct dlm_lock_resource *res,
 112				       u8 target);
 113static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 114				       struct dlm_lock_resource *res);
 115
 116
 117int dlm_is_host_down(int errno)
 118{
 119	switch (errno) {
 120		case -EBADF:
 121		case -ECONNREFUSED:
 122		case -ENOTCONN:
 123		case -ECONNRESET:
 124		case -EPIPE:
 125		case -EHOSTDOWN:
 126		case -EHOSTUNREACH:
 127		case -ETIMEDOUT:
 128		case -ECONNABORTED:
 129		case -ENETDOWN:
 130		case -ENETUNREACH:
 131		case -ENETRESET:
 132		case -ESHUTDOWN:
 133		case -ENOPROTOOPT:
 134		case -EINVAL:   /* if returned from our tcp code,
 135				   this means there is no socket */
 136			return 1;
 137	}
 138	return 0;
 139}
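
/*
 * A minimal usage sketch (not part of the original file): callers in this
 * file feed the return value of o2net_send_message() to dlm_is_host_down()
 * to separate "the target node died" from a genuine programming error.
 * The request payload and message type below are placeholders.
 */
static int dlm_send_example(struct dlm_ctxt *dlm, void *req, u32 len,
			    u8 to, int *status)
{
	int ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key,
				     req, len, to, status);
	if (ret < 0) {
		if (dlm_is_host_down(ret))
			mlog(0, "link to %u went down\n", to);
		else
			mlog_errno(ret);	/* not a network error: a bug */
	}
	return ret;
}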
 140
 141
 142/*
 143 * MASTER LIST FUNCTIONS
 144 */
 145
 146
 147/*
 148 * regarding master list entries and heartbeat callbacks:
 149 *
 150 * in order to avoid sleeping and allocation that occurs in
 151 * heartbeat, master list entries are simply attached to the
 152 * dlm's established heartbeat callbacks.  the mle is attached
 153 * when it is created, and since the dlm->spinlock is held at
 154 * that time, any heartbeat event will be properly discovered
 155 * by the mle.  the mle needs to be detached from the
 156 * dlm->mle_hb_events list as soon as heartbeat events are no
 157 * longer useful to the mle, and before the mle is freed.
 158 *
 159 * as a general rule, heartbeat events are no longer needed by
 160 * the mle once an "answer" regarding the lock master has been
 161 * received.
 162 */
 163static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
 164					      struct dlm_master_list_entry *mle)
 165{
 166	assert_spin_locked(&dlm->spinlock);
 167
 168	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
 169}
 170
 171
 172static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 173					      struct dlm_master_list_entry *mle)
 174{
 175	if (!list_empty(&mle->hb_events))
 176		list_del_init(&mle->hb_events);
 177}
 178
 179
 180static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 181					    struct dlm_master_list_entry *mle)
 182{
 183	spin_lock(&dlm->spinlock);
 184	__dlm_mle_detach_hb_events(dlm, mle);
 185	spin_unlock(&dlm->spinlock);
 186}
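
/*
 * A lifecycle sketch (not part of the original file): an mle is attached
 * to the heartbeat list under dlm->spinlock when it is created, and is
 * detached once an answer about the master arrives, before the final
 * reference is dropped.
 */
static void dlm_mle_lifecycle_example(struct dlm_ctxt *dlm,
				      struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_attach_hb_events(dlm, mle);	/* now sees node up/down */
	spin_unlock(&dlm->spinlock);

	/* ... mastery runs until the lock master is known ... */

	dlm_mle_detach_hb_events(dlm, mle);	/* events no longer needed */
	dlm_put_mle(mle);			/* drop the reference */
}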
 187
 188static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
 189{
 190	struct dlm_ctxt *dlm;
 191	dlm = mle->dlm;
 192
 193	assert_spin_locked(&dlm->spinlock);
 194	assert_spin_locked(&dlm->master_lock);
 195	mle->inuse++;
 196	kref_get(&mle->mle_refs);
 197}
 198
 199static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
 200{
 201	struct dlm_ctxt *dlm;
 202	dlm = mle->dlm;
 203
 204	spin_lock(&dlm->spinlock);
 205	spin_lock(&dlm->master_lock);
 206	mle->inuse--;
 207	__dlm_put_mle(mle);
 208	spin_unlock(&dlm->master_lock);
 209	spin_unlock(&dlm->spinlock);
 210
 211}
 212
 213/* remove from list and free */
 214static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 215{
 216	struct dlm_ctxt *dlm;
 217	dlm = mle->dlm;
 218
 219	assert_spin_locked(&dlm->spinlock);
 220	assert_spin_locked(&dlm->master_lock);
 221	if (!kref_read(&mle->mle_refs)) {
 222		/* this may or may not crash, but who cares.
 223		 * it's a BUG. */
 224		mlog(ML_ERROR, "bad mle: %p\n", mle);
 225		dlm_print_one_mle(mle);
 226		BUG();
 227	} else
 228		kref_put(&mle->mle_refs, dlm_mle_release);
 229}
 230
 231
 232/* must not have any spinlocks coming in */
 233static void dlm_put_mle(struct dlm_master_list_entry *mle)
 234{
 235	struct dlm_ctxt *dlm;
 236	dlm = mle->dlm;
 237
 238	spin_lock(&dlm->spinlock);
 239	spin_lock(&dlm->master_lock);
 240	__dlm_put_mle(mle);
 241	spin_unlock(&dlm->master_lock);
 242	spin_unlock(&dlm->spinlock);
 243}
 244
 245static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
 246{
 247	kref_get(&mle->mle_refs);
 248}
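
/*
 * A minimal sketch (not part of the original file) of the locking and
 * reference discipline above: dlm->spinlock always nests outside
 * dlm->master_lock, and __dlm_put_mle() asserts both are held because
 * the final kref_put() can free the entry out of the master hash.
 */
static void dlm_mle_ref_example(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm = mle->dlm;

	dlm_get_mle(mle);			/* take a reference */

	spin_lock(&dlm->spinlock);		/* outer lock first */
	spin_lock(&dlm->master_lock);		/* nested master list lock */
	__dlm_put_mle(mle);			/* may free the mle */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}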
 249
 250static void dlm_init_mle(struct dlm_master_list_entry *mle,
 251			enum dlm_mle_type type,
 252			struct dlm_ctxt *dlm,
 253			struct dlm_lock_resource *res,
 254			const char *name,
 255			unsigned int namelen)
 256{
 257	assert_spin_locked(&dlm->spinlock);
 258
 259	mle->dlm = dlm;
 260	mle->type = type;
 261	INIT_HLIST_NODE(&mle->master_hash_node);
 262	INIT_LIST_HEAD(&mle->hb_events);
 263	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
 264	spin_lock_init(&mle->spinlock);
 265	init_waitqueue_head(&mle->wq);
 266	atomic_set(&mle->woken, 0);
 267	kref_init(&mle->mle_refs);
 268	memset(mle->response_map, 0, sizeof(mle->response_map));
 269	mle->master = O2NM_MAX_NODES;
 270	mle->new_master = O2NM_MAX_NODES;
 271	mle->inuse = 0;
 272
 273	BUG_ON(mle->type != DLM_MLE_BLOCK &&
 274	       mle->type != DLM_MLE_MASTER &&
 275	       mle->type != DLM_MLE_MIGRATION);
 276
 277	if (mle->type == DLM_MLE_MASTER) {
 278		BUG_ON(!res);
 279		mle->mleres = res;
 280		memcpy(mle->mname, res->lockname.name, res->lockname.len);
 281		mle->mnamelen = res->lockname.len;
 282		mle->mnamehash = res->lockname.hash;
 283	} else {
 284		BUG_ON(!name);
 285		mle->mleres = NULL;
 286		memcpy(mle->mname, name, namelen);
 287		mle->mnamelen = namelen;
 288		mle->mnamehash = dlm_lockid_hash(name, namelen);
 289	}
 290
 291	atomic_inc(&dlm->mle_tot_count[mle->type]);
 292	atomic_inc(&dlm->mle_cur_count[mle->type]);
 293
 294	/* copy off the node_map and register hb callbacks on our copy */
 295	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
 296	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
 297	clear_bit(dlm->node_num, mle->vote_map);
 298	clear_bit(dlm->node_num, mle->node_map);
 299
 300	/* attach the mle to the domain node up/down events */
 301	__dlm_mle_attach_hb_events(dlm, mle);
 302}
 303
 304void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 305{
 306	assert_spin_locked(&dlm->spinlock);
 307	assert_spin_locked(&dlm->master_lock);
 308
 309	if (!hlist_unhashed(&mle->master_hash_node))
 310		hlist_del_init(&mle->master_hash_node);
 311}
 312
 313void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 314{
 315	struct hlist_head *bucket;
 316
 317	assert_spin_locked(&dlm->master_lock);
 318
 319	bucket = dlm_master_hash(dlm, mle->mnamehash);
 320	hlist_add_head(&mle->master_hash_node, bucket);
 321}
 322
 323/* returns 1 if found, 0 if not */
 324static int dlm_find_mle(struct dlm_ctxt *dlm,
 325			struct dlm_master_list_entry **mle,
 326			char *name, unsigned int namelen)
 327{
 328	struct dlm_master_list_entry *tmpmle;
 329	struct hlist_head *bucket;
 330	unsigned int hash;
 331
 332	assert_spin_locked(&dlm->master_lock);
 333
 334	hash = dlm_lockid_hash(name, namelen);
 335	bucket = dlm_master_hash(dlm, hash);
 336	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
 337		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 338			continue;
 339		dlm_get_mle(tmpmle);
 340		*mle = tmpmle;
 341		return 1;
 342	}
 343	return 0;
 344}
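
/*
 * A usage sketch (not part of the original file): dlm_find_mle() takes a
 * reference on the entry it returns, so every successful lookup must be
 * balanced with dlm_put_mle() once the locks are released.
 */
static void dlm_find_mle_example(struct dlm_ctxt *dlm, char *name,
				 unsigned int namelen)
{
	struct dlm_master_list_entry *mle = NULL;
	int found;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &mle, name, namelen);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found)
		dlm_put_mle(mle);		/* balance the lookup ref */
}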
 345
 346void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 347{
 348	struct dlm_master_list_entry *mle;
 349
 350	assert_spin_locked(&dlm->spinlock);
 351
 352	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 353		if (node_up)
 354			dlm_mle_node_up(dlm, mle, NULL, idx);
 355		else
 356			dlm_mle_node_down(dlm, mle, NULL, idx);
 357	}
 358}
 359
 360static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 361			      struct dlm_master_list_entry *mle,
 362			      struct o2nm_node *node, int idx)
 363{
 364	spin_lock(&mle->spinlock);
 365
 366	if (!test_bit(idx, mle->node_map))
 367		mlog(0, "node %u already removed from nodemap!\n", idx);
 368	else
 369		clear_bit(idx, mle->node_map);
 370
 371	spin_unlock(&mle->spinlock);
 372}
 373
 374static void dlm_mle_node_up(struct dlm_ctxt *dlm,
 375			    struct dlm_master_list_entry *mle,
 376			    struct o2nm_node *node, int idx)
 377{
 378	spin_lock(&mle->spinlock);
 379
 380	if (test_bit(idx, mle->node_map))
 381		mlog(0, "node %u already in node map!\n", idx);
 382	else
 383		set_bit(idx, mle->node_map);
 384
 385	spin_unlock(&mle->spinlock);
 386}
 387
 388
 389int dlm_init_mle_cache(void)
 390{
 391	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
 392					  sizeof(struct dlm_master_list_entry),
 393					  0, SLAB_HWCACHE_ALIGN,
 394					  NULL);
 395	if (dlm_mle_cache == NULL)
 396		return -ENOMEM;
 397	return 0;
 398}
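
/*
 * A pairing sketch (not part of the original file): the mle cache is
 * created once at module init and destroyed at exit.  kmem_cache_destroy()
 * accepts a NULL cache, so teardown stays safe after a failed init.
 */
static int __init dlm_mle_cache_example_init(void)
{
	return dlm_init_mle_cache();	/* 0 on success, -ENOMEM on failure */
}

static void __exit dlm_mle_cache_example_exit(void)
{
	dlm_destroy_mle_cache();
}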
 399
 400void dlm_destroy_mle_cache(void)
 401{
 402	kmem_cache_destroy(dlm_mle_cache);
 403}
 404
 405static void dlm_mle_release(struct kref *kref)
 406{
 407	struct dlm_master_list_entry *mle;
 408	struct dlm_ctxt *dlm;
 409
 410	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
 411	dlm = mle->dlm;
 412
 413	assert_spin_locked(&dlm->spinlock);
 414	assert_spin_locked(&dlm->master_lock);
 415
 416	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
 417	     mle->type);
 418
 419	/* remove from list if not already */
 420	__dlm_unlink_mle(dlm, mle);
 421
 422	/* detach the mle from the domain node up/down events */
 423	__dlm_mle_detach_hb_events(dlm, mle);
 424
 425	atomic_dec(&dlm->mle_cur_count[mle->type]);
 426
 427	/* NOTE: kfree under spinlock here.
 428	 * if this is bad, we can move this to a freelist. */
 429	kmem_cache_free(dlm_mle_cache, mle);
 430}
 431
 432
 433/*
 434 * LOCK RESOURCE FUNCTIONS
 435 */
 436
 437int dlm_init_master_caches(void)
 438{
 439	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
 440					      sizeof(struct dlm_lock_resource),
 441					      0, SLAB_HWCACHE_ALIGN, NULL);
 442	if (!dlm_lockres_cache)
 443		goto bail;
 444
 445	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
 446					       DLM_LOCKID_NAME_MAX, 0,
 447					       SLAB_HWCACHE_ALIGN, NULL);
 448	if (!dlm_lockname_cache)
 449		goto bail;
 450
 451	return 0;
 452bail:
 453	dlm_destroy_master_caches();
 454	return -ENOMEM;
 455}
 456
 457void dlm_destroy_master_caches(void)
 458{
 459	kmem_cache_destroy(dlm_lockname_cache);
 460	dlm_lockname_cache = NULL;
 461
 462	kmem_cache_destroy(dlm_lockres_cache);
 463	dlm_lockres_cache = NULL;
 464}
 465
 466static void dlm_lockres_release(struct kref *kref)
 467{
 468	struct dlm_lock_resource *res;
 469	struct dlm_ctxt *dlm;
 470
 471	res = container_of(kref, struct dlm_lock_resource, refs);
 472	dlm = res->dlm;
 473
 474	/* This should not happen -- all lockres structures have a name
 475	 * associated with them at init time. */
 476	BUG_ON(!res->lockname.name);
 477
 478	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 479	     res->lockname.name);
 480
 481	atomic_dec(&dlm->res_cur_count);
 482
 483	if (!hlist_unhashed(&res->hash_node) ||
 484	    !list_empty(&res->granted) ||
 485	    !list_empty(&res->converting) ||
 486	    !list_empty(&res->blocked) ||
 487	    !list_empty(&res->dirty) ||
 488	    !list_empty(&res->recovering) ||
 489	    !list_empty(&res->purge)) {
 490		mlog(ML_ERROR,
 491		     "Going to BUG for resource %.*s."
 492		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
 493		     res->lockname.len, res->lockname.name,
 494		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
 495		     !list_empty(&res->granted) ? 'G' : ' ',
 496		     !list_empty(&res->converting) ? 'C' : ' ',
 497		     !list_empty(&res->blocked) ? 'B' : ' ',
 498		     !list_empty(&res->dirty) ? 'D' : ' ',
 499		     !list_empty(&res->recovering) ? 'R' : ' ',
 500		     !list_empty(&res->purge) ? 'P' : ' ');
 501
 502		dlm_print_one_lock_resource(res);
 503	}
 504
 505	/* By the time we're ready to blow this guy away, we shouldn't
 506	 * be on any lists. */
 507	BUG_ON(!hlist_unhashed(&res->hash_node));
 508	BUG_ON(!list_empty(&res->granted));
 509	BUG_ON(!list_empty(&res->converting));
 510	BUG_ON(!list_empty(&res->blocked));
 511	BUG_ON(!list_empty(&res->dirty));
 512	BUG_ON(!list_empty(&res->recovering));
 513	BUG_ON(!list_empty(&res->purge));
 514
 515	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 516
 517	kmem_cache_free(dlm_lockres_cache, res);
 518}
 519
 520void dlm_lockres_put(struct dlm_lock_resource *res)
 521{
 522	kref_put(&res->refs, dlm_lockres_release);
 523}
 524
 525static void dlm_init_lockres(struct dlm_ctxt *dlm,
 526			     struct dlm_lock_resource *res,
 527			     const char *name, unsigned int namelen)
 528{
 529	char *qname;
 530
 531	/* If we memset here, we lose our reference to the kmalloc'd
 532	 * res->lockname.name, so be sure to init every field
 533	 * correctly! */
 534
 535	qname = (char *) res->lockname.name;
 536	memcpy(qname, name, namelen);
 537
 538	res->lockname.len = namelen;
 539	res->lockname.hash = dlm_lockid_hash(name, namelen);
 540
 541	init_waitqueue_head(&res->wq);
 542	spin_lock_init(&res->spinlock);
 543	INIT_HLIST_NODE(&res->hash_node);
 544	INIT_LIST_HEAD(&res->granted);
 545	INIT_LIST_HEAD(&res->converting);
 546	INIT_LIST_HEAD(&res->blocked);
 547	INIT_LIST_HEAD(&res->dirty);
 548	INIT_LIST_HEAD(&res->recovering);
 549	INIT_LIST_HEAD(&res->purge);
 550	INIT_LIST_HEAD(&res->tracking);
 551	atomic_set(&res->asts_reserved, 0);
 552	res->migration_pending = 0;
 553	res->inflight_locks = 0;
 554	res->inflight_assert_workers = 0;
 555
 556	res->dlm = dlm;
 557
 558	kref_init(&res->refs);
 559
 560	atomic_inc(&dlm->res_tot_count);
 561	atomic_inc(&dlm->res_cur_count);
 562
 563	/* just for consistency */
 564	spin_lock(&res->spinlock);
 565	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
 566	spin_unlock(&res->spinlock);
 567
 568	res->state = DLM_LOCK_RES_IN_PROGRESS;
 569
 570	res->last_used = 0;
 571
 572	spin_lock(&dlm->track_lock);
 573	list_add_tail(&res->tracking, &dlm->tracking_list);
 574	spin_unlock(&dlm->track_lock);
 575
 576	memset(res->lvb, 0, DLM_LVB_LEN);
 577	memset(res->refmap, 0, sizeof(res->refmap));
 578}
 579
 580struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
 581				   const char *name,
 582				   unsigned int namelen)
 583{
 584	struct dlm_lock_resource *res = NULL;
 585
 586	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
 587	if (!res)
 588		goto error;
 589
 590	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
 591	if (!res->lockname.name)
 592		goto error;
 593
 594	dlm_init_lockres(dlm, res, name, namelen);
 595	return res;
 596
 597error:
 598	if (res)
 599		kmem_cache_free(dlm_lockres_cache, res);
 600	return NULL;
 601}
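
/*
 * A usage sketch (not part of the original file): dlm_new_lockres() hands
 * back a resource with one reference held and the owner still UNKNOWN;
 * the caller eventually drops that reference with dlm_lockres_put().
 * The lock name below is a placeholder.
 */
static void dlm_new_lockres_example(struct dlm_ctxt *dlm)
{
	static const char name[] = "example_lockid";
	struct dlm_lock_resource *res;

	res = dlm_new_lockres(dlm, name, sizeof(name) - 1);
	if (!res)
		return;				/* allocation failed */

	/* ... insert into the hash and master it before use ... */

	dlm_lockres_put(res);			/* drop the initial ref */
}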
 602
 603void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
 604				struct dlm_lock_resource *res, int bit)
 605{
 606	assert_spin_locked(&res->spinlock);
 607
 608	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
 609	     res->lockname.name, bit, __builtin_return_address(0));
 610
 611	set_bit(bit, res->refmap);
 612}
 613
 614void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
 615				  struct dlm_lock_resource *res, int bit)
 616{
 617	assert_spin_locked(&res->spinlock);
 618
 619	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
 620	     res->lockname.name, bit, __builtin_return_address(0));
 621
 622	clear_bit(bit, res->refmap);
 623}
 624
 625static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 626				   struct dlm_lock_resource *res)
 627{
 628	res->inflight_locks++;
 629
 630	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
 631	     res->lockname.len, res->lockname.name, res->inflight_locks,
 632	     __builtin_return_address(0));
 633}
 634
 635void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 636				   struct dlm_lock_resource *res)
 637{
 638	assert_spin_locked(&res->spinlock);
 639	__dlm_lockres_grab_inflight_ref(dlm, res);
 640}
 641
 642void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 643				   struct dlm_lock_resource *res)
 644{
 645	assert_spin_locked(&res->spinlock);
 646
 647	BUG_ON(res->inflight_locks == 0);
 648
 649	res->inflight_locks--;
 650
 651	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
 652	     res->lockname.len, res->lockname.name, res->inflight_locks,
 653	     __builtin_return_address(0));
 654
 655	wake_up(&res->wq);
 656}
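
/*
 * A pairing sketch (not part of the original file): an inflight reference
 * pins a resource against purging by dlm_thread; every grab taken under
 * res->spinlock is matched by a drop, which also wakes res->wq waiters.
 */
static void dlm_inflight_example(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	dlm_lockres_grab_inflight_ref(dlm, res);	/* pin */
	spin_unlock(&res->spinlock);

	/* ... the resource cannot be purged in this window ... */

	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);	/* unpin and wake */
	spin_unlock(&res->spinlock);
}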
 657
 658void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
 659		struct dlm_lock_resource *res)
 660{
 661	assert_spin_locked(&res->spinlock);
 662	res->inflight_assert_workers++;
 663	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
 664			dlm->name, res->lockname.len, res->lockname.name,
 665			res->inflight_assert_workers);
 666}
 667
 668static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 669		struct dlm_lock_resource *res)
 670{
 671	assert_spin_locked(&res->spinlock);
 672	BUG_ON(res->inflight_assert_workers == 0);
 673	res->inflight_assert_workers--;
 674	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
 675			dlm->name, res->lockname.len, res->lockname.name,
 676			res->inflight_assert_workers);
 677}
 678
 679static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 680		struct dlm_lock_resource *res)
 681{
 682	spin_lock(&res->spinlock);
 683	__dlm_lockres_drop_inflight_worker(dlm, res);
 684	spin_unlock(&res->spinlock);
 685}
 686
 687/*
 688 * lookup a lock resource by name.
 689 * may already exist in the hashtable.
 690 * lockid is null terminated
 691 *
 692 * if not, allocate enough for the lockres and for
 693 * the temporary structure used in doing the mastering.
 694 *
 695 * also, do a lookup in the dlm->master_list to see
 696 * if another node has begun mastering the same lock.
 697 * if so, there should be a block entry in there
 698 * for this name, and we should *not* attempt to master
 699 * the lock here.   need to wait around for that node
 700 * to assert_master (or die).
 701 *
 702 */
 703struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 704					  const char *lockid,
 705					  int namelen,
 706					  int flags)
 707{
 708	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
 709	struct dlm_master_list_entry *mle = NULL;
 710	struct dlm_master_list_entry *alloc_mle = NULL;
 711	int blocked = 0;
 712	int ret, nodenum;
 713	struct dlm_node_iter iter;
 714	unsigned int hash;
 715	int tries = 0;
 716	int bit, wait_on_recovery = 0;
 717
 718	BUG_ON(!lockid);
 719
 720	hash = dlm_lockid_hash(lockid, namelen);
 721
 722	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
 723
 724lookup:
 725	spin_lock(&dlm->spinlock);
 726	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
 727	if (tmpres) {
 728		spin_unlock(&dlm->spinlock);
 729		spin_lock(&tmpres->spinlock);
 730
 731		/*
 732		 * Right after dlm spinlock was released, dlm_thread could have
 733		 * purged the lockres. Check if lockres got unhashed. If so
 734		 * start over.
 735		 */
 736		if (hlist_unhashed(&tmpres->hash_node)) {
 737			spin_unlock(&tmpres->spinlock);
 738			dlm_lockres_put(tmpres);
 739			tmpres = NULL;
 740			goto lookup;
 741		}
 742
 743		/* Wait on the thread that is mastering the resource */
 744		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 745			__dlm_wait_on_lockres(tmpres);
 746			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
 747			spin_unlock(&tmpres->spinlock);
 748			dlm_lockres_put(tmpres);
 749			tmpres = NULL;
 750			goto lookup;
 751		}
 752
 753		/* Wait on the resource purge to complete before continuing */
 754		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
 755			BUG_ON(tmpres->owner == dlm->node_num);
 756			__dlm_wait_on_lockres_flags(tmpres,
 757						    DLM_LOCK_RES_DROPPING_REF);
 758			spin_unlock(&tmpres->spinlock);
 759			dlm_lockres_put(tmpres);
 760			tmpres = NULL;
 761			goto lookup;
 762		}
 763
 764		/* Grab inflight ref to pin the resource */
 765		dlm_lockres_grab_inflight_ref(dlm, tmpres);
 766
 767		spin_unlock(&tmpres->spinlock);
 768		if (res) {
 769			spin_lock(&dlm->track_lock);
 770			if (!list_empty(&res->tracking))
 771				list_del_init(&res->tracking);
 772			else
 773				mlog(ML_ERROR, "Resource %.*s not "
 774						"on the Tracking list\n",
 775						res->lockname.len,
 776						res->lockname.name);
 777			spin_unlock(&dlm->track_lock);
 778			dlm_lockres_put(res);
 779		}
 780		res = tmpres;
 781		goto leave;
 782	}
 783
 784	if (!res) {
 785		spin_unlock(&dlm->spinlock);
 786		mlog(0, "allocating a new resource\n");
 787		/* nothing found and we need to allocate one. */
 788		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
 789		if (!alloc_mle)
 790			goto leave;
 791		res = dlm_new_lockres(dlm, lockid, namelen);
 792		if (!res)
 793			goto leave;
 794		goto lookup;
 795	}
 796
 797	mlog(0, "no lockres found, allocated our own: %p\n", res);
 798
 799	if (flags & LKM_LOCAL) {
 800		/* caller knows it's safe to assume it's not mastered elsewhere
 801		 * DONE!  return right away */
 802		spin_lock(&res->spinlock);
 803		dlm_change_lockres_owner(dlm, res, dlm->node_num);
 804		__dlm_insert_lockres(dlm, res);
 805		dlm_lockres_grab_inflight_ref(dlm, res);
 806		spin_unlock(&res->spinlock);
 807		spin_unlock(&dlm->spinlock);
 808		/* lockres still marked IN_PROGRESS */
 809		goto wake_waiters;
 810	}
 811
 812	/* check master list to see if another node has started mastering it */
 813	spin_lock(&dlm->master_lock);
 814
 815	/* if we found a block, wait for lock to be mastered by another node */
 816	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
 817	if (blocked) {
 818		int mig;
 819		if (mle->type == DLM_MLE_MASTER) {
 820			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
 821			BUG();
 822		}
 823		mig = (mle->type == DLM_MLE_MIGRATION);
 824		/* if there is a migration in progress, let the migration
 825		 * finish before continuing.  we can wait for the absence
 826		 * of the MIGRATION mle: either the migrate finished or
 827		 * one of the nodes died and the mle was cleaned up.
 828		 * if there is a BLOCK here, but it already has a master
 829		 * set, we are too late.  the master does not have a ref
 830		 * for us in the refmap.  detach the mle and drop it.
 831		 * either way, go back to the top and start over. */
 832		if (mig || mle->master != O2NM_MAX_NODES) {
 833			BUG_ON(mig && mle->master == dlm->node_num);
 834			/* we arrived too late.  the master does not
 835			 * have a ref for us. retry. */
 836			mlog(0, "%s:%.*s: late on %s\n",
 837			     dlm->name, namelen, lockid,
 838			     mig ?  "MIGRATION" : "BLOCK");
 839			spin_unlock(&dlm->master_lock);
 840			spin_unlock(&dlm->spinlock);
 841
 842			/* master is known, detach */
 843			if (!mig)
 844				dlm_mle_detach_hb_events(dlm, mle);
 845			dlm_put_mle(mle);
 846			mle = NULL;
 847			/* this is lame, but we can't wait on either
 848			 * the mle or lockres waitqueue here */
 849			if (mig)
 850				msleep(100);
 851			goto lookup;
 852		}
 853	} else {
 854		/* go ahead and try to master lock on this node */
 855		mle = alloc_mle;
 856		/* make sure this does not get freed below */
 857		alloc_mle = NULL;
 858		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 859		set_bit(dlm->node_num, mle->maybe_map);
 860		__dlm_insert_mle(dlm, mle);
 861
 862		/* still holding the dlm spinlock, check the recovery map
 863		 * to see if there are any nodes that still need to be
 864		 * considered.  these will not appear in the mle nodemap
 865		 * but they might own this lockres.  wait on them. */
 866		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 867		if (bit < O2NM_MAX_NODES) {
 868			mlog(0, "%s: res %.*s, At least one node (%d) "
 869			     "to recover before lock mastery can begin\n",
 870			     dlm->name, namelen, (char *)lockid, bit);
 871			wait_on_recovery = 1;
 872		}
 873	}
 874
 875	/* at this point there is either a DLM_MLE_BLOCK or a
 876	 * DLM_MLE_MASTER on the master list, so it's safe to add the
 877	 * lockres to the hashtable.  anyone who finds the lock will
 878	 * still have to wait on the IN_PROGRESS. */
 879
 880	/* finally add the lockres to its hash bucket */
 881	__dlm_insert_lockres(dlm, res);
 882
 883	/* since this lockres is new it does not require the spinlock */
 884	__dlm_lockres_grab_inflight_ref(dlm, res);
 885
 886	/* get an extra ref on the mle in case this is a BLOCK.
 887	 * if so, the creator of the BLOCK may try to put the last
 888	 * ref at this time in the assert master handler, so we
 889	 * need an extra one to keep from a bad ptr deref. */
 890	dlm_get_mle_inuse(mle);
 891	spin_unlock(&dlm->master_lock);
 892	spin_unlock(&dlm->spinlock);
 893
 894redo_request:
 895	while (wait_on_recovery) {
 896		/* any cluster changes that occurred after dropping the
 897	 * dlm spinlock would be detectable by a change on the mle,
 898		 * so we only need to clear out the recovery map once. */
 899		if (dlm_is_recovery_lock(lockid, namelen)) {
 900			mlog(0, "%s: Recovery map is not empty, but must "
 901			     "master $RECOVERY lock now\n", dlm->name);
 902			if (!dlm_pre_master_reco_lockres(dlm, res))
 903				wait_on_recovery = 0;
 904			else {
 905				mlog(0, "%s: waiting 500ms for heartbeat state "
 906				    "change\n", dlm->name);
 907				msleep(500);
 908			}
 909			continue;
 910		}
 911
 912		dlm_kick_recovery_thread(dlm);
 913		msleep(1000);
 914		dlm_wait_for_recovery(dlm);
 915
 916		spin_lock(&dlm->spinlock);
 917		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 918		if (bit < O2NM_MAX_NODES) {
 919			mlog(0, "%s: res %.*s, At least one node (%d) "
 920			     "to recover before lock mastery can begin\n",
 921			     dlm->name, namelen, (char *)lockid, bit);
 922			wait_on_recovery = 1;
 923		} else
 924			wait_on_recovery = 0;
 925		spin_unlock(&dlm->spinlock);
 926
 927		if (wait_on_recovery)
 928			dlm_wait_for_node_recovery(dlm, bit, 10000);
 929	}
 930
 931	/* must wait for lock to be mastered elsewhere */
 932	if (blocked)
 933		goto wait;
 934
 935	ret = -EINVAL;
 936	dlm_node_iter_init(mle->vote_map, &iter);
 937	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 938		ret = dlm_do_master_request(res, mle, nodenum);
 939		if (ret < 0)
 940			mlog_errno(ret);
 941		if (mle->master != O2NM_MAX_NODES) {
 942			/* found a master ! */
 943			if (mle->master <= nodenum)
 944				break;
 945			/* if our master request has not reached the master
 946			 * yet, keep going until it does.  this is how the
 947			 * master will know that asserts are needed back to
 948			 * the lower nodes. */
 949			mlog(0, "%s: res %.*s, Requests only up to %u but "
 950			     "master is %u, keep going\n", dlm->name, namelen,
 951			     lockid, nodenum, mle->master);
 952		}
 953	}
 954
 955wait:
 956	/* keep going until the response map includes all nodes */
 957	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 958	if (ret < 0) {
 959		wait_on_recovery = 1;
 960		mlog(0, "%s: res %.*s, Node map changed, redo the master "
 961		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 962		     res->lockname.name, blocked);
 963		if (++tries > 20) {
 964			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
 965			     "dlm_wait_for_lock_mastery, blocked = %d\n",
 966			     dlm->name, res->lockname.len,
 967			     res->lockname.name, blocked);
 968			dlm_print_one_lock_resource(res);
 969			dlm_print_one_mle(mle);
 970			tries = 0;
 971		}
 972		goto redo_request;
 973	}
 974
 975	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
 976	     res->lockname.name, res->owner);
 977	/* make sure we never continue without this */
 978	BUG_ON(res->owner == O2NM_MAX_NODES);
 979
 980	/* master is known, detach if not already detached */
 981	dlm_mle_detach_hb_events(dlm, mle);
 982	dlm_put_mle(mle);
 983	/* put the extra ref */
 984	dlm_put_mle_inuse(mle);
 985
 986wake_waiters:
 987	spin_lock(&res->spinlock);
 988	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 989	spin_unlock(&res->spinlock);
 990	wake_up(&res->wq);
 991
 992leave:
 993	/* need to free the unused mle */
 994	if (alloc_mle)
 995		kmem_cache_free(dlm_mle_cache, alloc_mle);
 996
 997	return res;
 998}
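
/*
 * A caller sketch (not part of the original file): the typical entry
 * point for mastering a lock.  On success the owner is known and an
 * inflight reference is held; the zero flags value is a placeholder.
 */
static void dlm_get_lock_resource_example(struct dlm_ctxt *dlm)
{
	static const char lockid[] = "example_lockid";
	struct dlm_lock_resource *res;

	res = dlm_get_lock_resource(dlm, lockid, sizeof(lockid) - 1, 0);
	if (!res)
		return;			/* allocation failed */

	/* res->owner is now this node or a known remote node */

	dlm_lockres_put(res);		/* drop the lookup reference */
}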
 999
1000
1001#define DLM_MASTERY_TIMEOUT_MS   5000
1002
1003static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
1004				     struct dlm_lock_resource *res,
1005				     struct dlm_master_list_entry *mle,
1006				     int *blocked)
1007{
1008	u8 m;
1009	int ret, bit;
1010	int map_changed, voting_done;
1011	int assert, sleep;
1012
1013recheck:
1014	ret = 0;
1015	assert = 0;
1016
1017	/* check if another node has already become the owner */
1018	spin_lock(&res->spinlock);
1019	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1020		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1021		     res->lockname.len, res->lockname.name, res->owner);
1022		spin_unlock(&res->spinlock);
1023		/* this will cause the master to re-assert across
1024		 * the whole cluster, freeing up mles */
1025		if (res->owner != dlm->node_num) {
1026			ret = dlm_do_master_request(res, mle, res->owner);
1027			if (ret < 0) {
1028				/* give recovery a chance to run */
1029				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1030				msleep(500);
1031				goto recheck;
1032			}
1033		}
1034		ret = 0;
1035		goto leave;
1036	}
1037	spin_unlock(&res->spinlock);
1038
1039	spin_lock(&mle->spinlock);
1040	m = mle->master;
1041	map_changed = (memcmp(mle->vote_map, mle->node_map,
1042			      sizeof(mle->vote_map)) != 0);
1043	voting_done = (memcmp(mle->vote_map, mle->response_map,
1044			     sizeof(mle->vote_map)) == 0);
1045
1046	/* restart if we hit any errors */
1047	if (map_changed) {
1048		int b;
1049		mlog(0, "%s: %.*s: node map changed, restarting\n",
1050		     dlm->name, res->lockname.len, res->lockname.name);
1051		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1052		b = (mle->type == DLM_MLE_BLOCK);
1053		if ((*blocked && !b) || (!*blocked && b)) {
1054			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1055			     dlm->name, res->lockname.len, res->lockname.name,
1056			     *blocked, b);
1057			*blocked = b;
1058		}
1059		spin_unlock(&mle->spinlock);
1060		if (ret < 0) {
1061			mlog_errno(ret);
1062			goto leave;
1063		}
1064		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1065		     "rechecking now\n", dlm->name, res->lockname.len,
1066		     res->lockname.name);
1067		goto recheck;
1068	} else {
1069		if (!voting_done) {
1070			mlog(0, "map not changed and voting not done "
1071			     "for %s:%.*s\n", dlm->name, res->lockname.len,
1072			     res->lockname.name);
1073		}
1074	}
1075
1076	if (m != O2NM_MAX_NODES) {
1077		/* another node has done an assert!
1078		 * all done! */
1079		sleep = 0;
1080	} else {
1081		sleep = 1;
1082		/* have all nodes responded? */
1083		if (voting_done && !*blocked) {
1084			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1085			if (dlm->node_num <= bit) {
1086				/* my node number is lowest.
1087				 * now tell other nodes that I am
1088				 * mastering this. */
1089				mle->master = dlm->node_num;
1090				/* ref was grabbed in get_lock_resource
1091				 * will be dropped in dlmlock_master */
1092				assert = 1;
1093				sleep = 0;
1094			}
1095			/* if voting is done, but we have not received
1096			 * an assert master yet, we must sleep */
1097		}
1098	}
1099
1100	spin_unlock(&mle->spinlock);
1101
1102	/* sleep if we haven't finished voting yet */
1103	if (sleep) {
1104		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1105		atomic_set(&mle->woken, 0);
1106		(void)wait_event_timeout(mle->wq,
1107					 (atomic_read(&mle->woken) == 1),
1108					 timeo);
1109		if (res->owner == O2NM_MAX_NODES) {
1110			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1111			     res->lockname.len, res->lockname.name);
1112			goto recheck;
1113		}
1114		mlog(0, "done waiting, master is %u\n", res->owner);
1115		ret = 0;
1116		goto leave;
1117	}
1118
1119	ret = 0;   /* done */
1120	if (assert) {
1121		m = dlm->node_num;
1122		mlog(0, "about to master %.*s here, this=%u\n",
1123		     res->lockname.len, res->lockname.name, m);
1124		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1125		if (ret) {
1126			/* This is a failure in the network path,
1127			 * not in the response to the assert_master
1128			 * (any nonzero response is a BUG on this node).
1129			 * Most likely a socket just got disconnected
1130			 * due to node death. */
1131			mlog_errno(ret);
1132		}
1133		/* no longer need to restart lock mastery.
1134		 * all living nodes have been contacted. */
1135		ret = 0;
1136	}
1137
1138	/* set the lockres owner */
1139	spin_lock(&res->spinlock);
1140	/* mastery reference obtained either during
1141	 * assert_master_handler or in get_lock_resource */
1142	dlm_change_lockres_owner(dlm, res, m);
1143	spin_unlock(&res->spinlock);
1144
1145leave:
1146	return ret;
1147}
1148
1149struct dlm_bitmap_diff_iter
1150{
1151	int curnode;
1152	unsigned long *orig_bm;
1153	unsigned long *cur_bm;
1154	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1155};
1156
1157enum dlm_node_state_change
1158{
1159	NODE_DOWN = -1,
1160	NODE_NO_CHANGE = 0,
1161	NODE_UP
1162};
1163
1164static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1165				      unsigned long *orig_bm,
1166				      unsigned long *cur_bm)
1167{
1168	unsigned long p1, p2;
1169	int i;
1170
1171	iter->curnode = -1;
1172	iter->orig_bm = orig_bm;
1173	iter->cur_bm = cur_bm;
1174
1175	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1176		p1 = *(iter->orig_bm + i);
1177		p2 = *(iter->cur_bm + i);
1178		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1179	}
1180}
1181
1182static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1183				     enum dlm_node_state_change *state)
1184{
1185	int bit;
1186
1187	if (iter->curnode >= O2NM_MAX_NODES)
1188		return -ENOENT;
1189
1190	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1191			    iter->curnode+1);
1192	if (bit >= O2NM_MAX_NODES) {
1193		iter->curnode = O2NM_MAX_NODES;
1194		return -ENOENT;
1195	}
1196
1197	/* if it was there in the original then this node died */
1198	if (test_bit(bit, iter->orig_bm))
1199		*state = NODE_DOWN;
1200	else
1201		*state = NODE_UP;
1202
1203	iter->curnode = bit;
1204	return bit;
1205}
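
/*
 * A usage sketch (not part of the original file): walking the difference
 * between two node bitmaps, exactly as dlm_restart_lock_mastery() does
 * below, classifying each changed bit as a node-up or node-down event.
 */
static void dlm_bitmap_diff_example(unsigned long *orig_bm,
				    unsigned long *cur_bm)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;

	dlm_bitmap_diff_iter_init(&bdi, orig_bm, cur_bm);
	while ((node = dlm_bitmap_diff_iter_next(&bdi, &sc)) >= 0) {
		if (sc == NODE_UP)
			mlog(0, "node %d came up\n", node);
		else
			mlog(0, "node %d went down\n", node);
	}
}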
1206
1207
1208static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1209				    struct dlm_lock_resource *res,
1210				    struct dlm_master_list_entry *mle,
1211				    int blocked)
1212{
1213	struct dlm_bitmap_diff_iter bdi;
1214	enum dlm_node_state_change sc;
1215	int node;
1216	int ret = 0;
1217
1218	mlog(0, "something happened such that the "
1219	     "master process may need to be restarted!\n");
1220
1221	assert_spin_locked(&mle->spinlock);
1222
1223	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1224	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1225	while (node >= 0) {
1226		if (sc == NODE_UP) {
1227			/* a node came up.  clear any old vote from
1228			 * the response map and set it in the vote map
1229			 * then restart the mastery. */
1230			mlog(ML_NOTICE, "node %d up while restarting\n", node);
1231
1232			/* redo the master request, but only for the new node */
1233			mlog(0, "sending request to new node\n");
1234			clear_bit(node, mle->response_map);
1235			set_bit(node, mle->vote_map);
1236		} else {
1237			mlog(ML_ERROR, "node down! %d\n", node);
1238			if (blocked) {
1239				int lowest = find_next_bit(mle->maybe_map,
1240						       O2NM_MAX_NODES, 0);
1241
1242				/* act like it was never there */
1243				clear_bit(node, mle->maybe_map);
1244
1245				if (node == lowest) {
1246					mlog(0, "expected master %u died"
1247					    " while this node was blocked "
1248					    "waiting on it!\n", node);
1249					lowest = find_next_bit(mle->maybe_map,
1250						       	O2NM_MAX_NODES,
1251						       	lowest+1);
1252					if (lowest < O2NM_MAX_NODES) {
1253						mlog(0, "%s:%.*s:still "
1254						     "blocked. waiting on %u "
1255						     "now\n", dlm->name,
1256						     res->lockname.len,
1257						     res->lockname.name,
1258						     lowest);
1259					} else {
1260						/* mle is an MLE_BLOCK, but
1261						 * there is now nothing left to
1262						 * block on.  we need to return
1263						 * all the way back out and try
1264						 * again with an MLE_MASTER.
1265						 * dlm_do_local_recovery_cleanup
1266						 * has already run, so the mle
1267						 * refcount is ok */
1268						mlog(0, "%s:%.*s: no "
1269						     "longer blocking. try to "
1270						     "master this here\n",
1271						     dlm->name,
1272						     res->lockname.len,
1273						     res->lockname.name);
1274						mle->type = DLM_MLE_MASTER;
1275						mle->mleres = res;
1276					}
1277				}
1278			}
1279
1280			/* now blank out everything, as if we had never
1281			 * contacted anyone */
1282			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1283			memset(mle->response_map, 0, sizeof(mle->response_map));
1284			/* reset the vote_map to the current node_map */
1285			memcpy(mle->vote_map, mle->node_map,
1286			       sizeof(mle->node_map));
1287			/* put myself into the maybe map */
1288			if (mle->type != DLM_MLE_BLOCK)
1289				set_bit(dlm->node_num, mle->maybe_map);
1290		}
1291		ret = -EAGAIN;
1292		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1293	}
1294	return ret;
1295}
1296
1297
1298/*
1299 * DLM_MASTER_REQUEST_MSG
1300 *
1301 * returns: 0 on success,
1302 *          -errno on a network error
1303 *
1304 * on error, the caller should assume the target node is "dead"
1305 *
1306 */
1307
1308static int dlm_do_master_request(struct dlm_lock_resource *res,
1309				 struct dlm_master_list_entry *mle, int to)
1310{
1311	struct dlm_ctxt *dlm = mle->dlm;
1312	struct dlm_master_request request;
1313	int ret, response=0, resend;
1314
1315	memset(&request, 0, sizeof(request));
1316	request.node_idx = dlm->node_num;
1317
1318	BUG_ON(mle->type == DLM_MLE_MIGRATION);
1319
1320	request.namelen = (u8)mle->mnamelen;
1321	memcpy(request.name, mle->mname, request.namelen);
1322
1323again:
1324	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1325				 sizeof(request), to, &response);
1326	if (ret < 0)  {
1327		if (ret == -ESRCH) {
1328			/* should never happen */
1329			mlog(ML_ERROR, "TCP stack not ready!\n");
1330			BUG();
1331		} else if (ret == -EINVAL) {
1332			mlog(ML_ERROR, "bad args passed to o2net!\n");
1333			BUG();
1334		} else if (ret == -ENOMEM) {
1335			mlog(ML_ERROR, "out of memory while trying to send "
1336			     "network message!  retrying\n");
1337			/* this is totally crude */
1338			msleep(50);
1339			goto again;
1340		} else if (!dlm_is_host_down(ret)) {
1341			/* not a network error. bad. */
1342			mlog_errno(ret);
1343			mlog(ML_ERROR, "unhandled error!\n");
1344			BUG();
1345		}
1346		/* all other errors should be network errors,
1347		 * and likely indicate node death */
1348		mlog(ML_ERROR, "link to %d went down!\n", to);
1349		goto out;
1350	}
1351
1352	ret = 0;
1353	resend = 0;
1354	spin_lock(&mle->spinlock);
1355	switch (response) {
1356		case DLM_MASTER_RESP_YES:
1357			set_bit(to, mle->response_map);
1358			mlog(0, "node %u is the master, response=YES\n", to);
1359			mlog(0, "%s:%.*s: master node %u now knows I have a "
1360			     "reference\n", dlm->name, res->lockname.len,
1361			     res->lockname.name, to);
1362			mle->master = to;
1363			break;
1364		case DLM_MASTER_RESP_NO:
1365			mlog(0, "node %u not master, response=NO\n", to);
1366			set_bit(to, mle->response_map);
1367			break;
1368		case DLM_MASTER_RESP_MAYBE:
1369			mlog(0, "node %u not master, response=MAYBE\n", to);
1370			set_bit(to, mle->response_map);
1371			set_bit(to, mle->maybe_map);
1372			break;
1373		case DLM_MASTER_RESP_ERROR:
1374			mlog(0, "node %u hit an error, resending\n", to);
1375			resend = 1;
1376			response = 0;
1377			break;
1378		default:
1379			mlog(ML_ERROR, "bad response! %u\n", response);
1380			BUG();
1381	}
1382	spin_unlock(&mle->spinlock);
1383	if (resend) {
1384		/* this is also totally crude */
1385		msleep(50);
1386		goto again;
1387	}
1388
1389out:
1390	return ret;
1391}
1392
1393/*
1394 * locks that can be taken here:
1395 * dlm->spinlock
1396 * res->spinlock
1397 * mle->spinlock
1398 * dlm->master_list
1399 *
1400 * if possible, TRIM THIS DOWN!!!
1401 */
1402int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1403			       void **ret_data)
1404{
1405	u8 response = DLM_MASTER_RESP_MAYBE;
1406	struct dlm_ctxt *dlm = data;
1407	struct dlm_lock_resource *res = NULL;
1408	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1409	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1410	char *name;
1411	unsigned int namelen, hash;
1412	int found, ret;
1413	int set_maybe;
1414	int dispatch_assert = 0;
1415	int dispatched = 0;
1416
1417	if (!dlm_grab(dlm))
1418		return DLM_MASTER_RESP_NO;
1419
1420	if (!dlm_domain_fully_joined(dlm)) {
1421		response = DLM_MASTER_RESP_NO;
1422		goto send_response;
1423	}
1424
1425	name = request->name;
1426	namelen = request->namelen;
1427	hash = dlm_lockid_hash(name, namelen);
1428
1429	if (namelen > DLM_LOCKID_NAME_MAX) {
1430		response = DLM_IVBUFLEN;
1431		goto send_response;
1432	}
1433
1434way_up_top:
1435	spin_lock(&dlm->spinlock);
1436	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1437	if (res) {
1438		spin_unlock(&dlm->spinlock);
1439
1440		/* take care of the easy cases up front */
1441		spin_lock(&res->spinlock);
1442
1443		/*
1444		 * Right after dlm spinlock was released, dlm_thread could have
1445		 * purged the lockres. Check if lockres got unhashed. If so
1446		 * start over.
1447		 */
1448		if (hlist_unhashed(&res->hash_node)) {
1449			spin_unlock(&res->spinlock);
1450			dlm_lockres_put(res);
1451			goto way_up_top;
1452		}
1453
1454		if (res->state & (DLM_LOCK_RES_RECOVERING|
1455				  DLM_LOCK_RES_MIGRATING)) {
1456			spin_unlock(&res->spinlock);
1457			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1458			     "being recovered/migrated\n");
1459			response = DLM_MASTER_RESP_ERROR;
1460			if (mle)
1461				kmem_cache_free(dlm_mle_cache, mle);
1462			goto send_response;
1463		}
1464
1465		if (res->owner == dlm->node_num) {
1466			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1467			spin_unlock(&res->spinlock);
1468			response = DLM_MASTER_RESP_YES;
1469			if (mle)
1470				kmem_cache_free(dlm_mle_cache, mle);
1471
1472			/* this node is the owner.
1473			 * there is some extra work that needs to
1474			 * happen now.  the requesting node has
1475			 * caused all nodes up to this one to
1476			 * create mles.  this node now needs to
1477			 * go back and clean those up. */
1478			dispatch_assert = 1;
1479			goto send_response;
1480		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1481			spin_unlock(&res->spinlock);
1482			// mlog(0, "node %u is the master\n", res->owner);
1483			response = DLM_MASTER_RESP_NO;
1484			if (mle)
1485				kmem_cache_free(dlm_mle_cache, mle);
1486			goto send_response;
1487		}
1488
1489		/* ok, there is no owner.  either this node is
1490		 * being blocked, or it is actively trying to
1491		 * master this lock. */
1492		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1493			mlog(ML_ERROR, "lock with no owner should be "
1494			     "in-progress!\n");
1495			BUG();
1496		}
1497
1498		// mlog(0, "lockres is in progress...\n");
1499		spin_lock(&dlm->master_lock);
1500		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1501		if (!found) {
1502			mlog(ML_ERROR, "no mle found for this lock!\n");
1503			BUG();
1504		}
1505		set_maybe = 1;
1506		spin_lock(&tmpmle->spinlock);
1507		if (tmpmle->type == DLM_MLE_BLOCK) {
1508			// mlog(0, "this node is waiting for "
1509			// "lockres to be mastered\n");
1510			response = DLM_MASTER_RESP_NO;
1511		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
1512			mlog(0, "node %u is master, but trying to migrate to "
1513			     "node %u.\n", tmpmle->master, tmpmle->new_master);
1514			if (tmpmle->master == dlm->node_num) {
1515				mlog(ML_ERROR, "no owner on lockres, but this "
1516				     "node is trying to migrate it to %u?!\n",
1517				     tmpmle->new_master);
1518				BUG();
1519			} else {
1520				/* the real master can respond on its own */
1521				response = DLM_MASTER_RESP_NO;
1522			}
1523		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1524			set_maybe = 0;
1525			if (tmpmle->master == dlm->node_num) {
1526				response = DLM_MASTER_RESP_YES;
1527				/* this node will be the owner.
1528				 * go back and clean the mles on any
1529				 * other nodes */
1530				dispatch_assert = 1;
1531				dlm_lockres_set_refmap_bit(dlm, res,
1532							   request->node_idx);
1533			} else
1534				response = DLM_MASTER_RESP_NO;
1535		} else {
1536			// mlog(0, "this node is attempting to "
1537			// "master lockres\n");
1538			response = DLM_MASTER_RESP_MAYBE;
1539		}
1540		if (set_maybe)
1541			set_bit(request->node_idx, tmpmle->maybe_map);
1542		spin_unlock(&tmpmle->spinlock);
1543
1544		spin_unlock(&dlm->master_lock);
1545		spin_unlock(&res->spinlock);
1546
1547		/* keep the mle attached to heartbeat events */
1548		dlm_put_mle(tmpmle);
1549		if (mle)
1550			kmem_cache_free(dlm_mle_cache, mle);
1551		goto send_response;
1552	}
1553
1554	/*
1555	 * lockres doesn't exist on this node
1556	 * if there is an MLE_BLOCK, return NO
1557	 * if there is an MLE_MASTER, return MAYBE
1558	 * otherwise, add an MLE_BLOCK, return NO
1559	 */
1560	spin_lock(&dlm->master_lock);
1561	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1562	if (!found) {
1563		/* this lockid has never been seen on this node yet */
1564		// mlog(0, "no mle found\n");
1565		if (!mle) {
1566			spin_unlock(&dlm->master_lock);
1567			spin_unlock(&dlm->spinlock);
1568
1569			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1570			if (!mle) {
1571				response = DLM_MASTER_RESP_ERROR;
1572				mlog_errno(-ENOMEM);
1573				goto send_response;
1574			}
1575			goto way_up_top;
1576		}
1577
1578		// mlog(0, "this is second time thru, already allocated, "
1579		// "add the block.\n");
1580		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1581		set_bit(request->node_idx, mle->maybe_map);
1582		__dlm_insert_mle(dlm, mle);
1583		response = DLM_MASTER_RESP_NO;
1584	} else {
1585		spin_lock(&tmpmle->spinlock);
1586		if (tmpmle->master == dlm->node_num) {
1587			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1588			BUG();
1589		}
1590		if (tmpmle->type == DLM_MLE_BLOCK)
1591			response = DLM_MASTER_RESP_NO;
1592		else if (tmpmle->type == DLM_MLE_MIGRATION) {
1593			mlog(0, "migration mle was found (%u->%u)\n",
1594			     tmpmle->master, tmpmle->new_master);
1595			/* real master can respond on its own */
1596			response = DLM_MASTER_RESP_NO;
1597		} else
1598			response = DLM_MASTER_RESP_MAYBE;
1599		set_bit(request->node_idx, tmpmle->maybe_map);
1600		spin_unlock(&tmpmle->spinlock);
1601	}
1602	spin_unlock(&dlm->master_lock);
1603	spin_unlock(&dlm->spinlock);
1604
1605	if (found) {
1606		/* keep the mle attached to heartbeat events */
1607		dlm_put_mle(tmpmle);
1608	}
1609send_response:
1610	/*
1611	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1612	 * The reference is released by dlm_assert_master_worker() under
1613	 * the call to dlm_dispatch_assert_master().  If
1614	 * dlm_assert_master_worker() isn't called, we drop it here.
1615	 */
1616	if (dispatch_assert) {
1617		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1618			     dlm->node_num, res->lockname.len, res->lockname.name);
1619		spin_lock(&res->spinlock);
1620		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1621						 DLM_ASSERT_MASTER_MLE_CLEANUP);
1622		if (ret < 0) {
1623			mlog(ML_ERROR, "failed to dispatch assert master work\n");
1624			response = DLM_MASTER_RESP_ERROR;
1625			spin_unlock(&res->spinlock);
1626			dlm_lockres_put(res);
1627		} else {
1628			dispatched = 1;
1629			__dlm_lockres_grab_inflight_worker(dlm, res);
1630			spin_unlock(&res->spinlock);
1631		}
1632	} else {
1633		if (res)
1634			dlm_lockres_put(res);
1635	}
1636
1637	if (!dispatched)
1638		dlm_put(dlm);
1639	return response;
1640}
1641
1642/*
1643 * DLM_ASSERT_MASTER_MSG
1644 */
1645
1646
1647/*
1648 * NOTE: this can be used for debugging
1649 * can periodically run all locks owned by this node
1650 * and re-assert across the cluster...
1651 */
1652static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1653				struct dlm_lock_resource *res,
1654				void *nodemap, u32 flags)
1655{
1656	struct dlm_assert_master assert;
1657	int to, tmpret;
1658	struct dlm_node_iter iter;
1659	int ret = 0;
1660	int reassert;
1661	const char *lockname = res->lockname.name;
1662	unsigned int namelen = res->lockname.len;
1663
1664	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1665
1666	spin_lock(&res->spinlock);
1667	res->state |= DLM_LOCK_RES_SETREF_INPROG;
1668	spin_unlock(&res->spinlock);
1669
1670again:
1671	reassert = 0;
1672
1673	/* note that if this nodemap is empty, it returns 0 */
1674	dlm_node_iter_init(nodemap, &iter);
1675	while ((to = dlm_node_iter_next(&iter)) >= 0) {
1676		int r = 0;
1677		struct dlm_master_list_entry *mle = NULL;
1678
1679		mlog(0, "sending assert master to %d (%.*s)\n", to,
1680		     namelen, lockname);
1681		memset(&assert, 0, sizeof(assert));
1682		assert.node_idx = dlm->node_num;
1683		assert.namelen = namelen;
1684		memcpy(assert.name, lockname, namelen);
1685		assert.flags = cpu_to_be32(flags);
1686
1687		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1688					    &assert, sizeof(assert), to, &r);
1689		if (tmpret < 0) {
1690			mlog(ML_ERROR, "Error %d when sending message %u (key "
1691			     "0x%x) to node %u\n", tmpret,
1692			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
1693			if (!dlm_is_host_down(tmpret)) {
1694				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1695				BUG();
1696			}
1697			/* a node died.  finish out the rest of the nodes. */
1698			mlog(0, "link to %d went down!\n", to);
1699			/* any nonzero status return will do */
1700			ret = tmpret;
1701			r = 0;
1702		} else if (r < 0) {
1703			/* ok, something is horribly messed up.  kill thyself. */
1704			mlog(ML_ERROR,"during assert master of %.*s to %u, "
1705			     "got %d.\n", namelen, lockname, to, r);
1706			spin_lock(&dlm->spinlock);
1707			spin_lock(&dlm->master_lock);
1708			if (dlm_find_mle(dlm, &mle, (char *)lockname,
1709					 namelen)) {
1710				dlm_print_one_mle(mle);
1711				__dlm_put_mle(mle);
1712			}
1713			spin_unlock(&dlm->master_lock);
1714			spin_unlock(&dlm->spinlock);
1715			BUG();
1716		}
1717
1718		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1719		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1720				mlog(ML_ERROR, "%.*s: very strange, "
1721				     "master MLE but no lockres on %u\n",
1722				     namelen, lockname, to);
1723		}
1724
1725		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1726			mlog(0, "%.*s: node %u create mles on other "
1727			     "nodes and requests a re-assert\n",
1728			     namelen, lockname, to);
1729			reassert = 1;
1730		}
1731		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1732			mlog(0, "%.*s: node %u has a reference to this "
1733			     "lockres, set the bit in the refmap\n",
1734			     namelen, lockname, to);
1735			spin_lock(&res->spinlock);
1736			dlm_lockres_set_refmap_bit(dlm, res, to);
1737			spin_unlock(&res->spinlock);
1738		}
1739	}
1740
1741	if (reassert)
1742		goto again;
1743
1744	spin_lock(&res->spinlock);
1745	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1746	spin_unlock(&res->spinlock);
1747	wake_up(&res->wq);
1748
1749	return ret;
1750}
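
/*
 * A usage sketch (not part of the original file): broadcasting an assert
 * to every node this node has heard from, much as the mastery path does
 * after winning an election; the zero flags value is a placeholder.
 */
static void dlm_assert_example(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       struct dlm_master_list_entry *mle)
{
	int ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);

	if (ret)
		mlog_errno(ret);	/* network error: a node likely died */
}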
1751
1752/*
1753 * locks that can be taken here:
1754 * dlm->spinlock
1755 * res->spinlock
1756 * mle->spinlock
1757 * dlm->master_list
1758 *
1759 * if possible, TRIM THIS DOWN!!!
1760 */
1761int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1762			      void **ret_data)
1763{
1764	struct dlm_ctxt *dlm = data;
1765	struct dlm_master_list_entry *mle = NULL;
1766	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1767	struct dlm_lock_resource *res = NULL;
1768	char *name;
1769	unsigned int namelen, hash;
1770	u32 flags;
1771	int master_request = 0, have_lockres_ref = 0;
1772	int ret = 0;
1773
1774	if (!dlm_grab(dlm))
1775		return 0;
1776
1777	name = assert->name;
1778	namelen = assert->namelen;
1779	hash = dlm_lockid_hash(name, namelen);
1780	flags = be32_to_cpu(assert->flags);
1781
1782	if (namelen > DLM_LOCKID_NAME_MAX) {
1783		mlog(ML_ERROR, "Invalid name length!");
1784		goto done;
1785	}
1786
1787	spin_lock(&dlm->spinlock);
1788
1789	if (flags)
1790		mlog(0, "assert_master with flags: %u\n", flags);
1791
1792	/* find the MLE */
1793	spin_lock(&dlm->master_lock);
1794	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1795		/* not an error, could be master just re-asserting */
1796		mlog(0, "just got an assert_master from %u, but no "
1797		     "MLE for it! (%.*s)\n", assert->node_idx,
1798		     namelen, name);
1799	} else {
1800		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1801		if (bit >= O2NM_MAX_NODES) {
1802			/* not necessarily an error, though less likely.
1803			 * could be master just re-asserting. */
1804			mlog(0, "no bits set in the maybe_map, but %u "
1805			     "is asserting! (%.*s)\n", assert->node_idx,
1806			     namelen, name);
1807		} else if (bit != assert->node_idx) {
1808			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1809				mlog(0, "master %u was found, %u should "
1810				     "back off\n", assert->node_idx, bit);
1811			} else {
1812				/* with the fix for bug 569, a higher node
1813				 * number winning the mastery will respond
1814				 * YES to mastery requests, but this node
1815				 * had no way of knowing.  let it pass. */
1816				mlog(0, "%u is the lowest node, "
1817				     "%u is asserting. (%.*s)  %u must "
1818				     "have begun after %u won.\n", bit,
1819				     assert->node_idx, namelen, name, bit,
1820				     assert->node_idx);
1821			}
1822		}
1823		if (mle->type == DLM_MLE_MIGRATION) {
1824			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1825				mlog(0, "%s:%.*s: got cleanup assert"
1826				     " from %u for migration\n",
1827				     dlm->name, namelen, name,
1828				     assert->node_idx);
1829			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1830				mlog(0, "%s:%.*s: got unrelated assert"
1831				     " from %u for migration, ignoring\n",
1832				     dlm->name, namelen, name,
1833				     assert->node_idx);
1834				__dlm_put_mle(mle);
1835				spin_unlock(&dlm->master_lock);
1836				spin_unlock(&dlm->spinlock);
1837				goto done;
1838			}
1839		}
1840	}
1841	spin_unlock(&dlm->master_lock);
1842
1843	/* ok everything checks out with the MLE
1844	 * now check to see if there is a lockres */
1845	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1846	if (res) {
1847		spin_lock(&res->spinlock);
1848		if (res->state & DLM_LOCK_RES_RECOVERING)  {
1849			mlog(ML_ERROR, "%u asserting but %.*s is "
1850			     "RECOVERING!\n", assert->node_idx, namelen, name);
1851			goto kill;
1852		}
1853		if (!mle) {
1854			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1855			    res->owner != assert->node_idx) {
1856				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1857				     "but current owner is %u! (%.*s)\n",
1858				     assert->node_idx, res->owner, namelen,
1859				     name);
1860				__dlm_print_one_lock_resource(res);
1861				BUG();
1862			}
1863		} else if (mle->type != DLM_MLE_MIGRATION) {
1864			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1865				/* owner is just re-asserting */
1866				if (res->owner == assert->node_idx) {
1867					mlog(0, "owner %u re-asserting on "
1868					     "lock %.*s\n", assert->node_idx,
1869					     namelen, name);
1870					goto ok;
1871				}
1872				mlog(ML_ERROR, "got assert_master from "
1873				     "node %u, but %u is the owner! "
1874				     "(%.*s)\n", assert->node_idx,
1875				     res->owner, namelen, name);
1876				goto kill;
1877			}
1878			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1879				mlog(ML_ERROR, "got assert from %u, but lock "
1880				     "with no owner should be "
1881				     "in-progress! (%.*s)\n",
1882				     assert->node_idx,
1883				     namelen, name);
1884				goto kill;
1885			}
1886		} else /* mle->type == DLM_MLE_MIGRATION */ {
1887			/* should only be getting an assert from new master */
1888			if (assert->node_idx != mle->new_master) {
1889				mlog(ML_ERROR, "got assert from %u, but "
1890				     "new master is %u, and old master "
1891				     "was %u (%.*s)\n",
1892				     assert->node_idx, mle->new_master,
1893				     mle->master, namelen, name);
1894				goto kill;
1895			}
1896
1897		}
1898ok:
1899		spin_unlock(&res->spinlock);
1900	}
1901
1902	// mlog(0, "woo!  got an assert_master from node %u!\n",
1903	// 	     assert->node_idx);
1904	if (mle) {
1905		int extra_ref = 0;
1906		int nn = -1;
1907		int rr, err = 0;
1908
1909		spin_lock(&mle->spinlock);
1910		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1911			extra_ref = 1;
1912		else {
1913			/* MASTER mle: if any bits set in the response map
1914			 * then the calling node needs to re-assert to clear
1915			 * up nodes that this node contacted */
1916			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
1917						   nn+1)) < O2NM_MAX_NODES) {
1918				if (nn != dlm->node_num && nn != assert->node_idx) {
1919					master_request = 1;
1920					break;
1921				}
1922			}
1923		}
1924		mle->master = assert->node_idx;
1925		atomic_set(&mle->woken, 1);
1926		wake_up(&mle->wq);
1927		spin_unlock(&mle->spinlock);
1928
1929		if (res) {
1930			int wake = 0;
1931			spin_lock(&res->spinlock);
1932			if (mle->type == DLM_MLE_MIGRATION) {
1933				mlog(0, "finishing off migration of lockres %.*s, "
1934				     "from %u to %u\n",
1935				     res->lockname.len, res->lockname.name,
1936				     dlm->node_num, mle->new_master);
1937				res->state &= ~DLM_LOCK_RES_MIGRATING;
1938				wake = 1;
1939				dlm_change_lockres_owner(dlm, res, mle->new_master);
1940				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1941			} else {
1942				dlm_change_lockres_owner(dlm, res, mle->master);
1943			}
1944			spin_unlock(&res->spinlock);
1945			have_lockres_ref = 1;
1946			if (wake)
1947				wake_up(&res->wq);
1948		}
1949
1950		/* master is known, detach if not already detached.
1951		 * ensures that only one assert_master call will happen
1952		 * on this mle. */
1953		spin_lock(&dlm->master_lock);
1954
1955		rr = kref_read(&mle->mle_refs);
1956		if (mle->inuse > 0) {
1957			if (extra_ref && rr < 3)
1958				err = 1;
1959			else if (!extra_ref && rr < 2)
1960				err = 1;
1961		} else {
1962			if (extra_ref && rr < 2)
1963				err = 1;
1964			else if (!extra_ref && rr < 1)
1965				err = 1;
1966		}
1967		if (err) {
1968			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1969			     "that will mess up this node, refs=%d, extra=%d, "
1970			     "inuse=%d\n", dlm->name, namelen, name,
1971			     assert->node_idx, rr, extra_ref, mle->inuse);
1972			dlm_print_one_mle(mle);
1973		}
1974		__dlm_unlink_mle(dlm, mle);
1975		__dlm_mle_detach_hb_events(dlm, mle);
1976		__dlm_put_mle(mle);
1977		if (extra_ref) {
1978			/* the assert master message now balances the extra
1979			 * ref given by the master / migration request message.
1980			 * if this is the last put, it will be removed
1981			 * from the list. */
1982			__dlm_put_mle(mle);
1983		}
1984		spin_unlock(&dlm->master_lock);
1985	} else if (res) {
1986		if (res->owner != assert->node_idx) {
1987			mlog(0, "assert_master from %u, but current "
1988			     "owner is %u (%.*s), no mle\n", assert->node_idx,
1989			     res->owner, namelen, name);
1990		}
1991	}
1992	spin_unlock(&dlm->spinlock);
1993
1994done:
1995	ret = 0;
1996	if (res) {
1997		spin_lock(&res->spinlock);
1998		res->state |= DLM_LOCK_RES_SETREF_INPROG;
1999		spin_unlock(&res->spinlock);
2000		*ret_data = (void *)res;
2001	}
2002	dlm_put(dlm);
2003	if (master_request) {
2004		mlog(0, "need to tell master to reassert\n");
2005		/* positive. negative would shoot down the node. */
2006		ret |= DLM_ASSERT_RESPONSE_REASSERT;
2007		if (!have_lockres_ref) {
2008			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2009			     "mle present here for %s:%.*s, but no lockres!\n",
2010			     assert->node_idx, dlm->name, namelen, name);
2011		}
2012	}
2013	if (have_lockres_ref) {
2014		/* let the master know we have a reference to the lockres */
2015		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2016		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2017		     dlm->name, namelen, name, assert->node_idx);
2018	}
2019	return ret;
2020
2021kill:
2022	/* kill the caller! */
2023	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2024	     "and killing the other node now!  This node is OK and can continue.\n");
2025	__dlm_print_one_lock_resource(res);
2026	spin_unlock(&res->spinlock);
2027	spin_lock(&dlm->master_lock);
2028	if (mle)
2029		__dlm_put_mle(mle);
2030	spin_unlock(&dlm->master_lock);
2031	spin_unlock(&dlm->spinlock);
2032	*ret_data = (void *)res;
2033	dlm_put(dlm);
2034	return -EINVAL;
2035}
2036
2037void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2038{
2039	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2040
2041	if (ret_data) {
2042		spin_lock(&res->spinlock);
2043		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2044		spin_unlock(&res->spinlock);
2045		wake_up(&res->wq);
2046		dlm_lockres_put(res);
2047	}
2048	return;
2049}
2050
2051int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2052			       struct dlm_lock_resource *res,
2053			       int ignore_higher, u8 request_from, u32 flags)
2054{
2055	struct dlm_work_item *item;
2056	item = kzalloc(sizeof(*item), GFP_ATOMIC);
2057	if (!item)
2058		return -ENOMEM;
2059
2060
2061	/* queue up work for dlm_assert_master_worker */
2062	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2063	item->u.am.lockres = res; /* already have a ref */
2064	/* can optionally ignore node numbers higher than this node */
2065	item->u.am.ignore_higher = ignore_higher;
2066	item->u.am.request_from = request_from;
2067	item->u.am.flags = flags;
2068
2069	if (ignore_higher)
2070		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2071		     res->lockname.name);
2072
2073	spin_lock(&dlm->work_lock);
2074	list_add_tail(&item->list, &dlm->work_list);
2075	spin_unlock(&dlm->work_lock);
2076
2077	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2078	return 0;
2079}
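
/*
 * Editor's note: a hedged sketch of the deferred-work idiom used by
 * dlm_dispatch_assert_master() above.  "example_worker" is an illustrative
 * name, not a function in this file; the pattern is allocate a work item,
 * bind the worker, append under work_lock, then kick the shared workqueue.
 */
static void example_worker(struct dlm_work_item *item, void *data);

static int example_dispatch_work(struct dlm_ctxt *dlm)
{
	struct dlm_work_item *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	dlm_init_work_item(dlm, item, example_worker, NULL);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	/* the dlm worker thread drains dlm->work_list and calls each worker */
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}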
2080
2081static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2082{
2083	struct dlm_ctxt *dlm = data;
2084	int ret = 0;
2085	struct dlm_lock_resource *res;
2086	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2087	int ignore_higher;
2088	int bit;
2089	u8 request_from;
2090	u32 flags;
2091
2092	dlm = item->dlm;
2093	res = item->u.am.lockres;
2094	ignore_higher = item->u.am.ignore_higher;
2095	request_from = item->u.am.request_from;
2096	flags = item->u.am.flags;
2097
2098	spin_lock(&dlm->spinlock);
2099	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2100	spin_unlock(&dlm->spinlock);
2101
2102	clear_bit(dlm->node_num, nodemap);
2103	if (ignore_higher) {
2104		/* if this is just to clear up mles for nodes below
2105		 * this node, do not send the message to the original
2106		 * caller or any node number higher than this */
2107		clear_bit(request_from, nodemap);
2108		bit = dlm->node_num;
2109		while (1) {
2110			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2111					    bit+1);
2112			if (bit >= O2NM_MAX_NODES)
2113				break;
2114			clear_bit(bit, nodemap);
2115		}
2116	}
2117
2118	/*
2119	 * If we're migrating this lock to someone else, we are no
2120	 * longer allowed to assert our own mastery.  OTOH, we need to
2121	 * prevent migration from starting while we're still asserting
2122	 * our dominance.  The reserved ast delays migration.
2123	 */
2124	spin_lock(&res->spinlock);
2125	if (res->state & DLM_LOCK_RES_MIGRATING) {
2126		mlog(0, "Someone asked us to assert mastery, but we're "
2127		     "in the middle of migration.  Skipping assert, "
2128		     "the new master will handle that.\n");
2129		spin_unlock(&res->spinlock);
2130		goto put;
2131	} else
2132		__dlm_lockres_reserve_ast(res);
2133	spin_unlock(&res->spinlock);
2134
2135	/* this call now finishes out the nodemap
2136	 * even if one or more nodes die */
2137	mlog(0, "worker about to master %.*s here, this=%u\n",
2138		     res->lockname.len, res->lockname.name, dlm->node_num);
2139	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2140	if (ret < 0) {
2141		/* no need to restart, we are done */
2142		if (!dlm_is_host_down(ret))
2143			mlog_errno(ret);
2144	}
2145
2146	/* Ok, we've asserted ourselves.  Let's let migration start. */
2147	dlm_lockres_release_ast(dlm, res);
2148
2149put:
2150	dlm_lockres_drop_inflight_worker(dlm, res);
2151
2152	dlm_lockres_put(res);
2153
2154	mlog(0, "finished with dlm_assert_master_worker\n");
2155}
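
/*
 * Editor's note: hypothetical condensation of the ignore_higher loop in
 * dlm_assert_master_worker() above.  Assuming nodemap has O2NM_MAX_NODES
 * bits, bitmap_clear() from <linux/bitmap.h> drops every bit strictly
 * above this node's number in one call, with the same net effect as the
 * find_next_bit()/clear_bit() loop.
 */
static inline void example_clear_higher_nodes(unsigned long *nodemap, u8 me)
{
	bitmap_clear(nodemap, me + 1, O2NM_MAX_NODES - me - 1);
}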
2156
2157/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2158 * We cannot wait for node recovery to complete to begin mastering this
2159 * lockres because this lockres is used to kick off recovery! ;-)
2160 * So, do a pre-check on all living nodes to see if any of those nodes
2161 * think that $RECOVERY is currently mastered by a dead node.  If so,
2162 * we wait a short time to allow that node to get notified by its own
2163 * heartbeat stack, then check again.  All $RECOVERY lock resources
2164 * mastered by dead nodes are purged when the heartbeat callback is
2165 * fired, so we can know for sure that it is safe to continue once
2166 * the query returns a live node or no node.  */
2167static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2168				       struct dlm_lock_resource *res)
2169{
2170	struct dlm_node_iter iter;
2171	int nodenum;
2172	int ret = 0;
2173	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2174
2175	spin_lock(&dlm->spinlock);
2176	dlm_node_iter_init(dlm->domain_map, &iter);
2177	spin_unlock(&dlm->spinlock);
2178
2179	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2180		/* do not send to self */
2181		if (nodenum == dlm->node_num)
2182			continue;
2183		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2184		if (ret < 0) {
2185			mlog_errno(ret);
2186			if (!dlm_is_host_down(ret))
2187				BUG();
2188			/* host is down, so answer for that node would be
2189			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2190			ret = 0;
2191		}
2192
2193		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2194			/* check to see if this master is in the recovery map */
2195			spin_lock(&dlm->spinlock);
2196			if (test_bit(master, dlm->recovery_map)) {
2197				mlog(ML_NOTICE, "%s: node %u has not seen "
2198				     "node %u go down yet, and thinks the "
2199				     "dead node is mastering the recovery "
2200				     "lock.  must wait.\n", dlm->name,
2201				     nodenum, master);
2202				ret = -EAGAIN;
2203			}
2204			spin_unlock(&dlm->spinlock);
2205			mlog(0, "%s: reco lock master is %u\n", dlm->name,
2206			     master);
2207			break;
2208		}
2209	}
2210	return ret;
2211}
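
/*
 * Editor's note: a minimal sketch of how a caller could drive the
 * pre-check above -- on -EAGAIN, sleep briefly so the laggard node's own
 * heartbeat stack can observe the death, then re-check.  The 100ms backoff
 * is illustrative, not taken from this file's callers.
 */
static void example_wait_for_reco_precheck(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res)
{
	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
		msleep(100);
}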
2212
2213/*
2214 * DLM_DEREF_LOCKRES_MSG
2215 */
2216
2217int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2218{
2219	struct dlm_deref_lockres deref;
2220	int ret = 0, r;
2221	const char *lockname;
2222	unsigned int namelen;
2223
2224	lockname = res->lockname.name;
2225	namelen = res->lockname.len;
2226	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2227
2228	memset(&deref, 0, sizeof(deref));
2229	deref.node_idx = dlm->node_num;
2230	deref.namelen = namelen;
2231	memcpy(deref.name, lockname, namelen);
2232
2233	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2234				 &deref, sizeof(deref), res->owner, &r);
2235	if (ret < 0)
2236		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2237		     dlm->name, namelen, lockname, ret, res->owner);
2238	else if (r < 0) {
2239		/* BAD.  other node says I did not have a ref. */
2240		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2241		     dlm->name, namelen, lockname, res->owner, r);
2242		dlm_print_one_lock_resource(res);
2243		if (r == -ENOMEM)
2244			BUG();
2245	} else
2246		ret = r;
2247
2248	return ret;
2249}
2250
2251int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2252			      void **ret_data)
2253{
2254	struct dlm_ctxt *dlm = data;
2255	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2256	struct dlm_lock_resource *res = NULL;
2257	char *name;
2258	unsigned int namelen;
2259	int ret = -EINVAL;
2260	u8 node;
2261	unsigned int hash;
2262	struct dlm_work_item *item;
2263	int cleared = 0;
2264	int dispatch = 0;
2265
2266	if (!dlm_grab(dlm))
2267		return 0;
2268
2269	name = deref->name;
2270	namelen = deref->namelen;
2271	node = deref->node_idx;
2272
2273	if (namelen > DLM_LOCKID_NAME_MAX) {
2274		mlog(ML_ERROR, "Invalid name length!");
2275		goto done;
2276	}
2277	if (deref->node_idx >= O2NM_MAX_NODES) {
2278		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2279		goto done;
2280	}
2281
2282	hash = dlm_lockid_hash(name, namelen);
2283
2284	spin_lock(&dlm->spinlock);
2285	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2286	if (!res) {
2287		spin_unlock(&dlm->spinlock);
2288		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2289		     dlm->name, namelen, name);
2290		goto done;
2291	}
2292	spin_unlock(&dlm->spinlock);
2293
2294	spin_lock(&res->spinlock);
2295	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2296		dispatch = 1;
2297	else {
2298		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2299		if (test_bit(node, res->refmap)) {
2300			dlm_lockres_clear_refmap_bit(dlm, res, node);
2301			cleared = 1;
2302		}
2303	}
2304	spin_unlock(&res->spinlock);
2305
2306	if (!dispatch) {
2307		if (cleared)
2308			dlm_lockres_calc_usage(dlm, res);
2309		else {
2310			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2311			     "but it is already dropped!\n", dlm->name,
2312			     res->lockname.len, res->lockname.name, node);
2313			dlm_print_one_lock_resource(res);
2314		}
2315		ret = DLM_DEREF_RESPONSE_DONE;
2316		goto done;
2317	}
2318
2319	item = kzalloc(sizeof(*item), GFP_NOFS);
2320	if (!item) {
2321		ret = -ENOMEM;
2322		mlog_errno(ret);
2323		goto done;
2324	}
2325
2326	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2327	item->u.dl.deref_res = res;
2328	item->u.dl.deref_node = node;
2329
2330	spin_lock(&dlm->work_lock);
2331	list_add_tail(&item->list, &dlm->work_list);
2332	spin_unlock(&dlm->work_lock);
2333
2334	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2335	return DLM_DEREF_RESPONSE_INPROG;
2336
2337done:
2338	if (res)
2339		dlm_lockres_put(res);
2340	dlm_put(dlm);
2341
2342	return ret;
2343}
2344
2345int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2346			      void **ret_data)
2347{
2348	struct dlm_ctxt *dlm = data;
2349	struct dlm_deref_lockres_done *deref
2350			= (struct dlm_deref_lockres_done *)msg->buf;
2351	struct dlm_lock_resource *res = NULL;
2352	char *name;
2353	unsigned int namelen;
2354	int ret = -EINVAL;
2355	u8 node;
2356	unsigned int hash;
2357
2358	if (!dlm_grab(dlm))
2359		return 0;
2360
2361	name = deref->name;
2362	namelen = deref->namelen;
2363	node = deref->node_idx;
2364
2365	if (namelen > DLM_LOCKID_NAME_MAX) {
2366		mlog(ML_ERROR, "Invalid name length!");
2367		goto done;
2368	}
2369	if (deref->node_idx >= O2NM_MAX_NODES) {
2370		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2371		goto done;
2372	}
2373
2374	hash = dlm_lockid_hash(name, namelen);
2375
2376	spin_lock(&dlm->spinlock);
2377	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2378	if (!res) {
2379		spin_unlock(&dlm->spinlock);
2380		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2381		     dlm->name, namelen, name);
2382		goto done;
2383	}
2384
2385	spin_lock(&res->spinlock);
2386	if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2387		spin_unlock(&res->spinlock);
2388		spin_unlock(&dlm->spinlock);
2389		mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
2390			"but it is already derefed!\n", dlm->name,
2391			res->lockname.len, res->lockname.name, node);
2392		ret = 0;
2393		goto done;
2394	}
2395
2396	__dlm_do_purge_lockres(dlm, res);
2397	spin_unlock(&res->spinlock);
2398	wake_up(&res->wq);
2399
2400	spin_unlock(&dlm->spinlock);
2401
2402	ret = 0;
2403done:
2404	if (res)
2405		dlm_lockres_put(res);
2406	dlm_put(dlm);
2407	return ret;
2408}
2409
2410static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
2411		struct dlm_lock_resource *res, u8 node)
2412{
2413	struct dlm_deref_lockres_done deref;
2414	int ret = 0, r;
2415	const char *lockname;
2416	unsigned int namelen;
2417
2418	lockname = res->lockname.name;
2419	namelen = res->lockname.len;
2420	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2421
2422	memset(&deref, 0, sizeof(deref));
2423	deref.node_idx = dlm->node_num;
2424	deref.namelen = namelen;
2425	memcpy(deref.name, lockname, namelen);
2426
2427	ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
2428				 &deref, sizeof(deref), node, &r);
2429	if (ret < 0) {
2430		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
2431				" to node %u\n", dlm->name, namelen,
2432				lockname, ret, node);
2433	} else if (r < 0) {
2434		/* ignore the error */
2435		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2436		     dlm->name, namelen, lockname, node, r);
2437		dlm_print_one_lock_resource(res);
2438	}
2439}
2440
2441static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2442{
2443	struct dlm_ctxt *dlm;
2444	struct dlm_lock_resource *res;
2445	u8 node;
2446	u8 cleared = 0;
2447
2448	dlm = item->dlm;
2449	res = item->u.dl.deref_res;
2450	node = item->u.dl.deref_node;
2451
2452	spin_lock(&res->spinlock);
2453	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2454	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2455	if (test_bit(node, res->refmap)) {
2456		dlm_lockres_clear_refmap_bit(dlm, res, node);
2457		cleared = 1;
2458	}
2459	spin_unlock(&res->spinlock);
2460
2461	dlm_drop_lockres_ref_done(dlm, res, node);
2462
2463	if (cleared) {
2464		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2465		     dlm->name, res->lockname.len, res->lockname.name, node);
2466		dlm_lockres_calc_usage(dlm, res);
2467	} else {
2468		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2469		     "but it is already dropped!\n", dlm->name,
2470		     res->lockname.len, res->lockname.name, node);
2471		dlm_print_one_lock_resource(res);
2472	}
2473
2474	dlm_lockres_put(res);
2475}
2476
2477/*
2478 * A migratable resource is one that is:
2479 * 1. locally mastered, and,
2480 * 2. zero local locks, and,
2481 * 3. one or more non-local locks, or, one or more references
2482 * Returns 1 if yes, 0 if not.
2483 */
2484static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
2485				      struct dlm_lock_resource *res)
2486{
2487	enum dlm_lockres_list idx;
2488	int nonlocal = 0, node_ref;
2489	struct list_head *queue;
2490	struct dlm_lock *lock;
2491	u64 cookie;
2492
2493	assert_spin_locked(&res->spinlock);
2494
2495	/* delay migration when the lockres is in MIGRATING state */
2496	if (res->state & DLM_LOCK_RES_MIGRATING)
2497		return 0;
2498
2499	/* delay migration when the lockres is in RECOVERING state */
2500	if (res->state & (DLM_LOCK_RES_RECOVERING|
2501			DLM_LOCK_RES_RECOVERY_WAITING))
2502		return 0;
2503
2504	if (res->owner != dlm->node_num)
2505		return 0;
2506
2507	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2508		queue = dlm_list_idx_to_ptr(res, idx);
2509		list_for_each_entry(lock, queue, list) {
2510			if (lock->ml.node != dlm->node_num) {
2511				nonlocal++;
2512				continue;
2513			}
2514			cookie = be64_to_cpu(lock->ml.cookie);
2515			mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
2516			     "%s list\n", dlm->name, res->lockname.len,
2517			     res->lockname.name,
2518			     dlm_get_lock_cookie_node(cookie),
2519			     dlm_get_lock_cookie_seq(cookie),
2520			     dlm_list_in_text(idx));
2521			return 0;
2522		}
2523	}
2524
2525	if (!nonlocal) {
2526		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2527		if (node_ref >= O2NM_MAX_NODES)
2528			return 0;
2529	}
2530
2531	mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
2532	     res->lockname.name);
2533
2534	return 1;
2535}
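
/*
 * Editor's note: a small sketch of the lock-cookie decoding used in the
 * log message above.  The on-wire cookie is big-endian; the node number
 * and per-node sequence are unpacked with the dlmcommon.h helpers.
 */
static void example_log_lock_cookie(struct dlm_lock *lock)
{
	u64 cookie = be64_to_cpu(lock->ml.cookie);

	mlog(0, "lock %u:%llu\n",
	     dlm_get_lock_cookie_node(cookie),
	     dlm_get_lock_cookie_seq(cookie));
}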
2536
2537/*
2538 * DLM_MIGRATE_LOCKRES
2539 */
2540
2541
2542static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2543			       struct dlm_lock_resource *res, u8 target)
2544{
2545	struct dlm_master_list_entry *mle = NULL;
2546	struct dlm_master_list_entry *oldmle = NULL;
2547	struct dlm_migratable_lockres *mres = NULL;
2548	int ret = 0;
2549	const char *name;
2550	unsigned int namelen;
2551	int mle_added = 0;
2552	int wake = 0;
2553
2554	if (!dlm_grab(dlm))
2555		return -EINVAL;
2556
2557	BUG_ON(target == O2NM_MAX_NODES);
2558
2559	name = res->lockname.name;
2560	namelen = res->lockname.len;
2561
2562	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2563	     target);
2564
2565	/* preallocate up front. if this fails, abort */
2566	ret = -ENOMEM;
2567	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2568	if (!mres) {
2569		mlog_errno(ret);
2570		goto leave;
2571	}
2572
2573	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2574	if (!mle) {
2575		mlog_errno(ret);
2576		goto leave;
2577	}
2578	ret = 0;
2579
2580	/*
2581	 * clear any existing master requests and
2582	 * add the migration mle to the list
2583	 */
2584	spin_lock(&dlm->spinlock);
2585	spin_lock(&dlm->master_lock);
2586	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2587				    namelen, target, dlm->node_num);
2588	/* get an extra reference on the mle.
2589	 * otherwise the assert_master from the new
2590	 * master will destroy this.
2591	 */
2592	if (ret != -EEXIST)
2593		dlm_get_mle_inuse(mle);
2594
2595	spin_unlock(&dlm->master_lock);
2596	spin_unlock(&dlm->spinlock);
2597
2598	if (ret == -EEXIST) {
2599		mlog(0, "another process is already migrating it\n");
2600		goto fail;
2601	}
2602	mle_added = 1;
2603
2604	/*
2605	 * set the MIGRATING flag and flush asts
2606	 * if we fail after this we need to re-dirty the lockres
2607	 */
2608	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2609		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2610		     "the target went down.\n", res->lockname.len,
2611		     res->lockname.name, target);
2612		spin_lock(&res->spinlock);
2613		res->state &= ~DLM_LOCK_RES_MIGRATING;
2614		wake = 1;
2615		spin_unlock(&res->spinlock);
2616		ret = -EINVAL;
2617	}
2618
2619fail:
2620	if (ret != -EEXIST && oldmle) {
2621		/* master is known, detach if not already detached */
2622		dlm_mle_detach_hb_events(dlm, oldmle);
2623		dlm_put_mle(oldmle);
2624	}
2625
2626	if (ret < 0) {
2627		if (mle_added) {
2628			dlm_mle_detach_hb_events(dlm, mle);
2629			dlm_put_mle(mle);
2630			dlm_put_mle_inuse(mle);
2631		} else if (mle) {
2632			kmem_cache_free(dlm_mle_cache, mle);
2633			mle = NULL;
2634		}
2635		goto leave;
2636	}
2637
2638	/*
2639	 * at this point, we have a migration target, an mle
2640	 * in the master list, and the MIGRATING flag set on
2641	 * the lockres
2642	 */
2643
2644	/* now that remote nodes are spinning on the MIGRATING flag,
2645	 * ensure that all assert_master work is flushed. */
2646	flush_workqueue(dlm->dlm_worker);
2647
2648	/* notify new node and send all lock state */
2649	/* call send_one_lockres with migration flag.
2650	 * this serves as notice to the target node that a
2651	 * migration is starting. */
2652	ret = dlm_send_one_lockres(dlm, res, mres, target,
2653				   DLM_MRES_MIGRATION);
2654
2655	if (ret < 0) {
2656		mlog(0, "migration to node %u failed with %d\n",
2657		     target, ret);
2658		/* migration failed, detach and clean up mle */
2659		dlm_mle_detach_hb_events(dlm, mle);
2660		dlm_put_mle(mle);
2661		dlm_put_mle_inuse(mle);
2662		spin_lock(&res->spinlock);
2663		res->state &= ~DLM_LOCK_RES_MIGRATING;
2664		wake = 1;
2665		spin_unlock(&res->spinlock);
2666		if (dlm_is_host_down(ret))
2667			dlm_wait_for_node_death(dlm, target,
2668						DLM_NODE_DEATH_WAIT_MAX);
2669		goto leave;
2670	}
2671
2672	/* at this point, the target sends a message to all nodes,
2673	 * (using dlm_do_migrate_request).  this node is skipped since
2674	 * we had to put an mle in the list to begin the process.  this
2675	 * node now waits for target to do an assert master.  this node
2676	 * will be the last one notified, ensuring that the migration
2677	 * is complete everywhere.  if the target dies while this is
2678	 * going on, some nodes could potentially see the target as the
2679	 * master, so it is important that my recovery finds the migration
2680	 * mle and sets the master to UNKNOWN. */
2681
2682
2683	/* wait for new node to assert master */
2684	while (1) {
2685		ret = wait_event_interruptible_timeout(mle->wq,
2686					(atomic_read(&mle->woken) == 1),
2687					msecs_to_jiffies(5000));
2688
2689		if (ret >= 0) {
2690			if (atomic_read(&mle->woken) == 1 ||
2691			    res->owner == target)
2692				break;
2693
2694			mlog(0, "%s:%.*s: timed out during migration\n",
2695			     dlm->name, res->lockname.len, res->lockname.name);
2696			/* avoid hang during shutdown when migrating lockres
2697			 * to a node which also goes down */
2698			if (dlm_is_node_dead(dlm, target)) {
2699				mlog(0, "%s:%.*s: expected migration "
2700				     "target %u is no longer up, restarting\n",
2701				     dlm->name, res->lockname.len,
2702				     res->lockname.name, target);
2703				ret = -EINVAL;
2704				/* migration failed, detach and clean up mle */
2705				dlm_mle_detach_hb_events(dlm, mle);
2706				dlm_put_mle(mle);
2707				dlm_put_mle_inuse(mle);
2708				spin_lock(&res->spinlock);
2709				res->state &= ~DLM_LOCK_RES_MIGRATING;
2710				wake = 1;
2711				spin_unlock(&res->spinlock);
2712				goto leave;
2713			}
2714		} else
2715			mlog(0, "%s:%.*s: caught signal during migration\n",
2716			     dlm->name, res->lockname.len, res->lockname.name);
2717	}
2718
2719	/* all done, set the owner, clear the flag */
2720	spin_lock(&res->spinlock);
2721	dlm_set_lockres_owner(dlm, res, target);
2722	res->state &= ~DLM_LOCK_RES_MIGRATING;
2723	dlm_remove_nonlocal_locks(dlm, res);
2724	spin_unlock(&res->spinlock);
2725	wake_up(&res->wq);
2726
2727	/* master is known, detach if not already detached */
2728	dlm_mle_detach_hb_events(dlm, mle);
2729	dlm_put_mle_inuse(mle);
2730	ret = 0;
2731
2732	dlm_lockres_calc_usage(dlm, res);
2733
2734leave:
2735	/* re-dirty the lockres if we failed */
2736	if (ret < 0)
2737		dlm_kick_thread(dlm, res);
2738
2739	/* wake up waiters if the MIGRATING flag got set
2740	 * but migration failed */
2741	if (wake)
2742		wake_up(&res->wq);
2743
2744	if (mres)
2745		free_page((unsigned long)mres);
2746
2747	dlm_put(dlm);
2748
2749	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2750	     name, target, ret);
2751	return ret;
2752}
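
/*
 * Editor's note: a hedged sketch of the bounded-wait idiom used in the
 * migration loop above.  wait_event_interruptible_timeout() returns a
 * negative value on signal, 0 on timeout, and a positive remainder once
 * the condition holds; the loop treats a timeout as a cue to re-check
 * whether the migration target died.
 */
static int example_wait_for_assert(struct dlm_master_list_entry *mle)
{
	int ret;

	ret = wait_event_interruptible_timeout(mle->wq,
					atomic_read(&mle->woken) == 1,
					msecs_to_jiffies(5000));
	/* caller must still recheck mle->woken and node liveness */
	return ret;
}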
2753
2754#define DLM_MIGRATION_RETRY_MS  100
2755
2756/*
2757 * Should be called only after beginning the domain leave process.
2758 * There should not be any remaining locks on nonlocal lock resources,
2759 * and there should be no local locks left on locally mastered resources.
2760 *
2761 * Called with the dlm spinlock held, may drop it to do migration, but
2762 * will re-acquire before exit.
2763 *
2764 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2765 */
2766int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2767{
2768	int ret;
2769	int lock_dropped = 0;
2770	u8 target = O2NM_MAX_NODES;
2771
2772	assert_spin_locked(&dlm->spinlock);
2773
2774	spin_lock(&res->spinlock);
2775	if (dlm_is_lockres_migratable(dlm, res))
2776		target = dlm_pick_migration_target(dlm, res);
2777	spin_unlock(&res->spinlock);
2778
2779	if (target == O2NM_MAX_NODES)
2780		goto leave;
2781
2782	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2783	spin_unlock(&dlm->spinlock);
2784	lock_dropped = 1;
2785	ret = dlm_migrate_lockres(dlm, res, target);
2786	if (ret)
2787		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2788		     dlm->name, res->lockname.len, res->lockname.name,
2789		     target, ret);
2790	spin_lock(&dlm->spinlock);
2791leave:
2792	return lock_dropped;
2793}
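
/*
 * Editor's note: hedged sketch of the locking contract for
 * dlm_empty_lockres() -- the caller holds dlm->spinlock, and a nonzero
 * return means the lock was dropped and retaken, so any bucket iteration
 * in progress must restart rather than trust cached state.
 */
static void example_try_empty(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	if (dlm_empty_lockres(dlm, res)) {
		/* spinlock was dropped/retaken: restart any scan */
	}
	spin_unlock(&dlm->spinlock);
}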
2794
2795int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2796{
2797	int ret;
2798	spin_lock(&dlm->ast_lock);
2799	spin_lock(&lock->spinlock);
2800	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2801	spin_unlock(&lock->spinlock);
2802	spin_unlock(&dlm->ast_lock);
2803	return ret;
2804}
2805
2806static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2807				     struct dlm_lock_resource *res,
2808				     u8 mig_target)
2809{
2810	int can_proceed;
2811	spin_lock(&res->spinlock);
2812	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2813	spin_unlock(&res->spinlock);
2814
2815	/* target has died, so make the caller break out of the
2816	 * wait_event, but caller must recheck the domain_map */
2817	spin_lock(&dlm->spinlock);
2818	if (!test_bit(mig_target, dlm->domain_map))
2819		can_proceed = 1;
2820	spin_unlock(&dlm->spinlock);
2821	return can_proceed;
2822}
2823
2824static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2825				struct dlm_lock_resource *res)
2826{
2827	int ret;
2828	spin_lock(&res->spinlock);
2829	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2830	spin_unlock(&res->spinlock);
2831	return ret;
2832}
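
/*
 * Editor's note: dlm_migration_can_proceed() and dlm_lockres_is_dirty()
 * exist so that wait_event() predicates sample res->state under the
 * spinlock.  A hedged template for the same locked-predicate pattern:
 */
static int example_flag_is_set(struct dlm_lock_resource *res, int flag)
{
	int ret;

	spin_lock(&res->spinlock);
	ret = !!(res->state & flag);
	spin_unlock(&res->spinlock);
	return ret;
}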
2833
2834
2835static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2836				       struct dlm_lock_resource *res,
2837				       u8 target)
2838{
2839	int ret = 0;
2840
2841	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2842	       res->lockname.len, res->lockname.name, dlm->node_num,
2843	       target);
2844	/* need to set MIGRATING flag on lockres.  this is done by
2845	 * ensuring that all asts have been flushed for this lockres. */
2846	spin_lock(&res->spinlock);
2847	BUG_ON(res->migration_pending);
2848	res->migration_pending = 1;
2849	/* strategy is to reserve an extra ast then release
2850	 * it below, letting the release do all of the work */
2851	__dlm_lockres_reserve_ast(res);
2852	spin_unlock(&res->spinlock);
2853
2854	/* now flush all the pending asts */
2855	dlm_kick_thread(dlm, res);
2856	/* before waiting on DIRTY, block processes which may
2857	 * try to dirty the lockres before MIGRATING is set */
2858	spin_lock(&res->spinlock);
2859	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2860	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2861	spin_unlock(&res->spinlock);
2862	/* now wait on any pending asts and the DIRTY state */
2863	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2864	dlm_lockres_release_ast(dlm, res);
2865
2866	mlog(0, "about to wait on migration_wq, dirty=%s\n",
2867	       res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2868	/* if the extra ref we just put was the final one, this
2869	 * will pass thru immediately.  otherwise, we need to wait
2870	 * for the last ast to finish. */
2871again:
2872	ret = wait_event_interruptible_timeout(dlm->migration_wq,
2873		   dlm_migration_can_proceed(dlm, res, target),
2874		   msecs_to_jiffies(1000));
2875	if (ret < 0) {
2876		mlog(0, "woken again: migrating? %s, dead? %s\n",
2877		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2878		       test_bit(target, dlm->domain_map) ? "no":"yes");
2879	} else {
2880		mlog(0, "all is well: migrating? %s, dead? %s\n",
2881		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2882		       test_bit(target, dlm->domain_map) ? "no":"yes");
2883	}
2884	if (!dlm_migration_can_proceed(dlm, res, target)) {
2885		mlog(0, "trying again...\n");
2886		goto again;
2887	}
2888
2889	ret = 0;
2890	/* did the target go down or die? */
2891	spin_lock(&dlm->spinlock);
2892	if (!test_bit(target, dlm->domain_map)) {
2893		mlog(ML_ERROR, "aha. migration target %u just went down\n",
2894		     target);
2895		ret = -EHOSTDOWN;
2896	}
2897	spin_unlock(&dlm->spinlock);
2898
2899	/*
2900	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2901	 * another try; otherwise, we are sure the MIGRATING state is there,
2902	 * drop the unneeded state which blocked threads trying to DIRTY
2903	 */
2904	spin_lock(&res->spinlock);
2905	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2906	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2907	if (!ret)
2908		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2909	else
2910		res->migration_pending = 0;
2911	spin_unlock(&res->spinlock);
2912
2913	/*
2914	 * at this point:
2915	 *
2916	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2917	 *   o there are no pending asts on this lockres
2918	 *   o all processes trying to reserve an ast on this
2919	 *     lockres must wait for the MIGRATING flag to clear
2920	 */
2921	return ret;
2922}
2923
2924/* last step in the migration process.
2925 * original master calls this to free all of the dlm_lock
2926 * structures that used to be for other nodes. */
2927static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2928				      struct dlm_lock_resource *res)
2929{
2930	struct list_head *queue = &res->granted;
2931	int i, bit;
2932	struct dlm_lock *lock, *next;
2933
2934	assert_spin_locked(&res->spinlock);
2935
2936	BUG_ON(res->owner == dlm->node_num);
2937
2938	for (i = 0; i < 3; i++) {
2939		list_for_each_entry_safe(lock, next, queue, list) {
2940			if (lock->ml.node != dlm->node_num) {
2941				mlog(0, "putting lock for node %u\n",
2942				     lock->ml.node);
2943				/* be extra careful */
2944				BUG_ON(!list_empty(&lock->ast_list));
2945				BUG_ON(!list_empty(&lock->bast_list));
2946				BUG_ON(lock->ast_pending);
2947				BUG_ON(lock->bast_pending);
2948				dlm_lockres_clear_refmap_bit(dlm, res,
2949							     lock->ml.node);
2950				list_del_init(&lock->list);
2951				dlm_lock_put(lock);
2952				/* In a normal unlock, we would have added a
2953				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2954				dlm_lock_put(lock);
2955			}
2956		}
2957		queue++;
2958	}
2959	bit = 0;
2960	while (1) {
2961		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2962		if (bit >= O2NM_MAX_NODES)
2963			break;
2964		/* do not clear the local node reference, if there is a
2965		 * process holding this, let it drop the ref itself */
2966		if (bit != dlm->node_num) {
2967			mlog(0, "%s:%.*s: node %u had a ref to this "
2968			     "migrating lockres, clearing\n", dlm->name,
2969			     res->lockname.len, res->lockname.name, bit);
2970			dlm_lockres_clear_refmap_bit(dlm, res, bit);
2971		}
2972		bit++;
2973	}
2974}
2975
2976/*
2977 * Pick a node to migrate the lock resource to. This function selects a
2978 * potential target based first on the locks and then on refmap. It skips
2979 * nodes that are in the process of exiting the domain.
2980 */
2981static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2982				    struct dlm_lock_resource *res)
2983{
2984	enum dlm_lockres_list idx;
2985	struct list_head *queue = &res->granted;
2986	struct dlm_lock *lock;
2987	int noderef;
2988	u8 nodenum = O2NM_MAX_NODES;
2989
2990	assert_spin_locked(&dlm->spinlock);
2991	assert_spin_locked(&res->spinlock);
2992
2993	/* Go through all the locks */
2994	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2995		queue = dlm_list_idx_to_ptr(res, idx);
2996		list_for_each_entry(lock, queue, list) {
2997			if (lock->ml.node == dlm->node_num)
2998				continue;
2999			if (test_bit(lock->ml.node, dlm->exit_domain_map))
3000				continue;
3001			nodenum = lock->ml.node;
3002			goto bail;
3003		}
3004	}
3005
3006	/* Go thru the refmap */
3007	noderef = -1;
3008	while (1) {
3009		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
3010					noderef + 1);
3011		if (noderef >= O2NM_MAX_NODES)
3012			break;
3013		if (noderef == dlm->node_num)
3014			continue;
3015		if (test_bit(noderef, dlm->exit_domain_map))
3016			continue;
3017		nodenum = noderef;
3018		goto bail;
3019	}
3020
3021bail:
3022	return nodenum;
3023}
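
/*
 * Editor's note: a minimal usage sketch for the picker above.  Both
 * dlm->spinlock and res->spinlock must be held, and a return value of
 * O2NM_MAX_NODES means "no viable migration target".
 */
static u8 example_choose_target(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	u8 target;

	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);
	target = dlm_pick_migration_target(dlm, res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);

	return target;
}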
3024
3025/* this is called by the new master once all lockres
3026 * data has been received */
3027static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
3028				  struct dlm_lock_resource *res,
3029				  u8 master, u8 new_master,
3030				  struct dlm_node_iter *iter)
3031{
3032	struct dlm_migrate_request migrate;
3033	int ret, skip, status = 0;
3034	int nodenum;
3035
3036	memset(&migrate, 0, sizeof(migrate));
3037	migrate.namelen = res->lockname.len;
3038	memcpy(migrate.name, res->lockname.name, migrate.namelen);
3039	migrate.new_master = new_master;
3040	migrate.master = master;
3041
3042	ret = 0;
3043
3044	/* send message to all nodes, except the master and myself */
3045	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3046		if (nodenum == master ||
3047		    nodenum == new_master)
3048			continue;
3049
3050		/* We could race exit domain. If exited, skip. */
3051		spin_lock(&dlm->spinlock);
3052		skip = (!test_bit(nodenum, dlm->domain_map));
3053		spin_unlock(&dlm->spinlock);
3054		if (skip) {
3055			clear_bit(nodenum, iter->node_map);
3056			continue;
3057		}
3058
3059		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3060					 &migrate, sizeof(migrate), nodenum,
3061					 &status);
3062		if (ret < 0) {
3063			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
3064			     "MIGRATE_REQUEST to node %u\n", dlm->name,
3065			     migrate.namelen, migrate.name, ret, nodenum);
3066			if (!dlm_is_host_down(ret)) {
3067				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
3068				BUG();
3069			}
3070			clear_bit(nodenum, iter->node_map);
3071			ret = 0;
3072		} else if (status < 0) {
3073			mlog(0, "migrate request (node %u) returned %d!\n",
3074			     nodenum, status);
3075			ret = status;
3076		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3077			/* during the migration request we short-circuited
3078			 * the mastery of the lockres.  make sure we have
3079			 * a mastery ref for nodenum */
3080			mlog(0, "%s:%.*s: need ref for node %u\n",
3081			     dlm->name, res->lockname.len, res->lockname.name,
3082			     nodenum);
3083			spin_lock(&res->spinlock);
3084			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3085			spin_unlock(&res->spinlock);
3086		}
3087	}
3088
3089	if (ret < 0)
3090		mlog_errno(ret);
3091
3092	mlog(0, "returning ret=%d\n", ret);
3093	return ret;
3094}
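
/*
 * Editor's note: the node-iterator idiom used above, shown in isolation.
 * A dlm_node_iter snapshots a node bitmap under dlm->spinlock, after which
 * dlm_node_iter_next() walks the set bits and returns -1 when exhausted.
 */
static void example_walk_domain(struct dlm_ctxt *dlm)
{
	struct dlm_node_iter iter;
	int nodenum;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;	/* skip self */
		/* ... send a message to nodenum here ... */
	}
}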
3095
3096
3097/* if there is an existing mle for this lockres, we now know who the master is.
3098 * (the one who sent us *this* message) we can clear it up right away.
3099 * since the process that put the mle on the list still has a reference to it,
3100 * we can unhash it now, set the master and wake the process.  as a result,
3101 * we will have no mle in the list to start with.  now we can add an mle for
3102 * the migration and this should be the only one found for those scanning the
3103 * list.  */
3104int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3105				void **ret_data)
3106{
3107	struct dlm_ctxt *dlm = data;
3108	struct dlm_lock_resource *res = NULL;
3109	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3110	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3111	const char *name;
3112	unsigned int namelen, hash;
3113	int ret = 0;
3114
3115	if (!dlm_grab(dlm))
3116		return 0;
3117
3118	name = migrate->name;
3119	namelen = migrate->namelen;
3120	hash = dlm_lockid_hash(name, namelen);
3121
3122	/* preallocate.. if this fails, abort */
3123	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3124
3125	if (!mle) {
3126		ret = -ENOMEM;
3127		goto leave;
3128	}
3129
3130	/* check for pre-existing lock */
3131	spin_lock(&dlm->spinlock);
3132	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3133	if (res) {
3134		spin_lock(&res->spinlock);
3135		if (res->state & DLM_LOCK_RES_RECOVERING) {
3136			/* if all is working ok, this can only mean that we got
3137			 * a migrate request from a node that we now see as
3138			 * dead.  what can we do here?  drop it to the floor? */
3139			spin_unlock(&res->spinlock);
3140			mlog(ML_ERROR, "Got a migrate request, but the "
3141			     "lockres is marked as recovering!");
3142			kmem_cache_free(dlm_mle_cache, mle);
3143			ret = -EINVAL; /* need a better solution */
3144			goto unlock;
3145		}
3146		res->state |= DLM_LOCK_RES_MIGRATING;
3147		spin_unlock(&res->spinlock);
3148	}
3149
3150	spin_lock(&dlm->master_lock);
3151	/* ignore status.  only nonzero status would BUG. */
3152	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3153				    name, namelen,
3154				    migrate->new_master,
3155				    migrate->master);
3156
3157	if (ret < 0)
3158		kmem_cache_free(dlm_mle_cache, mle);
3159
3160	spin_unlock(&dlm->master_lock);
3161unlock:
3162	spin_unlock(&dlm->spinlock);
3163
3164	if (oldmle) {
3165		/* master is known, detach if not already detached */
3166		dlm_mle_detach_hb_events(dlm, oldmle);
3167		dlm_put_mle(oldmle);
3168	}
3169
3170	if (res)
3171		dlm_lockres_put(res);
3172leave:
3173	dlm_put(dlm);
3174	return ret;
3175}
3176
3177/* must be holding dlm->spinlock and dlm->master_lock
3178 * when adding a migration mle, we can clear any other mles
3179 * in the master list because we know with certainty that
3180 * the master is "master".  so we remove any old mle from
3181 * the list after setting its master field, and then add
3182 * the new migration mle.  this way we can hold with the rule
3183 * of having only one mle for a given lock name at all times. */
3184static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3185				 struct dlm_lock_resource *res,
3186				 struct dlm_master_list_entry *mle,
3187				 struct dlm_master_list_entry **oldmle,
3188				 const char *name, unsigned int namelen,
3189				 u8 new_master, u8 master)
3190{
3191	int found;
3192	int ret = 0;
3193
3194	*oldmle = NULL;
3195
3196	assert_spin_locked(&dlm->spinlock);
3197	assert_spin_locked(&dlm->master_lock);
3198
3199	/* caller is responsible for any ref taken here on oldmle */
3200	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3201	if (found) {
3202		struct dlm_master_list_entry *tmp = *oldmle;
3203		spin_lock(&tmp->spinlock);
3204		if (tmp->type == DLM_MLE_MIGRATION) {
3205			if (master == dlm->node_num) {
3206				/* ah another process raced me to it */
3207				mlog(0, "tried to migrate %.*s, but some "
3208				     "process beat me to it\n",
3209				     namelen, name);
3210				spin_unlock(&tmp->spinlock);
3211				return -EEXIST;
3212			} else {
3213				/* bad.  2 NODES are trying to migrate! */
3214				mlog(ML_ERROR, "migration error  mle: "
3215				     "master=%u new_master=%u // request: "
3216				     "master=%u new_master=%u // "
3217				     "lockres=%.*s\n",
3218				     tmp->master, tmp->new_master,
3219				     master, new_master,
3220				     namelen, name);
3221				BUG();
3222			}
3223		} else {
3224			/* this is essentially what assert_master does */
3225			tmp->master = master;
3226			atomic_set(&tmp->woken, 1);
3227			wake_up(&tmp->wq);
3228			/* remove it so that only one mle will be found */
3229			__dlm_unlink_mle(dlm, tmp);
3230			__dlm_mle_detach_hb_events(dlm, tmp);
3231			if (tmp->type == DLM_MLE_MASTER) {
3232				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3233				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3234						"telling master to get ref "
3235						"for cleared out mle during "
3236						"migration\n", dlm->name,
3237						namelen, name, master,
3238						new_master);
3239			}
3240		}
3241		spin_unlock(&tmp->spinlock);
3242	}
3243
3244	/* now add a migration mle to the tail of the list */
3245	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3246	mle->new_master = new_master;
3247	/* the new master will be sending an assert master for this.
3248	 * at that point we will get the refmap reference */
3249	mle->master = master;
3250	/* do this for consistency with other mle types */
3251	set_bit(new_master, mle->maybe_map);
3252	__dlm_insert_mle(dlm, mle);
3253
3254	return ret;
3255}
3256
3257/*
3258 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3259 */
3260static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3261					struct dlm_master_list_entry *mle)
3262{
3263	struct dlm_lock_resource *res;
3264
3265	/* Find the lockres associated to the mle and set its owner to UNK */
3266	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3267				   mle->mnamehash);
3268	if (res) {
3269		spin_unlock(&dlm->master_lock);
3270
3271		/* move lockres onto recovery list */
3272		spin_lock(&res->spinlock);
3273		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3274		dlm_move_lockres_to_recovery_list(dlm, res);
3275		spin_unlock(&res->spinlock);
3276		dlm_lockres_put(res);
3277
3278		/* about to get rid of mle, detach from heartbeat */
3279		__dlm_mle_detach_hb_events(dlm, mle);
3280
3281		/* dump the mle */
3282		spin_lock(&dlm->master_lock);
3283		__dlm_put_mle(mle);
3284		spin_unlock(&dlm->master_lock);
3285	}
3286
3287	return res;
3288}
3289
3290static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3291				    struct dlm_master_list_entry *mle)
3292{
3293	__dlm_mle_detach_hb_events(dlm, mle);
3294
3295	spin_lock(&mle->spinlock);
3296	__dlm_unlink_mle(dlm, mle);
3297	atomic_set(&mle->woken, 1);
3298	spin_unlock(&mle->spinlock);
3299
3300	wake_up(&mle->wq);
3301}
3302
3303static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3304				struct dlm_master_list_entry *mle, u8 dead_node)
3305{
3306	int bit;
3307
3308	BUG_ON(mle->type != DLM_MLE_BLOCK);
3309
3310	spin_lock(&mle->spinlock);
3311	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3312	if (bit != dead_node) {
3313		mlog(0, "mle found, but dead node %u would not have been "
3314		     "master\n", dead_node);
3315		spin_unlock(&mle->spinlock);
3316	} else {
3317		/* Must drop the refcount by one since the assert_master will
3318		 * never arrive. This may result in the mle being unlinked and
3319		 * freed, but there may still be a process waiting in the
3320		 * dlmlock path which is fine. */
3321		mlog(0, "node %u was expected master\n", dead_node);
3322		atomic_set(&mle->woken, 1);
3323		spin_unlock(&mle->spinlock);
3324		wake_up(&mle->wq);
3325
3326		/* Do not need events any longer, so detach from heartbeat */
3327		__dlm_mle_detach_hb_events(dlm, mle);
3328		__dlm_put_mle(mle);
3329	}
3330}
3331
3332void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3333{
3334	struct dlm_master_list_entry *mle;
3335	struct dlm_lock_resource *res;
3336	struct hlist_head *bucket;
3337	struct hlist_node *tmp;
3338	unsigned int i;
3339
3340	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3341top:
3342	assert_spin_locked(&dlm->spinlock);
3343
3344	/* clean the master list */
3345	spin_lock(&dlm->master_lock);
3346	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3347		bucket = dlm_master_hash(dlm, i);
3348		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3349			BUG_ON(mle->type != DLM_MLE_BLOCK &&
3350			       mle->type != DLM_MLE_MASTER &&
3351			       mle->type != DLM_MLE_MIGRATION);
3352
3353			/* MASTER mles are initiated locally. The waiting
3354			 * process will notice the node map change shortly.
3355			 * Let that happen as normal. */
3356			if (mle->type == DLM_MLE_MASTER)
3357				continue;
3358
3359			/* BLOCK mles are initiated by other nodes. Need to
3360			 * clean up if the dead node would have been the
3361			 * master. */
3362			if (mle->type == DLM_MLE_BLOCK) {
3363				dlm_clean_block_mle(dlm, mle, dead_node);
3364				continue;
3365			}
3366
3367			/* Everything else is a MIGRATION mle */
3368
3369			/* The rule for MIGRATION mles is that the master
3370			 * becomes UNKNOWN if *either* the original or the new
3371			 * master dies. All UNKNOWN lockres' are sent to
3372			 * whichever node becomes the recovery master. The new
3373			 * master is responsible for determining if there is
3374			 * still a master for this lockres, or if he needs to
3375			 * take over mastery. Either way, this node should
3376			 * expect another message to resolve this. */
3377
3378			if (mle->master != dead_node &&
3379			    mle->new_master != dead_node)
3380				continue;
3381
3382			if (mle->new_master == dead_node && mle->inuse) {
3383				mlog(ML_NOTICE, "%s: target %u died during "
3384						"migration from %u, the MLE is "
3385						"still in use, ignore it!\n",
3386						dlm->name, dead_node,
3387						mle->master);
3388				continue;
3389			}
3390
3391			/* If we have reached this point, this mle needs to be
3392			 * removed from the list and freed. */
3393			dlm_clean_migration_mle(dlm, mle);
3394
3395			mlog(0, "%s: node %u died during migration from "
3396			     "%u to %u!\n", dlm->name, dead_node, mle->master,
3397			     mle->new_master);
3398
3399			/* If we find a lockres associated with the mle, we've
3400			 * hit this rare case that messes up our lock ordering.
3401			 * If so, we need to drop the master lock so that we can
3402			 * take the lockres lock, meaning that we will have to
3403			 * restart from the head of list. */
3404			res = dlm_reset_mleres_owner(dlm, mle);
3405			if (res)
3406				/* restart */
3407				goto top;
3408
3409			/* This may be the last reference */
3410			__dlm_put_mle(mle);
3411		}
3412	}
3413	spin_unlock(&dlm->master_lock);
3414}
3415
3416int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3417			 u8 old_master)
3418{
3419	struct dlm_node_iter iter;
3420	int ret = 0;
3421
3422	spin_lock(&dlm->spinlock);
3423	dlm_node_iter_init(dlm->domain_map, &iter);
3424	clear_bit(old_master, iter.node_map);
3425	clear_bit(dlm->node_num, iter.node_map);
3426	spin_unlock(&dlm->spinlock);
3427
3428	/* ownership of the lockres is changing.  account for the
3429	 * mastery reference here since old_master will briefly have
3430	 * a reference after the migration completes */
3431	spin_lock(&res->spinlock);
3432	dlm_lockres_set_refmap_bit(dlm, res, old_master);
3433	spin_unlock(&res->spinlock);
3434
3435	mlog(0, "now time to do a migrate request to other nodes\n");
3436	ret = dlm_do_migrate_request(dlm, res, old_master,
3437				     dlm->node_num, &iter);
3438	if (ret < 0) {
3439		mlog_errno(ret);
3440		goto leave;
3441	}
3442
3443	mlog(0, "doing assert master of %.*s to all except the original node\n",
3444	     res->lockname.len, res->lockname.name);
3445	/* this call now finishes out the nodemap
3446	 * even if one or more nodes die */
3447	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3448				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3449	if (ret < 0) {
3450		/* no longer need to retry.  all living nodes contacted. */
3451		mlog_errno(ret);
3452		ret = 0;
3453	}
3454
3455	memset(iter.node_map, 0, sizeof(iter.node_map));
3456	set_bit(old_master, iter.node_map);
3457	mlog(0, "doing assert master of %.*s back to %u\n",
3458	     res->lockname.len, res->lockname.name, old_master);
3459	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3460				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3461	if (ret < 0) {
3462		mlog(0, "assert master to original master failed "
3463		     "with %d.\n", ret);
3464		/* the only nonzero status here would be because of
3465		 * a dead original node.  we're done. */
3466		ret = 0;
3467	}
3468
3469	/* all done, set the owner, clear the flag */
3470	spin_lock(&res->spinlock);
3471	dlm_set_lockres_owner(dlm, res, dlm->node_num);
3472	res->state &= ~DLM_LOCK_RES_MIGRATING;
3473	spin_unlock(&res->spinlock);
3474	/* re-dirty it on the new master */
3475	dlm_kick_thread(dlm, res);
3476	wake_up(&res->wq);
3477leave:
3478	return ret;
3479}
3480
3481/*
3482 * LOCKRES AST REFCOUNT
3483 * this is integral to migration
3484 */
3485
3486/* for future intent to call an ast, reserve one ahead of time.
3487 * this should be called only after waiting on the lockres
3488 * with dlm_wait_on_lockres, and while still holding the
3489 * spinlock after the call. */
3490void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3491{
3492	assert_spin_locked(&res->spinlock);
3493	if (res->state & DLM_LOCK_RES_MIGRATING) {
3494		__dlm_print_one_lock_resource(res);
3495	}
3496	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3497
3498	atomic_inc(&res->asts_reserved);
3499}
3500
3501/*
3502 * used to drop the reserved ast, either because it went unused,
3503 * or because the ast/bast was actually called.
3504 *
3505 * also, if there is a pending migration on this lockres,
3506 * and this was the last pending ast on the lockres,
3507 * atomically set the MIGRATING flag before we drop the lock.
3508 * this is how we ensure that migration can proceed with no
3509 * asts in progress.  note that it is ok if the state of the
3510 * queues is such that a lock should be granted in the future
3511 * or that a bast should be fired, because the new master will
3512 * shuffle the lists on this lockres as soon as it is migrated.
3513 */
3514void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3515			     struct dlm_lock_resource *res)
3516{
3517	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3518		return;
3519
3520	if (!res->migration_pending) {
3521		spin_unlock(&res->spinlock);
3522		return;
3523	}
3524
3525	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3526	res->migration_pending = 0;
3527	res->state |= DLM_LOCK_RES_MIGRATING;
3528	spin_unlock(&res->spinlock);
3529	wake_up(&res->wq);
3530	wake_up(&dlm->migration_wq);
3531}
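
/*
 * Editor's note: hedged sketch of the reserve/release pairing described
 * above.  Every reservation must be matched by exactly one release; the
 * final release is what allows a pending migration to set MIGRATING.
 */
static void example_ast_window(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);		/* migration now held off */
	spin_unlock(&res->spinlock);

	/* ... queue and deliver the ast/bast here ... */

	dlm_lockres_release_ast(dlm, res);	/* may unblock migration */
}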
3532
3533void dlm_force_free_mles(struct dlm_ctxt *dlm)
3534{
3535	int i;
3536	struct hlist_head *bucket;
3537	struct dlm_master_list_entry *mle;
3538	struct hlist_node *tmp;
3539
3540	/*
3541	 * We notified all other nodes that we are exiting the domain and
3542 * set the dlm state to DLM_CTXT_LEAVING. If any mles are still
3543	 * around we force free them and wake any processes that are waiting
3544	 * on the mles
3545	 */
3546	spin_lock(&dlm->spinlock);
3547	spin_lock(&dlm->master_lock);
3548
3549	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3550	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3551
3552	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3553		bucket = dlm_master_hash(dlm, i);
3554		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3555			if (mle->type != DLM_MLE_BLOCK) {
3556				mlog(ML_ERROR, "bad mle: %p\n", mle);
3557				dlm_print_one_mle(mle);
3558			}
3559			atomic_set(&mle->woken, 1);
3560			wake_up(&mle->wq);
3561
3562			__dlm_unlink_mle(dlm, mle);
3563			__dlm_mle_detach_hb_events(dlm, mle);
3564			__dlm_put_mle(mle);
3565		}
3566	}
3567	spin_unlock(&dlm->master_lock);
3568	spin_unlock(&dlm->spinlock);
3569}
 132int dlm_is_host_down(int errno)
 133{
 134	switch (errno) {
 135		case -EBADF:
 136		case -ECONNREFUSED:
 137		case -ENOTCONN:
 138		case -ECONNRESET:
 139		case -EPIPE:
 140		case -EHOSTDOWN:
 141		case -EHOSTUNREACH:
 142		case -ETIMEDOUT:
 143		case -ECONNABORTED:
 144		case -ENETDOWN:
 145		case -ENETUNREACH:
 146		case -ENETRESET:
 147		case -ESHUTDOWN:
 148		case -ENOPROTOOPT:
 149		case -EINVAL:   /* if returned from our tcp code,
 150				   this means there is no socket */
 151			return 1;
 152	}
 153	return 0;
 154}
 155
 156
 157/*
 158 * MASTER LIST FUNCTIONS
 159 */
 160
 161
 162/*
 163 * regarding master list entries and heartbeat callbacks:
 164 *
 165 * in order to avoid sleeping and allocation that occurs in
 166 * heartbeat, master list entries are simply attached to the
 167 * dlm's established heartbeat callbacks.  the mle is attached
 168 * when it is created, and since the dlm->spinlock is held at
 169 * that time, any heartbeat event will be properly discovered
 170 * by the mle.  the mle needs to be detached from the
 171 * dlm->mle_hb_events list as soon as heartbeat events are no
 172 * longer useful to the mle, and before the mle is freed.
 173 *
 174 * as a general rule, heartbeat events are no longer needed by
 175 * the mle once an "answer" regarding the lock master has been
 176 * received.
 177 */
 178static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
 179					      struct dlm_master_list_entry *mle)
 180{
 181	assert_spin_locked(&dlm->spinlock);
 182
 183	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
 184}
 185
 186
 187static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 188					      struct dlm_master_list_entry *mle)
 189{
 190	if (!list_empty(&mle->hb_events))
 191		list_del_init(&mle->hb_events);
 192}
 193
 194
 195static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 196					    struct dlm_master_list_entry *mle)
 197{
 198	spin_lock(&dlm->spinlock);
 199	__dlm_mle_detach_hb_events(dlm, mle);
 200	spin_unlock(&dlm->spinlock);
 201}
 202
 203static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
 204{
 205	struct dlm_ctxt *dlm;
 206	dlm = mle->dlm;
 207
 208	assert_spin_locked(&dlm->spinlock);
 209	assert_spin_locked(&dlm->master_lock);
 210	mle->inuse++;
 211	kref_get(&mle->mle_refs);
 212}
 213
 214static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
 215{
 216	struct dlm_ctxt *dlm;
 217	dlm = mle->dlm;
 218
 219	spin_lock(&dlm->spinlock);
 220	spin_lock(&dlm->master_lock);
 221	mle->inuse--;
 222	__dlm_put_mle(mle);
 223	spin_unlock(&dlm->master_lock);
 224	spin_unlock(&dlm->spinlock);
 225
 226}
 227
 228/* remove from list and free */
 229static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 230{
 231	struct dlm_ctxt *dlm;
 232	dlm = mle->dlm;
 233
 234	assert_spin_locked(&dlm->spinlock);
 235	assert_spin_locked(&dlm->master_lock);
 236	if (!atomic_read(&mle->mle_refs.refcount)) {
 237		/* this may or may not crash, but who cares.
 238		 * it's a BUG. */
 239		mlog(ML_ERROR, "bad mle: %p\n", mle);
 240		dlm_print_one_mle(mle);
 241		BUG();
 242	} else
 243		kref_put(&mle->mle_refs, dlm_mle_release);
 244}
 245
 246
 247/* must not have any spinlocks coming in */
 248static void dlm_put_mle(struct dlm_master_list_entry *mle)
 249{
 250	struct dlm_ctxt *dlm;
 251	dlm = mle->dlm;
 252
 253	spin_lock(&dlm->spinlock);
 254	spin_lock(&dlm->master_lock);
 255	__dlm_put_mle(mle);
 256	spin_unlock(&dlm->master_lock);
 257	spin_unlock(&dlm->spinlock);
 258}
 259
 260static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
 261{
 262	kref_get(&mle->mle_refs);
 263}
 264
 265static void dlm_init_mle(struct dlm_master_list_entry *mle,
 266			enum dlm_mle_type type,
 267			struct dlm_ctxt *dlm,
 268			struct dlm_lock_resource *res,
 269			const char *name,
 270			unsigned int namelen)
 271{
 272	assert_spin_locked(&dlm->spinlock);
 273
 274	mle->dlm = dlm;
 275	mle->type = type;
 276	INIT_HLIST_NODE(&mle->master_hash_node);
 277	INIT_LIST_HEAD(&mle->hb_events);
 278	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
 279	spin_lock_init(&mle->spinlock);
 280	init_waitqueue_head(&mle->wq);
 281	atomic_set(&mle->woken, 0);
 282	kref_init(&mle->mle_refs);
 283	memset(mle->response_map, 0, sizeof(mle->response_map));
 284	mle->master = O2NM_MAX_NODES;
 285	mle->new_master = O2NM_MAX_NODES;
 286	mle->inuse = 0;
 287
 288	BUG_ON(mle->type != DLM_MLE_BLOCK &&
 289	       mle->type != DLM_MLE_MASTER &&
 290	       mle->type != DLM_MLE_MIGRATION);
 291
 292	if (mle->type == DLM_MLE_MASTER) {
 293		BUG_ON(!res);
 294		mle->mleres = res;
 295		memcpy(mle->mname, res->lockname.name, res->lockname.len);
 296		mle->mnamelen = res->lockname.len;
 297		mle->mnamehash = res->lockname.hash;
 298	} else {
 299		BUG_ON(!name);
 300		mle->mleres = NULL;
 301		memcpy(mle->mname, name, namelen);
 302		mle->mnamelen = namelen;
 303		mle->mnamehash = dlm_lockid_hash(name, namelen);
 304	}
 305
 306	atomic_inc(&dlm->mle_tot_count[mle->type]);
 307	atomic_inc(&dlm->mle_cur_count[mle->type]);
 308
 309	/* copy off the node_map and register hb callbacks on our copy */
 310	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
 311	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
 312	clear_bit(dlm->node_num, mle->vote_map);
 313	clear_bit(dlm->node_num, mle->node_map);
 314
 315	/* attach the mle to the domain node up/down events */
 316	__dlm_mle_attach_hb_events(dlm, mle);
 317}
 318
 319void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 320{
 321	assert_spin_locked(&dlm->spinlock);
 322	assert_spin_locked(&dlm->master_lock);
 323
 324	if (!hlist_unhashed(&mle->master_hash_node))
 325		hlist_del_init(&mle->master_hash_node);
 326}
 327
 328void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 329{
 330	struct hlist_head *bucket;
 331
 332	assert_spin_locked(&dlm->master_lock);
 333
 334	bucket = dlm_master_hash(dlm, mle->mnamehash);
 335	hlist_add_head(&mle->master_hash_node, bucket);
 336}
 337
 338/* returns 1 if found, 0 if not */
 339static int dlm_find_mle(struct dlm_ctxt *dlm,
 340			struct dlm_master_list_entry **mle,
 341			char *name, unsigned int namelen)
 342{
 343	struct dlm_master_list_entry *tmpmle;
 344	struct hlist_head *bucket;
 345	unsigned int hash;
 346
 347	assert_spin_locked(&dlm->master_lock);
 348
 349	hash = dlm_lockid_hash(name, namelen);
 350	bucket = dlm_master_hash(dlm, hash);
 351	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
 352		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 353			continue;
 354		dlm_get_mle(tmpmle);
 355		*mle = tmpmle;
 356		return 1;
 357	}
 358	return 0;
 359}
 360
 361void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 362{
 363	struct dlm_master_list_entry *mle;
 364
 365	assert_spin_locked(&dlm->spinlock);
 366
 367	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 368		if (node_up)
 369			dlm_mle_node_up(dlm, mle, NULL, idx);
 370		else
 371			dlm_mle_node_down(dlm, mle, NULL, idx);
 372	}
 373}
 374
 375static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 376			      struct dlm_master_list_entry *mle,
 377			      struct o2nm_node *node, int idx)
 378{
 379	spin_lock(&mle->spinlock);
 380
 381	if (!test_bit(idx, mle->node_map))
 382		mlog(0, "node %u already removed from nodemap!\n", idx);
 383	else
 384		clear_bit(idx, mle->node_map);
 385
 386	spin_unlock(&mle->spinlock);
 387}
 388
 389static void dlm_mle_node_up(struct dlm_ctxt *dlm,
 390			    struct dlm_master_list_entry *mle,
 391			    struct o2nm_node *node, int idx)
 392{
 393	spin_lock(&mle->spinlock);
 394
 395	if (test_bit(idx, mle->node_map))
 396		mlog(0, "node %u already in node map!\n", idx);
 397	else
 398		set_bit(idx, mle->node_map);
 399
 400	spin_unlock(&mle->spinlock);
 401}
 402
 403
 404int dlm_init_mle_cache(void)
 405{
 406	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
 407					  sizeof(struct dlm_master_list_entry),
 408					  0, SLAB_HWCACHE_ALIGN,
 409					  NULL);
 410	if (dlm_mle_cache == NULL)
 411		return -ENOMEM;
 412	return 0;
 413}
 414
 415void dlm_destroy_mle_cache(void)
 416{
 417	if (dlm_mle_cache)
 418		kmem_cache_destroy(dlm_mle_cache);
 419}
 420
 421static void dlm_mle_release(struct kref *kref)
 422{
 423	struct dlm_master_list_entry *mle;
 424	struct dlm_ctxt *dlm;
 425
 426	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
 427	dlm = mle->dlm;
 428
 429	assert_spin_locked(&dlm->spinlock);
 430	assert_spin_locked(&dlm->master_lock);
 431
 432	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
 433	     mle->type);
 434
 435	/* remove from list if not already */
 436	__dlm_unlink_mle(dlm, mle);
 437
 438	/* detach the mle from the domain node up/down events */
 439	__dlm_mle_detach_hb_events(dlm, mle);
 440
 441	atomic_dec(&dlm->mle_cur_count[mle->type]);
 442
 443	/* NOTE: kfree under spinlock here.
 444	 * if this is bad, we can move this to a freelist. */
 445	kmem_cache_free(dlm_mle_cache, mle);
 446}
 447
 448
 449/*
 450 * LOCK RESOURCE FUNCTIONS
 451 */
 452
 453int dlm_init_master_caches(void)
 454{
 455	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
 456					      sizeof(struct dlm_lock_resource),
 457					      0, SLAB_HWCACHE_ALIGN, NULL);
 458	if (!dlm_lockres_cache)
 459		goto bail;
 460
 461	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
 462					       DLM_LOCKID_NAME_MAX, 0,
 463					       SLAB_HWCACHE_ALIGN, NULL);
 464	if (!dlm_lockname_cache)
 465		goto bail;
 466
 467	return 0;
 468bail:
 469	dlm_destroy_master_caches();
 470	return -ENOMEM;
 471}
 472
 473void dlm_destroy_master_caches(void)
 474{
 475	if (dlm_lockname_cache) {
 476		kmem_cache_destroy(dlm_lockname_cache);
 477		dlm_lockname_cache = NULL;
 478	}
 479
 480	if (dlm_lockres_cache) {
 481		kmem_cache_destroy(dlm_lockres_cache);
 482		dlm_lockres_cache = NULL;
 483	}
 484}
 485
 486static void dlm_lockres_release(struct kref *kref)
 487{
 488	struct dlm_lock_resource *res;
 489	struct dlm_ctxt *dlm;
 490
 491	res = container_of(kref, struct dlm_lock_resource, refs);
 492	dlm = res->dlm;
 493
 494	/* This should not happen -- all lockres' have a name
 495	 * associated with them at init time. */
 496	BUG_ON(!res->lockname.name);
 497
 498	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 499	     res->lockname.name);
 500
 501	atomic_dec(&dlm->res_cur_count);
 502
 503	if (!hlist_unhashed(&res->hash_node) ||
 504	    !list_empty(&res->granted) ||
 505	    !list_empty(&res->converting) ||
 506	    !list_empty(&res->blocked) ||
 507	    !list_empty(&res->dirty) ||
 508	    !list_empty(&res->recovering) ||
 509	    !list_empty(&res->purge)) {
 510		mlog(ML_ERROR,
 511		     "Going to BUG for resource %.*s."
 512		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
 513		     res->lockname.len, res->lockname.name,
 514		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
 515		     !list_empty(&res->granted) ? 'G' : ' ',
 516		     !list_empty(&res->converting) ? 'C' : ' ',
 517		     !list_empty(&res->blocked) ? 'B' : ' ',
 518		     !list_empty(&res->dirty) ? 'D' : ' ',
 519		     !list_empty(&res->recovering) ? 'R' : ' ',
 520		     !list_empty(&res->purge) ? 'P' : ' ');
 521
 522		dlm_print_one_lock_resource(res);
 523	}
 524
 525	/* By the time we're ready to blow this guy away, we shouldn't
 526	 * be on any lists. */
 527	BUG_ON(!hlist_unhashed(&res->hash_node));
 528	BUG_ON(!list_empty(&res->granted));
 529	BUG_ON(!list_empty(&res->converting));
 530	BUG_ON(!list_empty(&res->blocked));
 531	BUG_ON(!list_empty(&res->dirty));
 532	BUG_ON(!list_empty(&res->recovering));
 533	BUG_ON(!list_empty(&res->purge));
 534
 535	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 536
 537	kmem_cache_free(dlm_lockres_cache, res);
 538}
 539
 540void dlm_lockres_put(struct dlm_lock_resource *res)
 541{
 542	kref_put(&res->refs, dlm_lockres_release);
 543}
 544
 545static void dlm_init_lockres(struct dlm_ctxt *dlm,
 546			     struct dlm_lock_resource *res,
 547			     const char *name, unsigned int namelen)
 548{
 549	char *qname;
 550
 551	/* If we memset here, we lose our reference to the kmalloc'd
 552	 * res->lockname.name, so be sure to init every field
 553	 * correctly! */
 554
 555	qname = (char *) res->lockname.name;
 556	memcpy(qname, name, namelen);
 557
 558	res->lockname.len = namelen;
 559	res->lockname.hash = dlm_lockid_hash(name, namelen);
 560
 561	init_waitqueue_head(&res->wq);
 562	spin_lock_init(&res->spinlock);
 563	INIT_HLIST_NODE(&res->hash_node);
 564	INIT_LIST_HEAD(&res->granted);
 565	INIT_LIST_HEAD(&res->converting);
 566	INIT_LIST_HEAD(&res->blocked);
 567	INIT_LIST_HEAD(&res->dirty);
 568	INIT_LIST_HEAD(&res->recovering);
 569	INIT_LIST_HEAD(&res->purge);
 570	INIT_LIST_HEAD(&res->tracking);
 571	atomic_set(&res->asts_reserved, 0);
 572	res->migration_pending = 0;
 573	res->inflight_locks = 0;
 574	res->inflight_assert_workers = 0;
 575
 576	res->dlm = dlm;
 577
 578	kref_init(&res->refs);
 579
 580	atomic_inc(&dlm->res_tot_count);
 581	atomic_inc(&dlm->res_cur_count);
 582
 583	/* just for consistency */
 584	spin_lock(&res->spinlock);
 585	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
 586	spin_unlock(&res->spinlock);
 587
 588	res->state = DLM_LOCK_RES_IN_PROGRESS;
 589
 590	res->last_used = 0;
 591
 592	spin_lock(&dlm->spinlock);
 593	list_add_tail(&res->tracking, &dlm->tracking_list);
 594	spin_unlock(&dlm->spinlock);
 595
 596	memset(res->lvb, 0, DLM_LVB_LEN);
 597	memset(res->refmap, 0, sizeof(res->refmap));
 598}
 599
 600struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
 601				   const char *name,
 602				   unsigned int namelen)
 603{
 604	struct dlm_lock_resource *res = NULL;
 605
 606	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
 607	if (!res)
 608		goto error;
 609
 610	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
 611	if (!res->lockname.name)
 612		goto error;
 613
 614	dlm_init_lockres(dlm, res, name, namelen);
 615	return res;
 616
 617error:
 618	if (res)
 619		kmem_cache_free(dlm_lockres_cache, res);
 620	return NULL;
 621}
 622
 623void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
 624				struct dlm_lock_resource *res, int bit)
 625{
 626	assert_spin_locked(&res->spinlock);
 627
 628	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
 629	     res->lockname.name, bit, __builtin_return_address(0));
 630
 631	set_bit(bit, res->refmap);
 632}
 633
 634void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
 635				  struct dlm_lock_resource *res, int bit)
 636{
 637	assert_spin_locked(&res->spinlock);
 638
 639	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
 640	     res->lockname.name, bit, __builtin_return_address(0));
 641
 642	clear_bit(bit, res->refmap);
 643}
 644
 645static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 646				   struct dlm_lock_resource *res)
 647{
 648	res->inflight_locks++;
 649
 650	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
 651	     res->lockname.len, res->lockname.name, res->inflight_locks,
 652	     __builtin_return_address(0));
 653}
 654
 655void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 656				   struct dlm_lock_resource *res)
 657{
 658	assert_spin_locked(&res->spinlock);
 659	__dlm_lockres_grab_inflight_ref(dlm, res);
 660}
 661
 662void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 663				   struct dlm_lock_resource *res)
 664{
 665	assert_spin_locked(&res->spinlock);
 666
 667	BUG_ON(res->inflight_locks == 0);
 668
 669	res->inflight_locks--;
 670
 671	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
 672	     res->lockname.len, res->lockname.name, res->inflight_locks,
 673	     __builtin_return_address(0));
 674
 675	wake_up(&res->wq);
 676}
 677
 678void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
 679		struct dlm_lock_resource *res)
 680{
 681	assert_spin_locked(&res->spinlock);
 682	res->inflight_assert_workers++;
 683	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
 684			dlm->name, res->lockname.len, res->lockname.name,
 685			res->inflight_assert_workers);
 686}
 687
 688static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 689		struct dlm_lock_resource *res)
 690{
 691	assert_spin_locked(&res->spinlock);
 692	BUG_ON(res->inflight_assert_workers == 0);
 693	res->inflight_assert_workers--;
 694	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
 695			dlm->name, res->lockname.len, res->lockname.name,
 696			res->inflight_assert_workers);
 697}
 698
 699static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 700		struct dlm_lock_resource *res)
 701{
 702	spin_lock(&res->spinlock);
 703	__dlm_lockres_drop_inflight_worker(dlm, res);
 704	spin_unlock(&res->spinlock);
 705}
 706
 707/*
 708 * lookup a lock resource by name.
 709 * may already exist in the hashtable.
 710 * lockid is null terminated
 711 *
 712 * if not, allocate enough for the lockres and for
 713 * the temporary structure used in doing the mastering.
 714 *
 715 * also, do a lookup in the dlm->master_list to see
 716 * if another node has begun mastering the same lock.
 717 * if so, there should be a block entry in there
 718 * for this name, and we should *not* attempt to master
 719 * the lock here.   need to wait around for that node
 720 * to assert_master (or die).
 721 *
 722 */
 723struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 724					  const char *lockid,
 725					  int namelen,
 726					  int flags)
 727{
 728	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
 729	struct dlm_master_list_entry *mle = NULL;
 730	struct dlm_master_list_entry *alloc_mle = NULL;
 731	int blocked = 0;
 732	int ret, nodenum;
 733	struct dlm_node_iter iter;
 734	unsigned int hash;
 735	int tries = 0;
 736	int bit, wait_on_recovery = 0;
 737
 738	BUG_ON(!lockid);
 739
 740	hash = dlm_lockid_hash(lockid, namelen);
 741
 742	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
 743
 744lookup:
 745	spin_lock(&dlm->spinlock);
 746	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
 747	if (tmpres) {
 748		spin_unlock(&dlm->spinlock);
 749		spin_lock(&tmpres->spinlock);
 750
 751		/*
 752		 * Right after dlm spinlock was released, dlm_thread could have
 753		 * purged the lockres. Check if lockres got unhashed. If so
 754		 * start over.
 755		 */
 756		if (hlist_unhashed(&tmpres->hash_node)) {
 757			spin_unlock(&tmpres->spinlock);
 758			dlm_lockres_put(tmpres);
 759			tmpres = NULL;
 760			goto lookup;
 761		}
 762
 763		/* Wait on the thread that is mastering the resource */
 764		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 765			__dlm_wait_on_lockres(tmpres);
 766			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
 767			spin_unlock(&tmpres->spinlock);
 768			dlm_lockres_put(tmpres);
 769			tmpres = NULL;
 770			goto lookup;
 771		}
 772
 773		/* Wait on the resource purge to complete before continuing */
 774		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
 775			BUG_ON(tmpres->owner == dlm->node_num);
 776			__dlm_wait_on_lockres_flags(tmpres,
 777						    DLM_LOCK_RES_DROPPING_REF);
 778			spin_unlock(&tmpres->spinlock);
 779			dlm_lockres_put(tmpres);
 780			tmpres = NULL;
 781			goto lookup;
 782		}
 783
 784		/* Grab inflight ref to pin the resource */
 785		dlm_lockres_grab_inflight_ref(dlm, tmpres);
 786
 787		spin_unlock(&tmpres->spinlock);
 788		if (res) {
 789			spin_lock(&dlm->track_lock);
 790			if (!list_empty(&res->tracking))
 791				list_del_init(&res->tracking);
 792			else
 793				mlog(ML_ERROR, "Resource %.*s not "
 794						"on the Tracking list\n",
 795						res->lockname.len,
 796						res->lockname.name);
 797			spin_unlock(&dlm->track_lock);
 798			dlm_lockres_put(res);
 799		}
 800		res = tmpres;
 801		goto leave;
 802	}
 803
 804	if (!res) {
 805		spin_unlock(&dlm->spinlock);
 806		mlog(0, "allocating a new resource\n");
 807		/* nothing found and we need to allocate one. */
 808		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
 809		if (!alloc_mle)
 810			goto leave;
 811		res = dlm_new_lockres(dlm, lockid, namelen);
 812		if (!res)
 813			goto leave;
 814		goto lookup;
 815	}
 816
 817	mlog(0, "no lockres found, allocated our own: %p\n", res);
 818
 819	if (flags & LKM_LOCAL) {
 820		/* caller knows it's safe to assume it's not mastered elsewhere
 821		 * DONE!  return right away */
 822		spin_lock(&res->spinlock);
 823		dlm_change_lockres_owner(dlm, res, dlm->node_num);
 824		__dlm_insert_lockres(dlm, res);
 825		dlm_lockres_grab_inflight_ref(dlm, res);
 826		spin_unlock(&res->spinlock);
 827		spin_unlock(&dlm->spinlock);
 828		/* lockres still marked IN_PROGRESS */
 829		goto wake_waiters;
 830	}
 831
 832	/* check master list to see if another node has started mastering it */
 833	spin_lock(&dlm->master_lock);
 834
 835	/* if we found a block, wait for lock to be mastered by another node */
 836	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
 837	if (blocked) {
 838		int mig;
 839		if (mle->type == DLM_MLE_MASTER) {
 840			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
 841			BUG();
 842		}
 843		mig = (mle->type == DLM_MLE_MIGRATION);
 844		/* if there is a migration in progress, let the migration
 845		 * finish before continuing.  we can wait for the absence
 846		 * of the MIGRATION mle: either the migrate finished or
 847		 * one of the nodes died and the mle was cleaned up.
 848		 * if there is a BLOCK here, but it already has a master
 849		 * set, we are too late.  the master does not have a ref
 850		 * for us in the refmap.  detach the mle and drop it.
 851		 * either way, go back to the top and start over. */
 852		if (mig || mle->master != O2NM_MAX_NODES) {
 853			BUG_ON(mig && mle->master == dlm->node_num);
 854			/* we arrived too late.  the master does not
 855			 * have a ref for us. retry. */
 856			mlog(0, "%s:%.*s: late on %s\n",
 857			     dlm->name, namelen, lockid,
 858			     mig ?  "MIGRATION" : "BLOCK");
 859			spin_unlock(&dlm->master_lock);
 860			spin_unlock(&dlm->spinlock);
 861
 862			/* master is known, detach */
 863			if (!mig)
 864				dlm_mle_detach_hb_events(dlm, mle);
 865			dlm_put_mle(mle);
 866			mle = NULL;
 867			/* this is lame, but we can't wait on either
 868			 * the mle or lockres waitqueue here */
 869			if (mig)
 870				msleep(100);
 871			goto lookup;
 872		}
 873	} else {
 874		/* go ahead and try to master lock on this node */
 875		mle = alloc_mle;
 876		/* make sure this does not get freed below */
 877		alloc_mle = NULL;
 878		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 879		set_bit(dlm->node_num, mle->maybe_map);
 880		__dlm_insert_mle(dlm, mle);
 881
 882		/* still holding the dlm spinlock, check the recovery map
 883		 * to see if there are any nodes that still need to be
 884		 * considered.  these will not appear in the mle nodemap
 885		 * but they might own this lockres.  wait on them. */
 886		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 887		if (bit < O2NM_MAX_NODES) {
 888			mlog(0, "%s: res %.*s, At least one node (%d) "
 889			     "to recover before lock mastery can begin\n",
 890			     dlm->name, namelen, (char *)lockid, bit);
 891			wait_on_recovery = 1;
 892		}
 893	}
 894
 895	/* at this point there is either a DLM_MLE_BLOCK or a
 896	 * DLM_MLE_MASTER on the master list, so it's safe to add the
 897	 * lockres to the hashtable.  anyone who finds the lock will
 898	 * still have to wait on the IN_PROGRESS. */
 899
 900	/* finally add the lockres to its hash bucket */
 901	__dlm_insert_lockres(dlm, res);
 902
 903	/* since this lockres is new it doesn't not require the spinlock */
 904	__dlm_lockres_grab_inflight_ref(dlm, res);
 905
 906	/* get an extra ref on the mle in case this is a BLOCK
 907	 * if so, the creator of the BLOCK may try to put the last
 908	 * ref at this time in the assert master handler, so we
 909	 * need an extra one to keep from a bad ptr deref. */
 910	dlm_get_mle_inuse(mle);
 911	spin_unlock(&dlm->master_lock);
 912	spin_unlock(&dlm->spinlock);
 913
 914redo_request:
 915	while (wait_on_recovery) {
 916		/* any cluster changes that occurred after dropping the
 917		 * dlm spinlock would be detectable be a change on the mle,
 918		 * so we only need to clear out the recovery map once. */
 919		if (dlm_is_recovery_lock(lockid, namelen)) {
 920			mlog(0, "%s: Recovery map is not empty, but must "
 921			     "master $RECOVERY lock now\n", dlm->name);
 922			if (!dlm_pre_master_reco_lockres(dlm, res))
 923				wait_on_recovery = 0;
 924			else {
 925				mlog(0, "%s: waiting 500ms for heartbeat state "
 926				    "change\n", dlm->name);
 927				msleep(500);
 928			}
 929			continue;
 930		}
 931
 932		dlm_kick_recovery_thread(dlm);
 933		msleep(1000);
 934		dlm_wait_for_recovery(dlm);
 935
 936		spin_lock(&dlm->spinlock);
 937		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 938		if (bit < O2NM_MAX_NODES) {
 939			mlog(0, "%s: res %.*s, At least one node (%d) "
 940			     "to recover before lock mastery can begin\n",
 941			     dlm->name, namelen, (char *)lockid, bit);
 942			wait_on_recovery = 1;
 943		} else
 944			wait_on_recovery = 0;
 945		spin_unlock(&dlm->spinlock);
 946
 947		if (wait_on_recovery)
 948			dlm_wait_for_node_recovery(dlm, bit, 10000);
 949	}
 950
 951	/* must wait for lock to be mastered elsewhere */
 952	if (blocked)
 953		goto wait;
 954
 955	ret = -EINVAL;
 956	dlm_node_iter_init(mle->vote_map, &iter);
 957	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 958		ret = dlm_do_master_request(res, mle, nodenum);
 959		if (ret < 0)
 960			mlog_errno(ret);
 961		if (mle->master != O2NM_MAX_NODES) {
 962			/* found a master ! */
 963			if (mle->master <= nodenum)
 964				break;
 965			/* if our master request has not reached the master
 966			 * yet, keep going until it does.  this is how the
 967			 * master will know that asserts are needed back to
 968			 * the lower nodes. */
 969			mlog(0, "%s: res %.*s, Requests only up to %u but "
 970			     "master is %u, keep going\n", dlm->name, namelen,
 971			     lockid, nodenum, mle->master);
 972		}
 973	}
 974
 975wait:
 976	/* keep going until the response map includes all nodes */
 977	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 978	if (ret < 0) {
 979		wait_on_recovery = 1;
 980		mlog(0, "%s: res %.*s, Node map changed, redo the master "
 981		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 982		     res->lockname.name, blocked);
 983		if (++tries > 20) {
 984			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
 985			     "dlm_wait_for_lock_mastery, blocked = %d\n",
 986			     dlm->name, res->lockname.len,
 987			     res->lockname.name, blocked);
 988			dlm_print_one_lock_resource(res);
 989			dlm_print_one_mle(mle);
 990			tries = 0;
 991		}
 992		goto redo_request;
 993	}
 994
 995	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
 996	     res->lockname.name, res->owner);
 997	/* make sure we never continue without this */
 998	BUG_ON(res->owner == O2NM_MAX_NODES);
 999
1000	/* master is known, detach if not already detached */
1001	dlm_mle_detach_hb_events(dlm, mle);
1002	dlm_put_mle(mle);
1003	/* put the extra ref */
1004	dlm_put_mle_inuse(mle);
1005
1006wake_waiters:
1007	spin_lock(&res->spinlock);
1008	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1009	spin_unlock(&res->spinlock);
1010	wake_up(&res->wq);
1011
1012leave:
1013	/* need to free the unused mle */
1014	if (alloc_mle)
1015		kmem_cache_free(dlm_mle_cache, alloc_mle);
1016
1017	return res;
1018}
1019
1020
1021#define DLM_MASTERY_TIMEOUT_MS   5000
1022
1023static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
1024				     struct dlm_lock_resource *res,
1025				     struct dlm_master_list_entry *mle,
1026				     int *blocked)
1027{
1028	u8 m;
1029	int ret, bit;
1030	int map_changed, voting_done;
1031	int assert, sleep;
1032
1033recheck:
1034	ret = 0;
1035	assert = 0;
1036
1037	/* check if another node has already become the owner */
1038	spin_lock(&res->spinlock);
1039	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1040		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1041		     res->lockname.len, res->lockname.name, res->owner);
1042		spin_unlock(&res->spinlock);
1043		/* this will cause the master to re-assert across
1044		 * the whole cluster, freeing up mles */
1045		if (res->owner != dlm->node_num) {
1046			ret = dlm_do_master_request(res, mle, res->owner);
1047			if (ret < 0) {
1048				/* give recovery a chance to run */
1049				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1050				msleep(500);
1051				goto recheck;
1052			}
1053		}
1054		ret = 0;
1055		goto leave;
1056	}
1057	spin_unlock(&res->spinlock);
1058
1059	spin_lock(&mle->spinlock);
1060	m = mle->master;
1061	map_changed = (memcmp(mle->vote_map, mle->node_map,
1062			      sizeof(mle->vote_map)) != 0);
1063	voting_done = (memcmp(mle->vote_map, mle->response_map,
1064			     sizeof(mle->vote_map)) == 0);
1065
1066	/* restart if we hit any errors */
1067	if (map_changed) {
1068		int b;
1069		mlog(0, "%s: %.*s: node map changed, restarting\n",
1070		     dlm->name, res->lockname.len, res->lockname.name);
1071		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1072		b = (mle->type == DLM_MLE_BLOCK);
1073		if ((*blocked && !b) || (!*blocked && b)) {
1074			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1075			     dlm->name, res->lockname.len, res->lockname.name,
1076			     *blocked, b);
1077			*blocked = b;
1078		}
1079		spin_unlock(&mle->spinlock);
1080		if (ret < 0) {
1081			mlog_errno(ret);
1082			goto leave;
1083		}
1084		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1085		     "rechecking now\n", dlm->name, res->lockname.len,
1086		     res->lockname.name);
1087		goto recheck;
1088	} else {
1089		if (!voting_done) {
1090			mlog(0, "map not changed and voting not done "
1091			     "for %s:%.*s\n", dlm->name, res->lockname.len,
1092			     res->lockname.name);
1093		}
1094	}
1095
1096	if (m != O2NM_MAX_NODES) {
1097		/* another node has done an assert!
1098		 * all done! */
1099		sleep = 0;
1100	} else {
1101		sleep = 1;
1102		/* have all nodes responded? */
1103		if (voting_done && !*blocked) {
1104			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1105			if (dlm->node_num <= bit) {
1106				/* my node number is lowest.
1107			 	 * now tell other nodes that I am
1108				 * mastering this. */
1109				mle->master = dlm->node_num;
1110				/* ref was grabbed in get_lock_resource
1111				 * will be dropped in dlmlock_master */
1112				assert = 1;
1113				sleep = 0;
1114			}
1115			/* if voting is done, but we have not received
1116			 * an assert master yet, we must sleep */
1117		}
1118	}
1119
1120	spin_unlock(&mle->spinlock);
1121
1122	/* sleep if we haven't finished voting yet */
1123	if (sleep) {
1124		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1125
1126		/*
1127		if (atomic_read(&mle->mle_refs.refcount) < 2)
1128			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1129			atomic_read(&mle->mle_refs.refcount),
1130			res->lockname.len, res->lockname.name);
1131		*/
1132		atomic_set(&mle->woken, 0);
1133		(void)wait_event_timeout(mle->wq,
1134					 (atomic_read(&mle->woken) == 1),
1135					 timeo);
1136		if (res->owner == O2NM_MAX_NODES) {
1137			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1138			     res->lockname.len, res->lockname.name);
1139			goto recheck;
1140		}
1141		mlog(0, "done waiting, master is %u\n", res->owner);
1142		ret = 0;
1143		goto leave;
1144	}
1145
1146	ret = 0;   /* done */
1147	if (assert) {
1148		m = dlm->node_num;
1149		mlog(0, "about to master %.*s here, this=%u\n",
1150		     res->lockname.len, res->lockname.name, m);
1151		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1152		if (ret) {
1153			/* This is a failure in the network path,
1154			 * not in the response to the assert_master
1155			 * (any nonzero response is a BUG on this node).
1156			 * Most likely a socket just got disconnected
1157			 * due to node death. */
1158			mlog_errno(ret);
1159		}
1160		/* no longer need to restart lock mastery.
1161		 * all living nodes have been contacted. */
1162		ret = 0;
1163	}
1164
1165	/* set the lockres owner */
1166	spin_lock(&res->spinlock);
1167	/* mastery reference obtained either during
1168	 * assert_master_handler or in get_lock_resource */
1169	dlm_change_lockres_owner(dlm, res, m);
1170	spin_unlock(&res->spinlock);
1171
1172leave:
1173	return ret;
1174}
1175
1176struct dlm_bitmap_diff_iter
1177{
1178	int curnode;
1179	unsigned long *orig_bm;
1180	unsigned long *cur_bm;
1181	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1182};
1183
1184enum dlm_node_state_change
1185{
1186	NODE_DOWN = -1,
1187	NODE_NO_CHANGE = 0,
1188	NODE_UP
1189};
1190
1191static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1192				      unsigned long *orig_bm,
1193				      unsigned long *cur_bm)
1194{
1195	unsigned long p1, p2;
1196	int i;
1197
1198	iter->curnode = -1;
1199	iter->orig_bm = orig_bm;
1200	iter->cur_bm = cur_bm;
1201
1202	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1203       		p1 = *(iter->orig_bm + i);
1204	       	p2 = *(iter->cur_bm + i);
1205		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1206	}
1207}
1208
1209static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1210				     enum dlm_node_state_change *state)
1211{
1212	int bit;
1213
1214	if (iter->curnode >= O2NM_MAX_NODES)
1215		return -ENOENT;
1216
1217	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1218			    iter->curnode+1);
1219	if (bit >= O2NM_MAX_NODES) {
1220		iter->curnode = O2NM_MAX_NODES;
1221		return -ENOENT;
1222	}
1223
1224	/* if it was there in the original then this node died */
1225	if (test_bit(bit, iter->orig_bm))
1226		*state = NODE_DOWN;
1227	else
1228		*state = NODE_UP;
1229
1230	iter->curnode = bit;
1231	return bit;
1232}
1233
1234
1235static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1236				    struct dlm_lock_resource *res,
1237				    struct dlm_master_list_entry *mle,
1238				    int blocked)
1239{
1240	struct dlm_bitmap_diff_iter bdi;
1241	enum dlm_node_state_change sc;
1242	int node;
1243	int ret = 0;
1244
1245	mlog(0, "something happened such that the "
1246	     "master process may need to be restarted!\n");
1247
1248	assert_spin_locked(&mle->spinlock);
1249
1250	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1251	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1252	while (node >= 0) {
1253		if (sc == NODE_UP) {
1254			/* a node came up.  clear any old vote from
1255			 * the response map and set it in the vote map
1256			 * then restart the mastery. */
1257			mlog(ML_NOTICE, "node %d up while restarting\n", node);
1258
1259			/* redo the master request, but only for the new node */
1260			mlog(0, "sending request to new node\n");
1261			clear_bit(node, mle->response_map);
1262			set_bit(node, mle->vote_map);
1263		} else {
1264			mlog(ML_ERROR, "node down! %d\n", node);
1265			if (blocked) {
1266				int lowest = find_next_bit(mle->maybe_map,
1267						       O2NM_MAX_NODES, 0);
1268
1269				/* act like it was never there */
1270				clear_bit(node, mle->maybe_map);
1271
1272			       	if (node == lowest) {
1273					mlog(0, "expected master %u died"
1274					    " while this node was blocked "
1275					    "waiting on it!\n", node);
1276					lowest = find_next_bit(mle->maybe_map,
1277						       	O2NM_MAX_NODES,
1278						       	lowest+1);
1279					if (lowest < O2NM_MAX_NODES) {
1280						mlog(0, "%s:%.*s:still "
1281						     "blocked. waiting on %u "
1282						     "now\n", dlm->name,
1283						     res->lockname.len,
1284						     res->lockname.name,
1285						     lowest);
1286					} else {
1287						/* mle is an MLE_BLOCK, but
1288						 * there is now nothing left to
1289						 * block on.  we need to return
1290						 * all the way back out and try
1291						 * again with an MLE_MASTER.
1292						 * dlm_do_local_recovery_cleanup
1293						 * has already run, so the mle
1294						 * refcount is ok */
1295						mlog(0, "%s:%.*s: no "
1296						     "longer blocking. try to "
1297						     "master this here\n",
1298						     dlm->name,
1299						     res->lockname.len,
1300						     res->lockname.name);
1301						mle->type = DLM_MLE_MASTER;
1302						mle->mleres = res;
1303					}
1304				}
1305			}
1306
1307			/* now blank out everything, as if we had never
1308			 * contacted anyone */
1309			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1310			memset(mle->response_map, 0, sizeof(mle->response_map));
1311			/* reset the vote_map to the current node_map */
1312			memcpy(mle->vote_map, mle->node_map,
1313			       sizeof(mle->node_map));
1314			/* put myself into the maybe map */
1315			if (mle->type != DLM_MLE_BLOCK)
1316				set_bit(dlm->node_num, mle->maybe_map);
1317		}
1318		ret = -EAGAIN;
1319		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1320	}
1321	return ret;
1322}
1323
1324
1325/*
1326 * DLM_MASTER_REQUEST_MSG
1327 *
1328 * returns: 0 on success,
1329 *          -errno on a network error
1330 *
1331 * on error, the caller should assume the target node is "dead"
1332 *
1333 */
1334
1335static int dlm_do_master_request(struct dlm_lock_resource *res,
1336				 struct dlm_master_list_entry *mle, int to)
1337{
1338	struct dlm_ctxt *dlm = mle->dlm;
1339	struct dlm_master_request request;
1340	int ret, response=0, resend;
1341
1342	memset(&request, 0, sizeof(request));
1343	request.node_idx = dlm->node_num;
1344
1345	BUG_ON(mle->type == DLM_MLE_MIGRATION);
1346
1347	request.namelen = (u8)mle->mnamelen;
1348	memcpy(request.name, mle->mname, request.namelen);
1349
1350again:
1351	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1352				 sizeof(request), to, &response);
1353	if (ret < 0)  {
1354		if (ret == -ESRCH) {
1355			/* should never happen */
1356			mlog(ML_ERROR, "TCP stack not ready!\n");
1357			BUG();
1358		} else if (ret == -EINVAL) {
1359			mlog(ML_ERROR, "bad args passed to o2net!\n");
1360			BUG();
1361		} else if (ret == -ENOMEM) {
1362			mlog(ML_ERROR, "out of memory while trying to send "
1363			     "network message!  retrying\n");
1364			/* this is totally crude */
1365			msleep(50);
1366			goto again;
1367		} else if (!dlm_is_host_down(ret)) {
1368			/* not a network error. bad. */
1369			mlog_errno(ret);
1370			mlog(ML_ERROR, "unhandled error!");
1371			BUG();
1372		}
1373		/* all other errors should be network errors,
1374		 * and likely indicate node death */
1375		mlog(ML_ERROR, "link to %d went down!\n", to);
1376		goto out;
1377	}
1378
1379	ret = 0;
1380	resend = 0;
1381	spin_lock(&mle->spinlock);
1382	switch (response) {
1383		case DLM_MASTER_RESP_YES:
1384			set_bit(to, mle->response_map);
1385			mlog(0, "node %u is the master, response=YES\n", to);
1386			mlog(0, "%s:%.*s: master node %u now knows I have a "
1387			     "reference\n", dlm->name, res->lockname.len,
1388			     res->lockname.name, to);
1389			mle->master = to;
1390			break;
1391		case DLM_MASTER_RESP_NO:
1392			mlog(0, "node %u not master, response=NO\n", to);
1393			set_bit(to, mle->response_map);
1394			break;
1395		case DLM_MASTER_RESP_MAYBE:
1396			mlog(0, "node %u not master, response=MAYBE\n", to);
1397			set_bit(to, mle->response_map);
1398			set_bit(to, mle->maybe_map);
1399			break;
1400		case DLM_MASTER_RESP_ERROR:
1401			mlog(0, "node %u hit an error, resending\n", to);
1402			resend = 1;
1403			response = 0;
1404			break;
1405		default:
1406			mlog(ML_ERROR, "bad response! %u\n", response);
1407			BUG();
1408	}
1409	spin_unlock(&mle->spinlock);
1410	if (resend) {
1411		/* this is also totally crude */
1412		msleep(50);
1413		goto again;
1414	}
1415
1416out:
1417	return ret;
1418}
1419
1420/*
1421 * locks that can be taken here:
1422 * dlm->spinlock
1423 * res->spinlock
1424 * mle->spinlock
1425 * dlm->master_list
1426 *
1427 * if possible, TRIM THIS DOWN!!!
1428 */
1429int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1430			       void **ret_data)
1431{
1432	u8 response = DLM_MASTER_RESP_MAYBE;
1433	struct dlm_ctxt *dlm = data;
1434	struct dlm_lock_resource *res = NULL;
1435	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1436	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1437	char *name;
1438	unsigned int namelen, hash;
1439	int found, ret;
1440	int set_maybe;
1441	int dispatch_assert = 0;
1442	int dispatched = 0;
1443
1444	if (!dlm_grab(dlm))
1445		return DLM_MASTER_RESP_NO;
1446
1447	if (!dlm_domain_fully_joined(dlm)) {
1448		response = DLM_MASTER_RESP_NO;
1449		goto send_response;
1450	}
1451
1452	name = request->name;
1453	namelen = request->namelen;
1454	hash = dlm_lockid_hash(name, namelen);
1455
1456	if (namelen > DLM_LOCKID_NAME_MAX) {
1457		response = DLM_IVBUFLEN;
1458		goto send_response;
1459	}
1460
1461way_up_top:
1462	spin_lock(&dlm->spinlock);
1463	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1464	if (res) {
1465		spin_unlock(&dlm->spinlock);
1466
1467		/* take care of the easy cases up front */
1468		spin_lock(&res->spinlock);
1469
1470		/*
1471		 * Right after dlm spinlock was released, dlm_thread could have
1472		 * purged the lockres. Check if lockres got unhashed. If so
1473		 * start over.
1474		 */
1475		if (hlist_unhashed(&res->hash_node)) {
1476			spin_unlock(&res->spinlock);
1477			dlm_lockres_put(res);
1478			goto way_up_top;
1479		}
1480
1481		if (res->state & (DLM_LOCK_RES_RECOVERING|
1482				  DLM_LOCK_RES_MIGRATING)) {
1483			spin_unlock(&res->spinlock);
1484			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1485			     "being recovered/migrated\n");
1486			response = DLM_MASTER_RESP_ERROR;
1487			if (mle)
1488				kmem_cache_free(dlm_mle_cache, mle);
1489			goto send_response;
1490		}
1491
1492		if (res->owner == dlm->node_num) {
1493			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1494			spin_unlock(&res->spinlock);
1495			response = DLM_MASTER_RESP_YES;
1496			if (mle)
1497				kmem_cache_free(dlm_mle_cache, mle);
1498
1499			/* this node is the owner.
1500			 * there is some extra work that needs to
1501			 * happen now.  the requesting node has
1502			 * caused all nodes up to this one to
1503			 * create mles.  this node now needs to
1504			 * go back and clean those up. */
1505			dispatch_assert = 1;
1506			goto send_response;
1507		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1508			spin_unlock(&res->spinlock);
1509			// mlog(0, "node %u is the master\n", res->owner);
1510			response = DLM_MASTER_RESP_NO;
1511			if (mle)
1512				kmem_cache_free(dlm_mle_cache, mle);
1513			goto send_response;
1514		}
1515
1516		/* ok, there is no owner.  either this node is
1517		 * being blocked, or it is actively trying to
1518		 * master this lock. */
1519		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1520			mlog(ML_ERROR, "lock with no owner should be "
1521			     "in-progress!\n");
1522			BUG();
1523		}
1524
1525		// mlog(0, "lockres is in progress...\n");
1526		spin_lock(&dlm->master_lock);
1527		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1528		if (!found) {
1529			mlog(ML_ERROR, "no mle found for this lock!\n");
1530			BUG();
1531		}
1532		set_maybe = 1;
1533		spin_lock(&tmpmle->spinlock);
1534		if (tmpmle->type == DLM_MLE_BLOCK) {
1535			// mlog(0, "this node is waiting for "
1536			// "lockres to be mastered\n");
1537			response = DLM_MASTER_RESP_NO;
1538		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
1539			mlog(0, "node %u is master, but trying to migrate to "
1540			     "node %u.\n", tmpmle->master, tmpmle->new_master);
1541			if (tmpmle->master == dlm->node_num) {
1542				mlog(ML_ERROR, "no owner on lockres, but this "
1543				     "node is trying to migrate it to %u?!\n",
1544				     tmpmle->new_master);
1545				BUG();
1546			} else {
1547				/* the real master can respond on its own */
1548				response = DLM_MASTER_RESP_NO;
1549			}
1550		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1551			set_maybe = 0;
1552			if (tmpmle->master == dlm->node_num) {
1553				response = DLM_MASTER_RESP_YES;
1554				/* this node will be the owner.
1555				 * go back and clean the mles on any
1556				 * other nodes */
1557				dispatch_assert = 1;
1558				dlm_lockres_set_refmap_bit(dlm, res,
1559							   request->node_idx);
1560			} else
1561				response = DLM_MASTER_RESP_NO;
1562		} else {
1563			// mlog(0, "this node is attempting to "
1564			// "master lockres\n");
1565			response = DLM_MASTER_RESP_MAYBE;
1566		}
1567		if (set_maybe)
1568			set_bit(request->node_idx, tmpmle->maybe_map);
1569		spin_unlock(&tmpmle->spinlock);
1570
1571		spin_unlock(&dlm->master_lock);
1572		spin_unlock(&res->spinlock);
1573
1574		/* keep the mle attached to heartbeat events */
1575		dlm_put_mle(tmpmle);
1576		if (mle)
1577			kmem_cache_free(dlm_mle_cache, mle);
1578		goto send_response;
1579	}
1580
1581	/*
1582	 * lockres doesn't exist on this node
1583	 * if there is an MLE_BLOCK, return NO
1584	 * if there is an MLE_MASTER, return MAYBE
1585	 * otherwise, add an MLE_BLOCK, return NO
1586	 */
1587	spin_lock(&dlm->master_lock);
1588	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1589	if (!found) {
1590		/* this lockid has never been seen on this node yet */
1591		// mlog(0, "no mle found\n");
1592		if (!mle) {
1593			spin_unlock(&dlm->master_lock);
1594			spin_unlock(&dlm->spinlock);
1595
1596			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1597			if (!mle) {
1598				response = DLM_MASTER_RESP_ERROR;
1599				mlog_errno(-ENOMEM);
1600				goto send_response;
1601			}
1602			goto way_up_top;
1603		}
1604
1605		// mlog(0, "this is second time thru, already allocated, "
1606		// "add the block.\n");
1607		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1608		set_bit(request->node_idx, mle->maybe_map);
1609		__dlm_insert_mle(dlm, mle);
1610		response = DLM_MASTER_RESP_NO;
1611	} else {
1612		// mlog(0, "mle was found\n");
1613		set_maybe = 1;
1614		spin_lock(&tmpmle->spinlock);
1615		if (tmpmle->master == dlm->node_num) {
1616			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1617			BUG();
1618		}
1619		if (tmpmle->type == DLM_MLE_BLOCK)
1620			response = DLM_MASTER_RESP_NO;
1621		else if (tmpmle->type == DLM_MLE_MIGRATION) {
1622			mlog(0, "migration mle was found (%u->%u)\n",
1623			     tmpmle->master, tmpmle->new_master);
1624			/* real master can respond on its own */
1625			response = DLM_MASTER_RESP_NO;
1626		} else
1627			response = DLM_MASTER_RESP_MAYBE;
1628		if (set_maybe)
1629			set_bit(request->node_idx, tmpmle->maybe_map);
1630		spin_unlock(&tmpmle->spinlock);
1631	}
1632	spin_unlock(&dlm->master_lock);
1633	spin_unlock(&dlm->spinlock);
1634
1635	if (found) {
1636		/* keep the mle attached to heartbeat events */
1637		dlm_put_mle(tmpmle);
1638	}
1639send_response:
1640	/*
1641	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1642	 * The reference is released by dlm_assert_master_worker() under
1643	 * the call to dlm_dispatch_assert_master().  If
1644	 * dlm_assert_master_worker() isn't called, we drop it here.
1645	 */
1646	if (dispatch_assert) {
1647		if (response != DLM_MASTER_RESP_YES)
1648			mlog(ML_ERROR, "invalid response %d\n", response);
1649		if (!res) {
1650			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1651			BUG();
1652		}
1653		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1654			     dlm->node_num, res->lockname.len, res->lockname.name);
1655		spin_lock(&res->spinlock);
1656		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1657						 DLM_ASSERT_MASTER_MLE_CLEANUP);
1658		if (ret < 0) {
1659			mlog(ML_ERROR, "failed to dispatch assert master work\n");
1660			response = DLM_MASTER_RESP_ERROR;
1661			spin_unlock(&res->spinlock);
1662			dlm_lockres_put(res);
1663		} else {
1664			dispatched = 1;
1665			__dlm_lockres_grab_inflight_worker(dlm, res);
1666			spin_unlock(&res->spinlock);
1667		}
1668	} else {
1669		if (res)
1670			dlm_lockres_put(res);
1671	}
1672
1673	if (!dispatched)
1674		dlm_put(dlm);
1675	return response;
1676}
1677
1678/*
1679 * DLM_ASSERT_MASTER_MSG
1680 */
1681
1682
1683/*
1684 * NOTE: this can be used for debugging
1685 * can periodically run all locks owned by this node
1686 * and re-assert across the cluster...
1687 */
1688static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1689				struct dlm_lock_resource *res,
1690				void *nodemap, u32 flags)
1691{
1692	struct dlm_assert_master assert;
1693	int to, tmpret;
1694	struct dlm_node_iter iter;
1695	int ret = 0;
1696	int reassert;
1697	const char *lockname = res->lockname.name;
1698	unsigned int namelen = res->lockname.len;
1699
1700	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1701
1702	spin_lock(&res->spinlock);
1703	res->state |= DLM_LOCK_RES_SETREF_INPROG;
1704	spin_unlock(&res->spinlock);
1705
1706again:
1707	reassert = 0;
1708
1709	/* note that if this nodemap is empty, it returns 0 */
1710	dlm_node_iter_init(nodemap, &iter);
1711	while ((to = dlm_node_iter_next(&iter)) >= 0) {
1712		int r = 0;
1713		struct dlm_master_list_entry *mle = NULL;
1714
1715		mlog(0, "sending assert master to %d (%.*s)\n", to,
1716		     namelen, lockname);
1717		memset(&assert, 0, sizeof(assert));
1718		assert.node_idx = dlm->node_num;
1719		assert.namelen = namelen;
1720		memcpy(assert.name, lockname, namelen);
1721		assert.flags = cpu_to_be32(flags);
1722
1723		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1724					    &assert, sizeof(assert), to, &r);
1725		if (tmpret < 0) {
1726			mlog(ML_ERROR, "Error %d when sending message %u (key "
1727			     "0x%x) to node %u\n", tmpret,
1728			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
1729			if (!dlm_is_host_down(tmpret)) {
1730				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1731				BUG();
1732			}
1733			/* a node died.  finish out the rest of the nodes. */
1734			mlog(0, "link to %d went down!\n", to);
1735			/* any nonzero status return will do */
1736			ret = tmpret;
1737			r = 0;
1738		} else if (r < 0) {
1739			/* ok, something horribly messed.  kill thyself. */
1740			mlog(ML_ERROR,"during assert master of %.*s to %u, "
1741			     "got %d.\n", namelen, lockname, to, r);
1742			spin_lock(&dlm->spinlock);
1743			spin_lock(&dlm->master_lock);
1744			if (dlm_find_mle(dlm, &mle, (char *)lockname,
1745					 namelen)) {
1746				dlm_print_one_mle(mle);
1747				__dlm_put_mle(mle);
1748			}
1749			spin_unlock(&dlm->master_lock);
1750			spin_unlock(&dlm->spinlock);
1751			BUG();
1752		}
1753
1754		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1755		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1756				mlog(ML_ERROR, "%.*s: very strange, "
1757				     "master MLE but no lockres on %u\n",
1758				     namelen, lockname, to);
1759		}
1760
1761		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1762			mlog(0, "%.*s: node %u create mles on other "
1763			     "nodes and requests a re-assert\n",
1764			     namelen, lockname, to);
1765			reassert = 1;
1766		}
1767		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1768			mlog(0, "%.*s: node %u has a reference to this "
1769			     "lockres, set the bit in the refmap\n",
1770			     namelen, lockname, to);
1771			spin_lock(&res->spinlock);
1772			dlm_lockres_set_refmap_bit(dlm, res, to);
1773			spin_unlock(&res->spinlock);
1774		}
1775	}
1776
1777	if (reassert)
1778		goto again;
1779
1780	spin_lock(&res->spinlock);
1781	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1782	spin_unlock(&res->spinlock);
1783	wake_up(&res->wq);
1784
1785	return ret;
1786}
1787
1788/*
1789 * locks that can be taken here:
1790 * dlm->spinlock
1791 * res->spinlock
1792 * mle->spinlock
1793 * dlm->master_list
1794 *
1795 * if possible, TRIM THIS DOWN!!!
1796 */
1797int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1798			      void **ret_data)
1799{
1800	struct dlm_ctxt *dlm = data;
1801	struct dlm_master_list_entry *mle = NULL;
1802	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1803	struct dlm_lock_resource *res = NULL;
1804	char *name;
1805	unsigned int namelen, hash;
1806	u32 flags;
1807	int master_request = 0, have_lockres_ref = 0;
1808	int ret = 0;
1809
1810	if (!dlm_grab(dlm))
1811		return 0;
1812
1813	name = assert->name;
1814	namelen = assert->namelen;
1815	hash = dlm_lockid_hash(name, namelen);
1816	flags = be32_to_cpu(assert->flags);
1817
1818	if (namelen > DLM_LOCKID_NAME_MAX) {
1819		mlog(ML_ERROR, "Invalid name length!\n");
1820		goto done;
1821	}
1822
1823	spin_lock(&dlm->spinlock);
1824
1825	if (flags)
1826		mlog(0, "assert_master with flags: %u\n", flags);
1827
1828	/* find the MLE */
1829	spin_lock(&dlm->master_lock);
1830	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1831		/* not an error, could be master just re-asserting */
1832		mlog(0, "just got an assert_master from %u, but no "
1833		     "MLE for it! (%.*s)\n", assert->node_idx,
1834		     namelen, name);
1835	} else {
1836		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1837		if (bit >= O2NM_MAX_NODES) {
1838			/* not necessarily an error, though less likely.
1839			 * could be master just re-asserting. */
1840			mlog(0, "no bits set in the maybe_map, but %u "
1841			     "is asserting! (%.*s)\n", assert->node_idx,
1842			     namelen, name);
1843		} else if (bit != assert->node_idx) {
1844			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1845				mlog(0, "master %u was found, %u should "
1846				     "back off\n", assert->node_idx, bit);
1847			} else {
1848				/* with the fix for bug 569, a higher node
1849				 * number winning the mastery will respond
1850				 * YES to mastery requests, but this node
1851				 * had no way of knowing.  let it pass. */
1852				mlog(0, "%u is the lowest node, "
1853				     "%u is asserting. (%.*s)  %u must "
1854				     "have begun after %u won.\n", bit,
1855				     assert->node_idx, namelen, name, bit,
1856				     assert->node_idx);
1857			}
1858		}
1859		if (mle->type == DLM_MLE_MIGRATION) {
1860			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1861				mlog(0, "%s:%.*s: got cleanup assert"
1862				     " from %u for migration\n",
1863				     dlm->name, namelen, name,
1864				     assert->node_idx);
1865			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1866				mlog(0, "%s:%.*s: got unrelated assert"
1867				     " from %u for migration, ignoring\n",
1868				     dlm->name, namelen, name,
1869				     assert->node_idx);
1870				__dlm_put_mle(mle);
1871				spin_unlock(&dlm->master_lock);
1872				spin_unlock(&dlm->spinlock);
1873				goto done;
1874			}
1875		}
1876	}
1877	spin_unlock(&dlm->master_lock);
1878
1879	/* ok everything checks out with the MLE
1880	 * now check to see if there is a lockres */
1881	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1882	if (res) {
1883		spin_lock(&res->spinlock);
1884		if (res->state & DLM_LOCK_RES_RECOVERING)  {
1885			mlog(ML_ERROR, "%u asserting but %.*s is "
1886			     "RECOVERING!\n", assert->node_idx, namelen, name);
1887			goto kill;
1888		}
1889		if (!mle) {
1890			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1891			    res->owner != assert->node_idx) {
1892				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1893				     "but current owner is %u! (%.*s)\n",
1894				     assert->node_idx, res->owner, namelen,
1895				     name);
1896				__dlm_print_one_lock_resource(res);
1897				BUG();
1898			}
1899		} else if (mle->type != DLM_MLE_MIGRATION) {
1900			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1901				/* owner is just re-asserting */
1902				if (res->owner == assert->node_idx) {
1903					mlog(0, "owner %u re-asserting on "
1904					     "lock %.*s\n", assert->node_idx,
1905					     namelen, name);
1906					goto ok;
1907				}
1908				mlog(ML_ERROR, "got assert_master from "
1909				     "node %u, but %u is the owner! "
1910				     "(%.*s)\n", assert->node_idx,
1911				     res->owner, namelen, name);
1912				goto kill;
1913			}
1914			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1915				mlog(ML_ERROR, "got assert from %u, but lock "
1916				     "with no owner should be "
1917				     "in-progress! (%.*s)\n",
1918				     assert->node_idx,
1919				     namelen, name);
1920				goto kill;
1921			}
1922		} else /* mle->type == DLM_MLE_MIGRATION */ {
1923			/* should only be getting an assert from new master */
1924			if (assert->node_idx != mle->new_master) {
1925				mlog(ML_ERROR, "got assert from %u, but "
1926				     "new master is %u, and old master "
1927				     "was %u (%.*s)\n",
1928				     assert->node_idx, mle->new_master,
1929				     mle->master, namelen, name);
1930				goto kill;
1931			}
1932
1933		}
1934ok:
1935		spin_unlock(&res->spinlock);
1936	}
1937
1938	// mlog(0, "woo!  got an assert_master from node %u!\n",
1939	// 	     assert->node_idx);
1940	if (mle) {
1941		int extra_ref = 0;
1942		int nn = -1;
1943		int rr, err = 0;
1944
1945		spin_lock(&mle->spinlock);
1946		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1947			extra_ref = 1;
1948		else {
1949			/* MASTER mle: if any bits set in the response map
1950			 * then the calling node needs to re-assert to clear
1951			 * up nodes that this node contacted */
1952			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1953						    nn+1)) < O2NM_MAX_NODES) {
1954				if (nn != dlm->node_num && nn != assert->node_idx) {
1955					master_request = 1;
1956					break;
1957				}
1958			}
1959		}
1960		mle->master = assert->node_idx;
1961		atomic_set(&mle->woken, 1);
1962		wake_up(&mle->wq);
1963		spin_unlock(&mle->spinlock);
1964
1965		if (res) {
1966			int wake = 0;
1967			spin_lock(&res->spinlock);
1968			if (mle->type == DLM_MLE_MIGRATION) {
1969				mlog(0, "finishing off migration of lockres %.*s, "
1970			     		"from %u to %u\n",
1971			       		res->lockname.len, res->lockname.name,
1972			       		dlm->node_num, mle->new_master);
1973				res->state &= ~DLM_LOCK_RES_MIGRATING;
1974				wake = 1;
1975				dlm_change_lockres_owner(dlm, res, mle->new_master);
1976				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1977			} else {
1978				dlm_change_lockres_owner(dlm, res, mle->master);
1979			}
1980			spin_unlock(&res->spinlock);
1981			have_lockres_ref = 1;
1982			if (wake)
1983				wake_up(&res->wq);
1984		}
1985
1986		/* master is known, detach if not already detached.
1987		 * ensures that only one assert_master call will happen
1988		 * on this mle. */
1989		spin_lock(&dlm->master_lock);
1990
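		/*
		 * Added for clarity: the minimum expected refcount here is
		 * one for the master hash list, plus one if the mle is
		 * marked inuse, plus one if this is a BLOCK/MIGRATION mle
		 * (extra_ref).  Anything lower means the puts below would
		 * drop the last reference while the mle is still in use.
		 */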
1991		rr = kref_read(&mle->mle_refs);
1992		if (mle->inuse > 0) {
1993			if (extra_ref && rr < 3)
1994				err = 1;
1995			else if (!extra_ref && rr < 2)
1996				err = 1;
1997		} else {
1998			if (extra_ref && rr < 2)
1999				err = 1;
2000			else if (!extra_ref && rr < 1)
2001				err = 1;
2002		}
2003		if (err) {
2004			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
2005			     "that will mess up this node, refs=%d, extra=%d, "
2006			     "inuse=%d\n", dlm->name, namelen, name,
2007			     assert->node_idx, rr, extra_ref, mle->inuse);
2008			dlm_print_one_mle(mle);
2009		}
2010		__dlm_unlink_mle(dlm, mle);
2011		__dlm_mle_detach_hb_events(dlm, mle);
2012		__dlm_put_mle(mle);
2013		if (extra_ref) {
2014			/* the assert master message now balances the extra
2015			 * ref given by the master / migration request message.
2016			 * if this is the last put, it will be removed
2017			 * from the list. */
2018			__dlm_put_mle(mle);
2019		}
2020		spin_unlock(&dlm->master_lock);
2021	} else if (res) {
2022		if (res->owner != assert->node_idx) {
2023			mlog(0, "assert_master from %u, but current "
2024			     "owner is %u (%.*s), no mle\n", assert->node_idx,
2025			     res->owner, namelen, name);
2026		}
2027	}
2028	spin_unlock(&dlm->spinlock);
2029
2030done:
2031	ret = 0;
2032	if (res) {
2033		spin_lock(&res->spinlock);
2034		res->state |= DLM_LOCK_RES_SETREF_INPROG;
2035		spin_unlock(&res->spinlock);
2036		*ret_data = (void *)res;
2037	}
2038	dlm_put(dlm);
2039	if (master_request) {
2040		mlog(0, "need to tell master to reassert\n");
2041		/* positive. negative would shoot down the node. */
2042		ret |= DLM_ASSERT_RESPONSE_REASSERT;
2043		if (!have_lockres_ref) {
2044			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2045			     "mle present here for %s:%.*s, but no lockres!\n",
2046			     assert->node_idx, dlm->name, namelen, name);
2047		}
2048	}
2049	if (have_lockres_ref) {
2050		/* let the master know we have a reference to the lockres */
2051		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2052		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2053		     dlm->name, namelen, name, assert->node_idx);
2054	}
2055	return ret;
2056
2057kill:
2058	/* kill the caller! */
2059	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2060	     "and killing the other node now!  This node is OK and can continue.\n");
2061	__dlm_print_one_lock_resource(res);
2062	spin_unlock(&res->spinlock);
2063	spin_lock(&dlm->master_lock);
2064	if (mle)
2065		__dlm_put_mle(mle);
2066	spin_unlock(&dlm->master_lock);
2067	spin_unlock(&dlm->spinlock);
2068	*ret_data = (void *)res;
2069	dlm_put(dlm);
2070	return -EINVAL;
2071}
2072
2073void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2074{
2075	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2076
2077	if (ret_data) {
2078		spin_lock(&res->spinlock);
2079		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2080		spin_unlock(&res->spinlock);
2081		wake_up(&res->wq);
2082		dlm_lockres_put(res);
2083	}
2084	return;
2085}
2086
2087int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2088			       struct dlm_lock_resource *res,
2089			       int ignore_higher, u8 request_from, u32 flags)
2090{
2091	struct dlm_work_item *item;
2092	item = kzalloc(sizeof(*item), GFP_ATOMIC);
2093	if (!item)
2094		return -ENOMEM;
2095
2096
2097	/* queue up work for dlm_assert_master_worker */
2098	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2099	item->u.am.lockres = res; /* already have a ref */
2100	/* can optionally ignore node numbers higher than this node */
2101	item->u.am.ignore_higher = ignore_higher;
2102	item->u.am.request_from = request_from;
2103	item->u.am.flags = flags;
2104
2105	if (ignore_higher)
2106		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2107		     res->lockname.name);
2108
2109	spin_lock(&dlm->work_lock);
2110	list_add_tail(&item->list, &dlm->work_list);
2111	spin_unlock(&dlm->work_lock);
2112
2113	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2114	return 0;
2115}
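
/*
 * Illustrative usage, not from the original file: a message handler
 * that already holds a reference on 'res' queues the assert like this
 * and treats a negative return as allocation failure:
 *
 *	ret = dlm_dispatch_assert_master(dlm, res, 0, request_from, flags);
 *	if (ret < 0)
 *		mlog_errno(ret);
 */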
2116
2117static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2118{
2119	struct dlm_ctxt *dlm = data;
2120	int ret = 0;
2121	struct dlm_lock_resource *res;
2122	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2123	int ignore_higher;
2124	int bit;
2125	u8 request_from;
2126	u32 flags;
2127
2128	dlm = item->dlm;
2129	res = item->u.am.lockres;
2130	ignore_higher = item->u.am.ignore_higher;
2131	request_from = item->u.am.request_from;
2132	flags = item->u.am.flags;
2133
2134	spin_lock(&dlm->spinlock);
2135	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2136	spin_unlock(&dlm->spinlock);
2137
2138	clear_bit(dlm->node_num, nodemap);
2139	if (ignore_higher) {
2140		/* if this is just to clear up mles for nodes below
2141		 * this node, do not send the message to the original
2142		 * caller or any node number higher than this */
2143		clear_bit(request_from, nodemap);
2144		bit = dlm->node_num;
2145		while (1) {
2146			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2147					    bit+1);
2148		       	if (bit >= O2NM_MAX_NODES)
2149				break;
2150			clear_bit(bit, nodemap);
2151		}
2152	}
2153
2154	/*
2155	 * If we're migrating this lock to someone else, we are no
2156	 * longer allowed to assert our own mastery.  OTOH, we need to
2157	 * prevent migration from starting while we're still asserting
2158	 * our dominance.  The reserved ast delays migration.
2159	 */
2160	spin_lock(&res->spinlock);
2161	if (res->state & DLM_LOCK_RES_MIGRATING) {
2162		mlog(0, "Someone asked us to assert mastery, but we're "
2163		     "in the middle of migration.  Skipping assert, "
2164		     "the new master will handle that.\n");
2165		spin_unlock(&res->spinlock);
2166		goto put;
2167	} else
2168		__dlm_lockres_reserve_ast(res);
2169	spin_unlock(&res->spinlock);
2170
2171	/* this call now finishes out the nodemap
2172	 * even if one or more nodes die */
2173	mlog(0, "worker about to master %.*s here, this=%u\n",
2174		     res->lockname.len, res->lockname.name, dlm->node_num);
2175	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2176	if (ret < 0) {
2177		/* no need to restart, we are done */
2178		if (!dlm_is_host_down(ret))
2179			mlog_errno(ret);
2180	}
2181
2182	/* Ok, we've asserted ourselves.  Let's let migration start. */
2183	dlm_lockres_release_ast(dlm, res);
2184
2185put:
2186	dlm_lockres_drop_inflight_worker(dlm, res);
2187
2188	dlm_lockres_put(res);
2189
2190	mlog(0, "finished with dlm_assert_master_worker\n");
2191}
2192
2193/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2194 * We cannot wait for node recovery to complete to begin mastering this
2195 * lockres because this lockres is used to kick off recovery! ;-)
2196 * So, do a pre-check on all living nodes to see if any of those nodes
2197 * think that $RECOVERY is currently mastered by a dead node.  If so,
2198 * we wait a short time to allow that node to get notified by its own
2199 * heartbeat stack, then check again.  All $RECOVERY lock resources
2200 * mastered by dead nodes are purged when the heartbeat callback is
2201 * fired, so we can know for sure that it is safe to continue once
2202 * the requery returns a live node or no node.  */
2203static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2204				       struct dlm_lock_resource *res)
2205{
2206	struct dlm_node_iter iter;
2207	int nodenum;
2208	int ret = 0;
2209	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2210
2211	spin_lock(&dlm->spinlock);
2212	dlm_node_iter_init(dlm->domain_map, &iter);
2213	spin_unlock(&dlm->spinlock);
2214
2215	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2216		/* do not send to self */
2217		if (nodenum == dlm->node_num)
2218			continue;
2219		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2220		if (ret < 0) {
2221			mlog_errno(ret);
2222			if (!dlm_is_host_down(ret))
2223				BUG();
2224			/* host is down, so answer for that node would be
2225			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2226			ret = 0;
2227		}
2228
2229		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2230			/* check to see if this master is in the recovery map */
2231			spin_lock(&dlm->spinlock);
2232			if (test_bit(master, dlm->recovery_map)) {
2233				mlog(ML_NOTICE, "%s: node %u has not seen "
2234				     "node %u go down yet, and thinks the "
2235				     "dead node is mastering the recovery "
2236				     "lock.  must wait.\n", dlm->name,
2237				     nodenum, master);
2238				ret = -EAGAIN;
2239			}
2240			spin_unlock(&dlm->spinlock);
2241			mlog(0, "%s: reco lock master is %u\n", dlm->name,
2242			     master);
2243			break;
2244		}
2245	}
2246	return ret;
2247}
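
/*
 * Illustrative sketch, not part of the original file: a caller mastering
 * the $RECOVERY lockres would poll this pre-check until no live node
 * still believes a dead node is the master (the 100ms backoff is an
 * assumed value, not taken from this file):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 */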
2248
2249/*
2250 * DLM_DEREF_LOCKRES_MSG
2251 */
2252
2253int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2254{
2255	struct dlm_deref_lockres deref;
2256	int ret = 0, r;
2257	const char *lockname;
2258	unsigned int namelen;
2259
2260	lockname = res->lockname.name;
2261	namelen = res->lockname.len;
2262	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2263
2264	memset(&deref, 0, sizeof(deref));
2265	deref.node_idx = dlm->node_num;
2266	deref.namelen = namelen;
2267	memcpy(deref.name, lockname, namelen);
2268
2269	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2270				 &deref, sizeof(deref), res->owner, &r);
2271	if (ret < 0)
2272		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2273		     dlm->name, namelen, lockname, ret, res->owner);
2274	else if (r < 0) {
2275		/* BAD.  other node says I did not have a ref. */
2276		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2277		     dlm->name, namelen, lockname, res->owner, r);
2278		dlm_print_one_lock_resource(res);
2279		BUG();
2280	}
2281	return ret ? ret : r;
2282}
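
/*
 * Note added for clarity: on a successful send, the value returned above
 * is the remote handler's status -- DLM_DEREF_RESPONSE_DONE when the
 * owner cleared the refmap bit inline, or DLM_DEREF_RESPONSE_INPROG when
 * it deferred the work and will follow up with a DLM_DEREF_LOCKRES_DONE
 * message (see dlm_deref_lockres_handler() below).
 */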
2283
2284int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2285			      void **ret_data)
2286{
2287	struct dlm_ctxt *dlm = data;
2288	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2289	struct dlm_lock_resource *res = NULL;
2290	char *name;
2291	unsigned int namelen;
2292	int ret = -EINVAL;
2293	u8 node;
2294	unsigned int hash;
2295	struct dlm_work_item *item;
2296	int cleared = 0;
2297	int dispatch = 0;
2298
2299	if (!dlm_grab(dlm))
2300		return 0;
2301
2302	name = deref->name;
2303	namelen = deref->namelen;
2304	node = deref->node_idx;
2305
2306	if (namelen > DLM_LOCKID_NAME_MAX) {
2307		mlog(ML_ERROR, "Invalid name length!\n");
2308		goto done;
2309	}
2310	if (deref->node_idx >= O2NM_MAX_NODES) {
2311		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2312		goto done;
2313	}
2314
2315	hash = dlm_lockid_hash(name, namelen);
2316
2317	spin_lock(&dlm->spinlock);
2318	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2319	if (!res) {
2320		spin_unlock(&dlm->spinlock);
2321		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2322		     dlm->name, namelen, name);
2323		goto done;
2324	}
2325	spin_unlock(&dlm->spinlock);
2326
2327	spin_lock(&res->spinlock);
2328	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2329		dispatch = 1;
2330	else {
2331		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2332		if (test_bit(node, res->refmap)) {
2333			dlm_lockres_clear_refmap_bit(dlm, res, node);
2334			cleared = 1;
2335		}
2336	}
2337	spin_unlock(&res->spinlock);
2338
2339	if (!dispatch) {
2340		if (cleared)
2341			dlm_lockres_calc_usage(dlm, res);
2342		else {
2343			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2344		     	"but it is already dropped!\n", dlm->name,
2345		     	res->lockname.len, res->lockname.name, node);
2346			dlm_print_one_lock_resource(res);
2347		}
2348		ret = DLM_DEREF_RESPONSE_DONE;
2349		goto done;
2350	}
2351
2352	item = kzalloc(sizeof(*item), GFP_NOFS);
2353	if (!item) {
2354		ret = -ENOMEM;
2355		mlog_errno(ret);
2356		goto done;
2357	}
2358
2359	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2360	item->u.dl.deref_res = res;
2361	item->u.dl.deref_node = node;
2362
2363	spin_lock(&dlm->work_lock);
2364	list_add_tail(&item->list, &dlm->work_list);
2365	spin_unlock(&dlm->work_lock);
2366
2367	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2368	return DLM_DEREF_RESPONSE_INPROG;
2369
2370done:
2371	if (res)
2372		dlm_lockres_put(res);
2373	dlm_put(dlm);
2374
2375	return ret;
2376}
2377
2378int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2379			      void **ret_data)
2380{
2381	struct dlm_ctxt *dlm = data;
2382	struct dlm_deref_lockres_done *deref
2383			= (struct dlm_deref_lockres_done *)msg->buf;
2384	struct dlm_lock_resource *res = NULL;
2385	char *name;
2386	unsigned int namelen;
2387	int ret = -EINVAL;
2388	u8 node;
2389	unsigned int hash;
2390
2391	if (!dlm_grab(dlm))
2392		return 0;
2393
2394	name = deref->name;
2395	namelen = deref->namelen;
2396	node = deref->node_idx;
2397
2398	if (namelen > DLM_LOCKID_NAME_MAX) {
2399		mlog(ML_ERROR, "Invalid name length!\n");
2400		goto done;
2401	}
2402	if (deref->node_idx >= O2NM_MAX_NODES) {
2403		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2404		goto done;
2405	}
2406
2407	hash = dlm_lockid_hash(name, namelen);
2408
2409	spin_lock(&dlm->spinlock);
2410	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2411	if (!res) {
2412		spin_unlock(&dlm->spinlock);
2413		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2414		     dlm->name, namelen, name);
2415		goto done;
2416	}
2417
2418	spin_lock(&res->spinlock);
2419	BUG_ON(!(res->state & DLM_LOCK_RES_DROPPING_REF));
2420	if (!list_empty(&res->purge)) {
2421		mlog(0, "%s: Removing res %.*s from purgelist\n",
2422			dlm->name, res->lockname.len, res->lockname.name);
2423		list_del_init(&res->purge);
2424		dlm_lockres_put(res);
2425		dlm->purge_count--;
2426	}
2427
2428	if (!__dlm_lockres_unused(res)) {
2429		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
2430			dlm->name, res->lockname.len, res->lockname.name);
2431		__dlm_print_one_lock_resource(res);
2432		BUG();
2433	}
2434
2435	__dlm_unhash_lockres(dlm, res);
2436
2437	spin_lock(&dlm->track_lock);
2438	if (!list_empty(&res->tracking))
2439		list_del_init(&res->tracking);
2440	else {
2441		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
2442		     dlm->name, res->lockname.len, res->lockname.name);
2443		__dlm_print_one_lock_resource(res);
2444	}
2445	spin_unlock(&dlm->track_lock);
2446
2447	/* lockres is not in the hash now. drop the flag and wake up
2448	 * any processes waiting in dlm_get_lock_resource.
2449	 */
2450	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
2451	spin_unlock(&res->spinlock);
2452	wake_up(&res->wq);
2453
2454	dlm_lockres_put(res);
2455
2456	spin_unlock(&dlm->spinlock);
2457
2458	ret = 0;
2459
2460done:
2461	dlm_put(dlm);
2462	return ret;
2463}
2464
2465static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
2466		struct dlm_lock_resource *res, u8 node)
2467{
2468	struct dlm_deref_lockres_done deref;
2469	int ret = 0, r;
2470	const char *lockname;
2471	unsigned int namelen;
2472
2473	lockname = res->lockname.name;
2474	namelen = res->lockname.len;
2475	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2476
2477	memset(&deref, 0, sizeof(deref));
2478	deref.node_idx = dlm->node_num;
2479	deref.namelen = namelen;
2480	memcpy(deref.name, lockname, namelen);
2481
2482	ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
2483				 &deref, sizeof(deref), node, &r);
2484	if (ret < 0) {
2485		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
2486				" to node %u\n", dlm->name, namelen,
2487				lockname, ret, node);
2488	} else if (r < 0) {
2489		/* ignore the error, just log it and continue */
2490		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2491		     dlm->name, namelen, lockname, node, r);
2492		dlm_print_one_lock_resource(res);
2493	}
2494}
2495
2496static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2497{
2498	struct dlm_ctxt *dlm;
2499	struct dlm_lock_resource *res;
2500	u8 node;
2501	u8 cleared = 0;
2502
2503	dlm = item->dlm;
2504	res = item->u.dl.deref_res;
2505	node = item->u.dl.deref_node;
2506
2507	spin_lock(&res->spinlock);
2508	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2509	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2510	if (test_bit(node, res->refmap)) {
2511		dlm_lockres_clear_refmap_bit(dlm, res, node);
2512		cleared = 1;
2513	}
2514	spin_unlock(&res->spinlock);
2515
2516	dlm_drop_lockres_ref_done(dlm, res, node);
2517
2518	if (cleared) {
2519		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2520		     dlm->name, res->lockname.len, res->lockname.name, node);
2521		dlm_lockres_calc_usage(dlm, res);
2522	} else {
2523		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2524		     "but it is already dropped!\n", dlm->name,
2525		     res->lockname.len, res->lockname.name, node);
2526		dlm_print_one_lock_resource(res);
2527	}
2528
2529	dlm_lockres_put(res);
2530}
2531
2532/*
2533 * A migrateable resource is one that is:
2534 * 1. locally mastered, and
2535 * 2. has zero local locks, and
2536 * 3. has one or more non-local locks, or one or more references.
2537 * Returns 1 if migrateable, 0 if not.
2538 */
2539static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2540				      struct dlm_lock_resource *res)
2541{
2542	enum dlm_lockres_list idx;
2543	int nonlocal = 0, node_ref;
2544	struct list_head *queue;
2545	struct dlm_lock *lock;
2546	u64 cookie;
2547
2548	assert_spin_locked(&res->spinlock);
2549
2550	/* delay migration when the lockres is in MIGRATING state */
2551	if (res->state & DLM_LOCK_RES_MIGRATING)
2552		return 0;
2553
2554	/* delay migration when the lockres is in RECOVERING state */
2555	if (res->state & (DLM_LOCK_RES_RECOVERING|
2556			DLM_LOCK_RES_RECOVERY_WAITING))
2557		return 0;
2558
2559	if (res->owner != dlm->node_num)
2560		return 0;
2561
2562	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2563		queue = dlm_list_idx_to_ptr(res, idx);
2564		list_for_each_entry(lock, queue, list) {
2565			if (lock->ml.node != dlm->node_num) {
2566				nonlocal++;
2567				continue;
2568			}
2569			cookie = be64_to_cpu(lock->ml.cookie);
2570			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2571			     "%s list\n", dlm->name, res->lockname.len,
2572			     res->lockname.name,
2573			     dlm_get_lock_cookie_node(cookie),
2574			     dlm_get_lock_cookie_seq(cookie),
2575			     dlm_list_in_text(idx));
2576			return 0;
2577		}
2578	}
2579
2580	if (!nonlocal) {
2581		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2582		if (node_ref >= O2NM_MAX_NODES)
2583			return 0;
2584	}
2585
2586	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2587	     res->lockname.name);
2588
2589	return 1;
2590}
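
/*
 * Illustrative usage, mirroring dlm_empty_lockres() below: the check and
 * the target selection must both happen under res->spinlock:
 *
 *	spin_lock(&res->spinlock);
 *	if (dlm_is_lockres_migrateable(dlm, res))
 *		target = dlm_pick_migration_target(dlm, res);
 *	spin_unlock(&res->spinlock);
 */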
2591
2592/*
2593 * DLM_MIGRATE_LOCKRES
2594 */
2595
2596
2597static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2598			       struct dlm_lock_resource *res, u8 target)
2599{
2600	struct dlm_master_list_entry *mle = NULL;
2601	struct dlm_master_list_entry *oldmle = NULL;
2602	struct dlm_migratable_lockres *mres = NULL;
2603	int ret = 0;
2604	const char *name;
2605	unsigned int namelen;
2606	int mle_added = 0;
2607	int wake = 0;
2608
2609	if (!dlm_grab(dlm))
2610		return -EINVAL;
2611
2612	BUG_ON(target == O2NM_MAX_NODES);
2613
2614	name = res->lockname.name;
2615	namelen = res->lockname.len;
2616
2617	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2618	     target);
2619
2620	/* preallocate up front. if this fails, abort */
2621	ret = -ENOMEM;
2622	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2623	if (!mres) {
2624		mlog_errno(ret);
2625		goto leave;
2626	}
2627
2628	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2629	if (!mle) {
2630		mlog_errno(ret);
2631		goto leave;
2632	}
2633	ret = 0;
2634
2635	/*
2636	 * clear any existing master requests and
2637	 * add the migration mle to the list
2638	 */
2639	spin_lock(&dlm->spinlock);
2640	spin_lock(&dlm->master_lock);
2641	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2642				    namelen, target, dlm->node_num);
2643	/* get an extra reference on the mle.
2644	 * otherwise the assert_master from the new
2645	 * master will destroy this.
2646	 */
2647	dlm_get_mle_inuse(mle);
2648	spin_unlock(&dlm->master_lock);
2649	spin_unlock(&dlm->spinlock);
2650
2651	if (ret == -EEXIST) {
2652		mlog(0, "another process is already migrating it\n");
2653		goto fail;
2654	}
2655	mle_added = 1;
2656
2657	/*
2658	 * set the MIGRATING flag and flush asts
2659	 * if we fail after this we need to re-dirty the lockres
2660	 */
2661	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2662		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2663		     "the target went down.\n", res->lockname.len,
2664		     res->lockname.name, target);
2665		spin_lock(&res->spinlock);
2666		res->state &= ~DLM_LOCK_RES_MIGRATING;
2667		wake = 1;
2668		spin_unlock(&res->spinlock);
2669		ret = -EINVAL;
2670	}
2671
2672fail:
2673	if (ret != -EEXIST && oldmle) {
2674		/* master is known, detach if not already detached */
2675		dlm_mle_detach_hb_events(dlm, oldmle);
2676		dlm_put_mle(oldmle);
2677	}
2678
2679	if (ret < 0) {
2680		if (mle_added) {
2681			dlm_mle_detach_hb_events(dlm, mle);
2682			dlm_put_mle(mle);
2683			dlm_put_mle_inuse(mle);
2684		} else if (mle) {
2685			kmem_cache_free(dlm_mle_cache, mle);
2686			mle = NULL;
2687		}
2688		goto leave;
2689	}
2690
2691	/*
2692	 * at this point, we have a migration target, an mle
2693	 * in the master list, and the MIGRATING flag set on
2694	 * the lockres
2695	 */
2696
2697	/* now that remote nodes are spinning on the MIGRATING flag,
2698	 * ensure that all assert_master work is flushed. */
2699	flush_workqueue(dlm->dlm_worker);
2700
2701	/* notify new node and send all lock state */
2702	/* call send_one_lockres with migration flag.
2703	 * this serves as notice to the target node that a
2704	 * migration is starting. */
2705	ret = dlm_send_one_lockres(dlm, res, mres, target,
2706				   DLM_MRES_MIGRATION);
2707
2708	if (ret < 0) {
2709		mlog(0, "migration to node %u failed with %d\n",
2710		     target, ret);
2711		/* migration failed, detach and clean up mle */
2712		dlm_mle_detach_hb_events(dlm, mle);
2713		dlm_put_mle(mle);
2714		dlm_put_mle_inuse(mle);
2715		spin_lock(&res->spinlock);
2716		res->state &= ~DLM_LOCK_RES_MIGRATING;
2717		wake = 1;
2718		spin_unlock(&res->spinlock);
2719		if (dlm_is_host_down(ret))
2720			dlm_wait_for_node_death(dlm, target,
2721						DLM_NODE_DEATH_WAIT_MAX);
2722		goto leave;
2723	}
2724
2725	/* at this point, the target sends a message to all nodes,
2726	 * (using dlm_do_migrate_request).  this node is skipped since
2727	 * we had to put an mle in the list to begin the process.  this
2728	 * node now waits for target to do an assert master.  this node
2729	 * will be the last one notified, ensuring that the migration
2730	 * is complete everywhere.  if the target dies while this is
2731	 * going on, some nodes could potentially see the target as the
2732	 * master, so it is important that my recovery finds the migration
2733	 * mle and sets the master to UNKNOWN. */
2734
2735
2736	/* wait for new node to assert master */
2737	while (1) {
2738		ret = wait_event_interruptible_timeout(mle->wq,
2739					(atomic_read(&mle->woken) == 1),
2740					msecs_to_jiffies(5000));
2741
2742		if (ret >= 0) {
2743			if (atomic_read(&mle->woken) == 1 ||
2744			    res->owner == target)
2745				break;
2746
2747			mlog(0, "%s:%.*s: timed out during migration\n",
2748			     dlm->name, res->lockname.len, res->lockname.name);
2749			/* avoid hang during shutdown when migrating lockres
2750			 * to a node which also goes down */
2751			if (dlm_is_node_dead(dlm, target)) {
2752				mlog(0, "%s:%.*s: expected migration "
2753				     "target %u is no longer up, restarting\n",
2754				     dlm->name, res->lockname.len,
2755				     res->lockname.name, target);
2756				ret = -EINVAL;
2757				/* migration failed, detach and clean up mle */
2758				dlm_mle_detach_hb_events(dlm, mle);
2759				dlm_put_mle(mle);
2760				dlm_put_mle_inuse(mle);
2761				spin_lock(&res->spinlock);
2762				res->state &= ~DLM_LOCK_RES_MIGRATING;
2763				wake = 1;
2764				spin_unlock(&res->spinlock);
2765				goto leave;
2766			}
2767		} else
2768			mlog(0, "%s:%.*s: caught signal during migration\n",
2769			     dlm->name, res->lockname.len, res->lockname.name);
2770	}
2771
2772	/* all done, set the owner, clear the flag */
2773	spin_lock(&res->spinlock);
2774	dlm_set_lockres_owner(dlm, res, target);
2775	res->state &= ~DLM_LOCK_RES_MIGRATING;
2776	dlm_remove_nonlocal_locks(dlm, res);
2777	spin_unlock(&res->spinlock);
2778	wake_up(&res->wq);
2779
2780	/* master is known, detach if not already detached */
2781	dlm_mle_detach_hb_events(dlm, mle);
2782	dlm_put_mle_inuse(mle);
2783	ret = 0;
2784
2785	dlm_lockres_calc_usage(dlm, res);
2786
2787leave:
2788	/* re-dirty the lockres if we failed */
2789	if (ret < 0)
2790		dlm_kick_thread(dlm, res);
2791
2792	/* wake up waiters if the MIGRATING flag got set
2793	 * but migration failed */
2794	if (wake)
2795		wake_up(&res->wq);
2796
2797	if (mres)
2798		free_page((unsigned long)mres);
2799
2800	dlm_put(dlm);
2801
2802	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2803	     name, target, ret);
2804	return ret;
2805}
2806
2807#define DLM_MIGRATION_RETRY_MS  100
2808
2809/*
2810 * Should be called only after beginning the domain leave process.
2811 * There should not be any remaining locks on nonlocal lock resources,
2812 * and there should be no local locks left on locally mastered resources.
2813 *
2814 * Called with the dlm spinlock held, may drop it to do migration, but
2815 * will re-acquire before exit.
2816 *
2817 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2818 */
2819int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2820{
2821	int ret;
2822	int lock_dropped = 0;
2823	u8 target = O2NM_MAX_NODES;
2824
2825	assert_spin_locked(&dlm->spinlock);
2826
2827	spin_lock(&res->spinlock);
2828	if (dlm_is_lockres_migrateable(dlm, res))
2829		target = dlm_pick_migration_target(dlm, res);
2830	spin_unlock(&res->spinlock);
2831
2832	if (target == O2NM_MAX_NODES)
2833		goto leave;
2834
2835	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2836	spin_unlock(&dlm->spinlock);
2837	lock_dropped = 1;
2838	ret = dlm_migrate_lockres(dlm, res, target);
2839	if (ret)
2840		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2841		     dlm->name, res->lockname.len, res->lockname.name,
2842		     target, ret);
2843	spin_lock(&dlm->spinlock);
2844leave:
2845	return lock_dropped;
2846}
2847
2848int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2849{
2850	int ret;
2851	spin_lock(&dlm->ast_lock);
2852	spin_lock(&lock->spinlock);
2853	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2854	spin_unlock(&lock->spinlock);
2855	spin_unlock(&dlm->ast_lock);
2856	return ret;
2857}
2858
2859static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2860				     struct dlm_lock_resource *res,
2861				     u8 mig_target)
2862{
2863	int can_proceed;
2864	spin_lock(&res->spinlock);
2865	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2866	spin_unlock(&res->spinlock);
2867
2868	/* target has died, so make the caller break out of the
2869	 * wait_event, but caller must recheck the domain_map */
2870	spin_lock(&dlm->spinlock);
2871	if (!test_bit(mig_target, dlm->domain_map))
2872		can_proceed = 1;
2873	spin_unlock(&dlm->spinlock);
2874	return can_proceed;
2875}
2876
2877static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2878				struct dlm_lock_resource *res)
2879{
2880	int ret;
2881	spin_lock(&res->spinlock);
2882	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2883	spin_unlock(&res->spinlock);
2884	return ret;
2885}
2886
2887
2888static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2889				       struct dlm_lock_resource *res,
2890				       u8 target)
2891{
2892	int ret = 0;
2893
2894	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2895	       res->lockname.len, res->lockname.name, dlm->node_num,
2896	       target);
2897	/* need to set MIGRATING flag on lockres.  this is done by
2898	 * ensuring that all asts have been flushed for this lockres. */
2899	spin_lock(&res->spinlock);
2900	BUG_ON(res->migration_pending);
2901	res->migration_pending = 1;
2902	/* strategy is to reserve an extra ast then release
2903	 * it below, letting the release do all of the work */
2904	__dlm_lockres_reserve_ast(res);
2905	spin_unlock(&res->spinlock);
2906
2907	/* now flush all the pending asts */
2908	dlm_kick_thread(dlm, res);
2909	/* before waiting on DIRTY, block processes which may
2910	 * try to dirty the lockres before MIGRATING is set */
2911	spin_lock(&res->spinlock);
2912	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2913	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2914	spin_unlock(&res->spinlock);
2915	/* now wait on any pending asts and the DIRTY state */
2916	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2917	dlm_lockres_release_ast(dlm, res);
2918
2919	mlog(0, "about to wait on migration_wq, dirty=%s\n",
2920	       res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2921	/* if the extra ref we just put was the final one, this
2922	 * will pass thru immediately.  otherwise, we need to wait
2923	 * for the last ast to finish. */
2924again:
2925	ret = wait_event_interruptible_timeout(dlm->migration_wq,
2926		   dlm_migration_can_proceed(dlm, res, target),
2927		   msecs_to_jiffies(1000));
2928	if (ret < 0) {
2929		mlog(0, "woken again: migrating? %s, dead? %s\n",
2930		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2931		       test_bit(target, dlm->domain_map) ? "no":"yes");
2932	} else {
2933		mlog(0, "all is well: migrating? %s, dead? %s\n",
2934		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2935		       test_bit(target, dlm->domain_map) ? "no":"yes");
2936	}
2937	if (!dlm_migration_can_proceed(dlm, res, target)) {
2938		mlog(0, "trying again...\n");
2939		goto again;
2940	}
2941
2942	ret = 0;
2943	/* did the target go down or die? */
2944	spin_lock(&dlm->spinlock);
2945	if (!test_bit(target, dlm->domain_map)) {
2946		mlog(ML_ERROR, "aha. migration target %u just went down\n",
2947		     target);
2948		ret = -EHOSTDOWN;
2949	}
2950	spin_unlock(&dlm->spinlock);
2951
2952	/*
2953	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2954	 * another try; otherwise, we are sure the MIGRATING state is there,
2955	 * drop the unneeded state which blocked threads trying to DIRTY
2956	 */
2957	spin_lock(&res->spinlock);
2958	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2959	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2960	if (!ret)
2961		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2962	else
2963		res->migration_pending = 0;
2964	spin_unlock(&res->spinlock);
2965
2966	/*
2967	 * at this point:
2968	 *
2969	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2970	 *   o there are no pending asts on this lockres
2971	 *   o all processes trying to reserve an ast on this
2972	 *     lockres must wait for the MIGRATING flag to clear
2973	 */
2974	return ret;
2975}
2976
2977/* last step in the migration process.
2978 * original master calls this to free all of the dlm_lock
2979 * structures that used to be for other nodes. */
2980static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2981				      struct dlm_lock_resource *res)
2982{
2983	struct list_head *queue = &res->granted;
2984	int i, bit;
2985	struct dlm_lock *lock, *next;
2986
2987	assert_spin_locked(&res->spinlock);
2988
2989	BUG_ON(res->owner == dlm->node_num);
2990
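	/* walk the granted, converting, and blocked queues in turn */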
2991	for (i = 0; i < 3; i++) {
2992		list_for_each_entry_safe(lock, next, queue, list) {
2993			if (lock->ml.node != dlm->node_num) {
2994				mlog(0, "putting lock for node %u\n",
2995				     lock->ml.node);
2996				/* be extra careful */
2997				BUG_ON(!list_empty(&lock->ast_list));
2998				BUG_ON(!list_empty(&lock->bast_list));
2999				BUG_ON(lock->ast_pending);
3000				BUG_ON(lock->bast_pending);
3001				dlm_lockres_clear_refmap_bit(dlm, res,
3002							     lock->ml.node);
3003				list_del_init(&lock->list);
3004				dlm_lock_put(lock);
3005				/* In a normal unlock, we would have added a
3006				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
3007				dlm_lock_put(lock);
3008			}
3009		}
3010		queue++;
3011	}
3012	bit = 0;
3013	while (1) {
3014		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
3015		if (bit >= O2NM_MAX_NODES)
3016			break;
3017		/* do not clear the local node reference, if there is a
3018		 * process holding this, let it drop the ref itself */
3019		if (bit != dlm->node_num) {
3020			mlog(0, "%s:%.*s: node %u had a ref to this "
3021			     "migrating lockres, clearing\n", dlm->name,
3022			     res->lockname.len, res->lockname.name, bit);
3023			dlm_lockres_clear_refmap_bit(dlm, res, bit);
3024		}
3025		bit++;
3026	}
3027}
3028
3029/*
3030 * Pick a node to migrate the lock resource to. This function selects a
3031 * potential target based first on the locks and then on refmap. It skips
3032 * nodes that are in the process of exiting the domain.
3033 */
3034static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
3035				    struct dlm_lock_resource *res)
3036{
3037	enum dlm_lockres_list idx;
3038	struct list_head *queue = &res->granted;
3039	struct dlm_lock *lock;
3040	int noderef;
3041	u8 nodenum = O2NM_MAX_NODES;
3042
3043	assert_spin_locked(&dlm->spinlock);
3044	assert_spin_locked(&res->spinlock);
3045
3046	/* Go through all the locks */
3047	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
3048		queue = dlm_list_idx_to_ptr(res, idx);
3049		list_for_each_entry(lock, queue, list) {
3050			if (lock->ml.node == dlm->node_num)
3051				continue;
3052			if (test_bit(lock->ml.node, dlm->exit_domain_map))
3053				continue;
3054			nodenum = lock->ml.node;
3055			goto bail;
3056		}
3057	}
3058
3059	/* Go thru the refmap */
3060	noderef = -1;
3061	while (1) {
3062		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
3063					noderef + 1);
3064		if (noderef >= O2NM_MAX_NODES)
3065			break;
3066		if (noderef == dlm->node_num)
3067			continue;
3068		if (test_bit(noderef, dlm->exit_domain_map))
3069			continue;
3070		nodenum = noderef;
3071		goto bail;
3072	}
3073
3074bail:
3075	return nodenum;
3076}
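
/*
 * Sketch only, not part of the original file: the open-coded refmap scan
 * above is equivalent to the for_each_set_bit() idiom from
 * <linux/bitops.h>:
 *
 *	for_each_set_bit(noderef, res->refmap, O2NM_MAX_NODES) {
 *		if (noderef == dlm->node_num ||
 *		    test_bit(noderef, dlm->exit_domain_map))
 *			continue;
 *		nodenum = noderef;
 *		goto bail;
 *	}
 */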
3077
3078/* this is called by the new master once all lockres
3079 * data has been received */
3080static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
3081				  struct dlm_lock_resource *res,
3082				  u8 master, u8 new_master,
3083				  struct dlm_node_iter *iter)
3084{
3085	struct dlm_migrate_request migrate;
3086	int ret, skip, status = 0;
3087	int nodenum;
3088
3089	memset(&migrate, 0, sizeof(migrate));
3090	migrate.namelen = res->lockname.len;
3091	memcpy(migrate.name, res->lockname.name, migrate.namelen);
3092	migrate.new_master = new_master;
3093	migrate.master = master;
3094
3095	ret = 0;
3096
3097	/* send message to all nodes, except the master and myself */
3098	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3099		if (nodenum == master ||
3100		    nodenum == new_master)
3101			continue;
3102
3103		/* We could race exit domain. If exited, skip. */
3104		spin_lock(&dlm->spinlock);
3105		skip = (!test_bit(nodenum, dlm->domain_map));
3106		spin_unlock(&dlm->spinlock);
3107		if (skip) {
3108			clear_bit(nodenum, iter->node_map);
3109			continue;
3110		}
3111
3112		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3113					 &migrate, sizeof(migrate), nodenum,
3114					 &status);
3115		if (ret < 0) {
3116			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
3117			     "MIGRATE_REQUEST to node %u\n", dlm->name,
3118			     migrate.namelen, migrate.name, ret, nodenum);
3119			if (!dlm_is_host_down(ret)) {
3120				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
3121				BUG();
3122			}
3123			clear_bit(nodenum, iter->node_map);
3124			ret = 0;
3125		} else if (status < 0) {
3126			mlog(0, "migrate request (node %u) returned %d!\n",
3127			     nodenum, status);
3128			ret = status;
3129		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3130			/* during the migration request we short-circuited
3131			 * the mastery of the lockres.  make sure we have
3132			 * a mastery ref for nodenum */
3133			mlog(0, "%s:%.*s: need ref for node %u\n",
3134			     dlm->name, res->lockname.len, res->lockname.name,
3135			     nodenum);
3136			spin_lock(&res->spinlock);
3137			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3138			spin_unlock(&res->spinlock);
3139		}
3140	}
3141
3142	if (ret < 0)
3143		mlog_errno(ret);
3144
3145	mlog(0, "returning ret=%d\n", ret);
3146	return ret;
3147}
3148
3149
3150/* if there is an existing mle for this lockres, we now know who the master is.
3151 * (the one who sent us *this* message) we can clear it up right away.
3152 * since the process that put the mle on the list still has a reference to it,
3153 * we can unhash it now, set the master and wake the process.  as a result,
3154 * we will have no mle in the list to start with.  now we can add an mle for
3155 * the migration and this should be the only one found for those scanning the
3156 * list.  */
3157int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3158				void **ret_data)
3159{
3160	struct dlm_ctxt *dlm = data;
3161	struct dlm_lock_resource *res = NULL;
3162	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3163	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3164	const char *name;
3165	unsigned int namelen, hash;
3166	int ret = 0;
3167
3168	if (!dlm_grab(dlm))
3169		return 0;
3170
3171	name = migrate->name;
3172	namelen = migrate->namelen;
3173	hash = dlm_lockid_hash(name, namelen);
3174
3175	/* preallocate.. if this fails, abort */
3176	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3177
3178	if (!mle) {
3179		ret = -ENOMEM;
3180		goto leave;
3181	}
3182
3183	/* check for pre-existing lock */
3184	spin_lock(&dlm->spinlock);
3185	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3186	if (res) {
3187		spin_lock(&res->spinlock);
3188		if (res->state & DLM_LOCK_RES_RECOVERING) {
3189			/* if all is working ok, this can only mean that we got
3190			 * a migrate request from a node that we now see as
3191			 * dead.  what can we do here?  drop it to the floor? */
3192			spin_unlock(&res->spinlock);
3193			mlog(ML_ERROR, "Got a migrate request, but the "
3194			     "lockres is marked as recovering!\n");
3195			kmem_cache_free(dlm_mle_cache, mle);
3196			ret = -EINVAL; /* need a better solution */
3197			goto unlock;
3198		}
3199		res->state |= DLM_LOCK_RES_MIGRATING;
3200		spin_unlock(&res->spinlock);
3201	}
3202
3203	spin_lock(&dlm->master_lock);
3204	/* ignore status.  only nonzero status would BUG. */
3205	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3206				    name, namelen,
3207				    migrate->new_master,
3208				    migrate->master);
3209
3210	spin_unlock(&dlm->master_lock);
3211unlock:
3212	spin_unlock(&dlm->spinlock);
3213
3214	if (oldmle) {
3215		/* master is known, detach if not already detached */
3216		dlm_mle_detach_hb_events(dlm, oldmle);
3217		dlm_put_mle(oldmle);
3218	}
3219
3220	if (res)
3221		dlm_lockres_put(res);
3222leave:
3223	dlm_put(dlm);
3224	return ret;
3225}
3226
3227/* must be holding dlm->spinlock and dlm->master_lock
3228 * when adding a migration mle.  we can clear any other mles
3229 * in the master list because we know with certainty that
3230 * the master is "master".  so we remove any old mle from
3231 * the list after setting its master field, and then add
3232 * the new migration mle.  this way we can hold to the rule
3233 * of having only one mle for a given lock name at all times. */
3234static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3235				 struct dlm_lock_resource *res,
3236				 struct dlm_master_list_entry *mle,
3237				 struct dlm_master_list_entry **oldmle,
3238				 const char *name, unsigned int namelen,
3239				 u8 new_master, u8 master)
3240{
3241	int found;
3242	int ret = 0;
3243
3244	*oldmle = NULL;
3245
3246	assert_spin_locked(&dlm->spinlock);
3247	assert_spin_locked(&dlm->master_lock);
3248
3249	/* caller is responsible for any ref taken here on oldmle */
3250	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3251	if (found) {
3252		struct dlm_master_list_entry *tmp = *oldmle;
3253		spin_lock(&tmp->spinlock);
3254		if (tmp->type == DLM_MLE_MIGRATION) {
3255			if (master == dlm->node_num) {
3256				/* ah another process raced me to it */
3257				mlog(0, "tried to migrate %.*s, but some "
3258				     "process beat me to it\n",
3259				     namelen, name);
3260				spin_unlock(&tmp->spinlock);
3261				return -EEXIST;
3262			} else {
3263				/* bad.  2 NODES are trying to migrate! */
3264				mlog(ML_ERROR, "migration error, mle: "
3265				     "master=%u new_master=%u // request: "
3266				     "master=%u new_master=%u // "
3267				     "lockres=%.*s\n",
3268				     tmp->master, tmp->new_master,
3269				     master, new_master,
3270				     namelen, name);
3271				BUG();
3272			}
3273		} else {
3274			/* this is essentially what assert_master does */
3275			tmp->master = master;
3276			atomic_set(&tmp->woken, 1);
3277			wake_up(&tmp->wq);
3278			/* remove it so that only one mle will be found */
3279			__dlm_unlink_mle(dlm, tmp);
3280			__dlm_mle_detach_hb_events(dlm, tmp);
3281			if (tmp->type == DLM_MLE_MASTER) {
3282				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3283				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3284						"telling master to get ref "
3285						"for cleared out mle during "
3286						"migration\n", dlm->name,
3287						namelen, name, master,
3288						new_master);
3289			}
3290		}
3291		spin_unlock(&tmp->spinlock);
3292	}
3293
3294	/* now add a migration mle to the tail of the list */
3295	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3296	mle->new_master = new_master;
3297	/* the new master will be sending an assert master for this.
3298	 * at that point we will get the refmap reference */
3299	mle->master = master;
3300	/* do this for consistency with other mle types */
3301	set_bit(new_master, mle->maybe_map);
3302	__dlm_insert_mle(dlm, mle);
3303
3304	return ret;
3305}
3306
3307/*
3308 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3309 */
3310static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3311					struct dlm_master_list_entry *mle)
3312{
3313	struct dlm_lock_resource *res;
3314
3315	/* Find the lockres associated to the mle and set its owner to UNK */
3316	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3317				   mle->mnamehash);
3318	if (res) {
3319		spin_unlock(&dlm->master_lock);
3320
3321		/* move lockres onto recovery list */
3322		spin_lock(&res->spinlock);
3323		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3324		dlm_move_lockres_to_recovery_list(dlm, res);
3325		spin_unlock(&res->spinlock);
3326		dlm_lockres_put(res);
3327
3328		/* about to get rid of mle, detach from heartbeat */
3329		__dlm_mle_detach_hb_events(dlm, mle);
3330
3331		/* dump the mle */
3332		spin_lock(&dlm->master_lock);
3333		__dlm_put_mle(mle);
3334		spin_unlock(&dlm->master_lock);
3335	}
3336
3337	return res;
3338}
3339
3340static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3341				    struct dlm_master_list_entry *mle)
3342{
3343	__dlm_mle_detach_hb_events(dlm, mle);
3344
3345	spin_lock(&mle->spinlock);
3346	__dlm_unlink_mle(dlm, mle);
3347	atomic_set(&mle->woken, 1);
3348	spin_unlock(&mle->spinlock);
3349
3350	wake_up(&mle->wq);
3351}
3352
3353static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3354				struct dlm_master_list_entry *mle, u8 dead_node)
3355{
3356	int bit;
3357
3358	BUG_ON(mle->type != DLM_MLE_BLOCK);
3359
3360	spin_lock(&mle->spinlock);
3361	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3362	if (bit != dead_node) {
3363		mlog(0, "mle found, but dead node %u would not have been "
3364		     "master\n", dead_node);
3365		spin_unlock(&mle->spinlock);
3366	} else {
3367		/* Must drop the refcount by one since the assert_master will
3368		 * never arrive. This may result in the mle being unlinked and
3369		 * freed, but there may still be a process waiting in the
3370		 * dlmlock path which is fine. */
3371		mlog(0, "node %u was expected master\n", dead_node);
3372		atomic_set(&mle->woken, 1);
3373		spin_unlock(&mle->spinlock);
3374		wake_up(&mle->wq);
3375
3376		/* Do not need events any longer, so detach from heartbeat */
3377		__dlm_mle_detach_hb_events(dlm, mle);
3378		__dlm_put_mle(mle);
3379	}
3380}
3381
3382void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3383{
3384	struct dlm_master_list_entry *mle;
3385	struct dlm_lock_resource *res;
3386	struct hlist_head *bucket;
3387	struct hlist_node *tmp;
3388	unsigned int i;
3389
3390	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3391top:
3392	assert_spin_locked(&dlm->spinlock);
3393
3394	/* clean the master list */
3395	spin_lock(&dlm->master_lock);
3396	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3397		bucket = dlm_master_hash(dlm, i);
3398		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3399			BUG_ON(mle->type != DLM_MLE_BLOCK &&
3400			       mle->type != DLM_MLE_MASTER &&
3401			       mle->type != DLM_MLE_MIGRATION);
3402
3403			/* MASTER mles are initiated locally. The waiting
3404			 * process will notice the node map change shortly.
3405			 * Let that happen as normal. */
3406			if (mle->type == DLM_MLE_MASTER)
3407				continue;
3408
3409			/* BLOCK mles are initiated by other nodes. Need to
3410			 * clean up if the dead node would have been the
3411			 * master. */
3412			if (mle->type == DLM_MLE_BLOCK) {
3413				dlm_clean_block_mle(dlm, mle, dead_node);
3414				continue;
3415			}
3416
3417			/* Everything else is a MIGRATION mle */
3418
3419			/* The rule for MIGRATION mles is that the master
3420			 * becomes UNKNOWN if *either* the original or the new
3421			 * master dies. All UNKNOWN lockres' are sent to
3422			 * whichever node becomes the recovery master. The new
3423			 * master is responsible for determining if there is
3424			 * still a master for this lockres, or if it needs to
3425			 * take over mastery. Either way, this node should
3426			 * expect another message to resolve this. */
3427
3428			if (mle->master != dead_node &&
3429			    mle->new_master != dead_node)
3430				continue;
3431
3432			if (mle->new_master == dead_node && mle->inuse) {
3433				mlog(ML_NOTICE, "%s: target %u died during "
3434						"migration from %u, the MLE is "
3435						"still in use, ignoring it!\n",
3436						dlm->name, dead_node,
3437						mle->master);
3438				continue;
3439			}
3440
3441			/* If we have reached this point, this mle needs to be
3442			 * removed from the list and freed. */
3443			dlm_clean_migration_mle(dlm, mle);
3444
3445			mlog(0, "%s: node %u died during migration from "
3446			     "%u to %u!\n", dlm->name, dead_node, mle->master,
3447			     mle->new_master);
3448
3449			/* If we find a lockres associated with the mle, we've
3450			 * hit this rare case that messes up our lock ordering.
3451			 * If so, we need to drop the master lock so that we can
3452			 * take the lockres lock, meaning that we will have to
3453			 * restart from the head of list. */
3454			res = dlm_reset_mleres_owner(dlm, mle);
3455			if (res)
3456				/* restart */
3457				goto top;
3458
3459			/* This may be the last reference */
3460			__dlm_put_mle(mle);
3461		}
3462	}
3463	spin_unlock(&dlm->master_lock);
3464}
3465
3466int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3467			 u8 old_master)
3468{
3469	struct dlm_node_iter iter;
3470	int ret = 0;
3471
3472	spin_lock(&dlm->spinlock);
3473	dlm_node_iter_init(dlm->domain_map, &iter);
3474	clear_bit(old_master, iter.node_map);
3475	clear_bit(dlm->node_num, iter.node_map);
3476	spin_unlock(&dlm->spinlock);
3477
3478	/* ownership of the lockres is changing.  account for the
3479	 * mastery reference here since old_master will briefly have
3480	 * a reference after the migration completes */
3481	spin_lock(&res->spinlock);
3482	dlm_lockres_set_refmap_bit(dlm, res, old_master);
3483	spin_unlock(&res->spinlock);
3484
3485	mlog(0, "now time to do a migrate request to other nodes\n");
3486	ret = dlm_do_migrate_request(dlm, res, old_master,
3487				     dlm->node_num, &iter);
3488	if (ret < 0) {
3489		mlog_errno(ret);
3490		goto leave;
3491	}
3492
3493	mlog(0, "doing assert master of %.*s to all except the original node\n",
3494	     res->lockname.len, res->lockname.name);
3495	/* this call now finishes out the nodemap
3496	 * even if one or more nodes die */
3497	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3498				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3499	if (ret < 0) {
3500		/* no longer need to retry.  all living nodes contacted. */
3501		mlog_errno(ret);
3502		ret = 0;
3503	}
3504
3505	memset(iter.node_map, 0, sizeof(iter.node_map));
3506	set_bit(old_master, iter.node_map);
3507	mlog(0, "doing assert master of %.*s back to %u\n",
3508	     res->lockname.len, res->lockname.name, old_master);
3509	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3510				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3511	if (ret < 0) {
3512		mlog(0, "assert master to original master failed "
3513		     "with %d.\n", ret);
3514		/* the only nonzero status here would be because of
3515		 * a dead original node.  we're done. */
3516		ret = 0;
3517	}
3518
3519	/* all done, set the owner, clear the flag */
3520	spin_lock(&res->spinlock);
3521	dlm_set_lockres_owner(dlm, res, dlm->node_num);
3522	res->state &= ~DLM_LOCK_RES_MIGRATING;
3523	spin_unlock(&res->spinlock);
3524	/* re-dirty it on the new master */
3525	dlm_kick_thread(dlm, res);
3526	wake_up(&res->wq);
3527leave:
3528	return ret;
3529}
3530
3531/*
3532 * LOCKRES AST REFCOUNT
3533 * this is integral to migration
3534 */
3535
3536/* for future intent to call an ast, reserve one ahead of time.
3537 * this should be called only after waiting on the lockres
3538 * with dlm_wait_on_lockres, and while still holding the
3539 * spinlock after the call. */
3540void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3541{
3542	assert_spin_locked(&res->spinlock);
3543	if (res->state & DLM_LOCK_RES_MIGRATING) {
3544		__dlm_print_one_lock_resource(res);
3545	}
3546	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3547
3548	atomic_inc(&res->asts_reserved);
3549}
3550
3551/*
3552 * used to drop the reserved ast, either because it went unused,
3553 * or because the ast/bast was actually called.
3554 *
3555 * also, if there is a pending migration on this lockres,
3556 * and this was the last pending ast on the lockres,
3557 * atomically set the MIGRATING flag before we drop the lock.
3558 * this is how we ensure that migration can proceed with no
3559 * asts in progress.  note that it is ok if the state of the
3560 * queues is such that a lock should be granted in the future
3561 * or that a bast should be fired, because the new master will
3562 * shuffle the lists on this lockres as soon as it is migrated.
3563 */
3564void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3565			     struct dlm_lock_resource *res)
3566{
3567	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3568		return;
3569
3570	if (!res->migration_pending) {
3571		spin_unlock(&res->spinlock);
3572		return;
3573	}
3574
3575	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3576	res->migration_pending = 0;
3577	res->state |= DLM_LOCK_RES_MIGRATING;
3578	spin_unlock(&res->spinlock);
3579	wake_up(&res->wq);
3580	wake_up(&dlm->migration_wq);
3581}
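
/*
 * Illustrative pairing, not from the original file: every reservation
 * must be balanced by exactly one release, the same pattern
 * dlm_assert_master_worker() uses above:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...fire the ast/bast...
 *	dlm_lockres_release_ast(dlm, res);
 */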
3582
3583void dlm_force_free_mles(struct dlm_ctxt *dlm)
3584{
3585	int i;
3586	struct hlist_head *bucket;
3587	struct dlm_master_list_entry *mle;
3588	struct hlist_node *tmp;
3589
3590	/*
3591	 * We notified all other nodes that we are exiting the domain and
3592	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3593	 * around we force free them and wake any processes that are waiting
3594	 * on the mles
3595	 */
3596	spin_lock(&dlm->spinlock);
3597	spin_lock(&dlm->master_lock);
3598
3599	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3600	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3601
3602	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3603		bucket = dlm_master_hash(dlm, i);
3604		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3605			if (mle->type != DLM_MLE_BLOCK) {
3606				mlog(ML_ERROR, "bad mle: %p\n", mle);
3607				dlm_print_one_mle(mle);
3608			}
3609			atomic_set(&mle->woken, 1);
3610			wake_up(&mle->wq);
3611
3612			__dlm_unlink_mle(dlm, mle);
3613			__dlm_mle_detach_hb_events(dlm, mle);
3614			__dlm_put_mle(mle);
3615		}
3616	}
3617	spin_unlock(&dlm->master_lock);
3618	spin_unlock(&dlm->spinlock);
3619}