   1/******************************************************************************
   2*******************************************************************************
   3**
   4**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
   5**
   6**  This copyrighted material is made available to anyone wishing to use,
   7**  modify, copy, or redistribute it subject to the terms and conditions
   8**  of the GNU General Public License v.2.
   9**
  10*******************************************************************************
  11******************************************************************************/
  12
  13/* Central locking logic has four stages:
  14
  15   dlm_lock()
  16   dlm_unlock()
  17
  18   request_lock(ls, lkb)
  19   convert_lock(ls, lkb)
  20   unlock_lock(ls, lkb)
  21   cancel_lock(ls, lkb)
  22
  23   _request_lock(r, lkb)
  24   _convert_lock(r, lkb)
  25   _unlock_lock(r, lkb)
  26   _cancel_lock(r, lkb)
  27
  28   do_request(r, lkb)
  29   do_convert(r, lkb)
  30   do_unlock(r, lkb)
  31   do_cancel(r, lkb)
  32
  33   Stage 1 (lock, unlock) is mainly about checking input args and
  34   splitting into one of the four main operations:
  35
  36       dlm_lock          = request_lock
  37       dlm_lock+CONVERT  = convert_lock
  38       dlm_unlock        = unlock_lock
  39       dlm_unlock+CANCEL = cancel_lock
  40
  41   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
  42   provided to the next stage.
  43
  44   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
  45   When remote, it calls send_xxxx(), when local it calls do_xxxx().
  46
  47   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
  48   given rsb and lkb and queues callbacks.
  49
  50   For remote operations, send_xxxx() results in the corresponding do_xxxx()
  51   function being executed on the remote node.  The connecting send/receive
  52   calls on local (L) and remote (R) nodes:
  53
  54   L: send_xxxx()              ->  R: receive_xxxx()
  55                                   R: do_xxxx()
  56   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
  57*/
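/* Example (illustrative sketch): a kernel caller drives the stage-1 entry
   points roughly like this, where "ls" comes from dlm_new_lockspace() and
   my_ast/my_bast/my_arg are placeholder callbacks and argument supplied by
   the caller:

       struct dlm_lksb lksb;

       error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "my_resource", 11,
                        0, my_ast, my_arg, my_bast);
       ...
       error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, my_arg);

   The dlm_lock() call goes through request_lock -> _request_lock and then
   either do_request() locally or send_request() to the master node; the
   dlm_unlock() call takes the unlock_lock -> _unlock_lock ->
   do_unlock()/send_unlock() path. */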
  58#include <linux/types.h>
  59#include <linux/slab.h>
  60#include "dlm_internal.h"
  61#include <linux/dlm_device.h>
  62#include "memory.h"
  63#include "lowcomms.h"
  64#include "requestqueue.h"
  65#include "util.h"
  66#include "dir.h"
  67#include "member.h"
  68#include "lockspace.h"
  69#include "ast.h"
  70#include "lock.h"
  71#include "rcom.h"
  72#include "recover.h"
  73#include "lvb_table.h"
  74#include "user.h"
  75#include "config.h"
  76
  77static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
  78static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
  79static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  80static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
  81static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
  82static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
  83static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
  84static int send_remove(struct dlm_rsb *r);
  85static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  86static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  87static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
  88				    struct dlm_message *ms);
  89static int receive_extralen(struct dlm_message *ms);
  90static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
  91static void del_timeout(struct dlm_lkb *lkb);
  92
  93/*
  94 * Lock compatibility matrix - thanks Steve
  95 * UN = Unlocked state. Not really a state, used as a flag
  96 * PD = Padding. Used to make the matrix a nice power of two in size
  97 * Other states are the same as the VMS DLM.
  98 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
  99 */
 100
 101static const int __dlm_compat_matrix[8][8] = {
 102      /* UN NL CR CW PR PW EX PD */
 103        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
 104        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
 105        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
 106        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
 107        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
 108        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
 109        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
 110        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 111};
 112
 113/*
 114 * This defines the direction of transfer of LVB data.
 115 * Granted mode is the row; requested mode is the column.
 116 * Usage: matrix[grmode+1][rqmode+1]
 117 * 1 = LVB is returned to the caller
 118 * 0 = LVB is written to the resource
 119 * -1 = nothing happens to the LVB
 120 */
 121
 122const int dlm_lvb_operations[8][8] = {
 123        /* UN   NL  CR  CW  PR  PW  EX  PD*/
 124        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
 125        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
 126        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
 127        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
 128        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
 129        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
 130        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
 131        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
 132};
 133
 134#define modes_compat(gr, rq) \
 135	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
 136
 137int dlm_modes_compat(int mode1, int mode2)
 138{
 139	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
 140}
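/* For example, using the table above, two PR (protected read) locks are
   compatible while PR and EX are not:

       dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1
       dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0

   modes_compat() applies the same table to a pair of lkb's, taking the
   granted mode of the first and the requested mode of the second. */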
 141
 142/*
 143 * Compatibility matrix for conversions with QUECVT set.
 144 * Granted mode is the row; requested mode is the column.
 145 * Usage: matrix[grmode+1][rqmode+1]
 146 */
 147
 148static const int __quecvt_compat_matrix[8][8] = {
 149      /* UN NL CR CW PR PW EX PD */
 150        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
 151        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
 152        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
 153        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
 154        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
 155        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
 156        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
 157        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 158};
 159
 160void dlm_print_lkb(struct dlm_lkb *lkb)
 161{
 162	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
 163	       "     status %d rqmode %d grmode %d wait_type %d\n",
 164	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
 165	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
 166	       lkb->lkb_grmode, lkb->lkb_wait_type);
 167}
 168
 169static void dlm_print_rsb(struct dlm_rsb *r)
 170{
 171	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
 172	       r->res_nodeid, r->res_flags, r->res_first_lkid,
 173	       r->res_recover_locks_count, r->res_name);
 174}
 175
 176void dlm_dump_rsb(struct dlm_rsb *r)
 177{
 178	struct dlm_lkb *lkb;
 179
 180	dlm_print_rsb(r);
 181
 182	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
 183	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
 184	printk(KERN_ERR "rsb lookup list\n");
 185	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
 186		dlm_print_lkb(lkb);
 187	printk(KERN_ERR "rsb grant queue:\n");
 188	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
 189		dlm_print_lkb(lkb);
 190	printk(KERN_ERR "rsb convert queue:\n");
 191	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
 192		dlm_print_lkb(lkb);
 193	printk(KERN_ERR "rsb wait queue:\n");
 194	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
 195		dlm_print_lkb(lkb);
 196}
 197
 198/* Threads cannot use the lockspace while it's being recovered */
 199
 200static inline void dlm_lock_recovery(struct dlm_ls *ls)
 201{
 202	down_read(&ls->ls_in_recovery);
 203}
 204
 205void dlm_unlock_recovery(struct dlm_ls *ls)
 206{
 207	up_read(&ls->ls_in_recovery);
 208}
 209
 210int dlm_lock_recovery_try(struct dlm_ls *ls)
 211{
 212	return down_read_trylock(&ls->ls_in_recovery);
 213}
 214
 215static inline int can_be_queued(struct dlm_lkb *lkb)
 216{
 217	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
 218}
 219
 220static inline int force_blocking_asts(struct dlm_lkb *lkb)
 221{
 222	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
 223}
 224
 225static inline int is_demoted(struct dlm_lkb *lkb)
 226{
 227	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
 228}
 229
 230static inline int is_altmode(struct dlm_lkb *lkb)
 231{
 232	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
 233}
 234
 235static inline int is_granted(struct dlm_lkb *lkb)
 236{
 237	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
 238}
 239
 240static inline int is_remote(struct dlm_rsb *r)
 241{
 242	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
 243	return !!r->res_nodeid;
 244}
 245
 246static inline int is_process_copy(struct dlm_lkb *lkb)
 247{
 248	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
 249}
 250
 251static inline int is_master_copy(struct dlm_lkb *lkb)
 252{
 253	if (lkb->lkb_flags & DLM_IFL_MSTCPY)
 254		DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
 255	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
 256}
 257
 258static inline int middle_conversion(struct dlm_lkb *lkb)
 259{
 260	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
 261	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
 262		return 1;
 263	return 0;
 264}
 265
 266static inline int down_conversion(struct dlm_lkb *lkb)
 267{
 268	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
 269}
 270
 271static inline int is_overlap_unlock(struct dlm_lkb *lkb)
 272{
 273	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
 274}
 275
 276static inline int is_overlap_cancel(struct dlm_lkb *lkb)
 277{
 278	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
 279}
 280
 281static inline int is_overlap(struct dlm_lkb *lkb)
 282{
 283	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
 284				  DLM_IFL_OVERLAP_CANCEL));
 285}
 286
 287static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
 288{
 289	if (is_master_copy(lkb))
 290		return;
 291
 292	del_timeout(lkb);
 293
 294	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
 295
 296	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
 297	   timeout caused the cancel then return -ETIMEDOUT */
 298	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
 299		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
 300		rv = -ETIMEDOUT;
 301	}
 302
 303	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
 304		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
 305		rv = -EDEADLK;
 306	}
 307
 308	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
 309}
 310
 311static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
 312{
 313	queue_cast(r, lkb,
 314		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
 315}
 316
 317static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
 318{
 319	if (is_master_copy(lkb)) {
 320		send_bast(r, lkb, rqmode);
 321	} else {
 322		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
 323	}
 324}
 325
 326/*
 327 * Basic operations on rsb's and lkb's
 328 */
 329
 330static int pre_rsb_struct(struct dlm_ls *ls)
 331{
 332	struct dlm_rsb *r1, *r2;
 333	int count = 0;
 334
 335	spin_lock(&ls->ls_new_rsb_spin);
 336	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
 337		spin_unlock(&ls->ls_new_rsb_spin);
 338		return 0;
 339	}
 340	spin_unlock(&ls->ls_new_rsb_spin);
 341
 342	r1 = dlm_allocate_rsb(ls);
 343	r2 = dlm_allocate_rsb(ls);
 344
 345	spin_lock(&ls->ls_new_rsb_spin);
 346	if (r1) {
 347		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
 348		ls->ls_new_rsb_count++;
 349	}
 350	if (r2) {
 351		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
 352		ls->ls_new_rsb_count++;
 353	}
 354	count = ls->ls_new_rsb_count;
 355	spin_unlock(&ls->ls_new_rsb_spin);
 356
 357	if (!count)
 358		return -ENOMEM;
 359	return 0;
 360}
 361
 362/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
 363   unlock any spinlocks, go back and call pre_rsb_struct again.
 364   Otherwise, take an rsb off the list and return it. */
 365
 366static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
 367			  struct dlm_rsb **r_ret)
 368{
 369	struct dlm_rsb *r;
 370	int count;
 371
 372	spin_lock(&ls->ls_new_rsb_spin);
 373	if (list_empty(&ls->ls_new_rsb)) {
 374		count = ls->ls_new_rsb_count;
 375		spin_unlock(&ls->ls_new_rsb_spin);
 376		log_debug(ls, "find_rsb retry %d %d %s",
 377			  count, dlm_config.ci_new_rsb_count, name);
 378		return -EAGAIN;
 379	}
 380
 381	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
 382	list_del(&r->res_hashchain);
 383	ls->ls_new_rsb_count--;
 384	spin_unlock(&ls->ls_new_rsb_spin);
 385
 386	r->res_ls = ls;
 387	r->res_length = len;
 388	memcpy(r->res_name, name, len);
 389	mutex_init(&r->res_mutex);
 390
 391	INIT_LIST_HEAD(&r->res_hashchain);
 392	INIT_LIST_HEAD(&r->res_lookup);
 393	INIT_LIST_HEAD(&r->res_grantqueue);
 394	INIT_LIST_HEAD(&r->res_convertqueue);
 395	INIT_LIST_HEAD(&r->res_waitqueue);
 396	INIT_LIST_HEAD(&r->res_root_list);
 397	INIT_LIST_HEAD(&r->res_recover_list);
 398
 399	*r_ret = r;
 400	return 0;
 401}
 402
 403static int search_rsb_list(struct list_head *head, char *name, int len,
 404			   unsigned int flags, struct dlm_rsb **r_ret)
 405{
 406	struct dlm_rsb *r;
 407	int error = 0;
 408
 409	list_for_each_entry(r, head, res_hashchain) {
 410		if (len == r->res_length && !memcmp(name, r->res_name, len))
 411			goto found;
 412	}
 413	*r_ret = NULL;
 414	return -EBADR;
 415
 416 found:
 417	if (r->res_nodeid && (flags & R_MASTER))
 418		error = -ENOTBLK;
 419	*r_ret = r;
 420	return error;
 421}
 422
 423static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
 424		       unsigned int flags, struct dlm_rsb **r_ret)
 425{
 426	struct dlm_rsb *r;
 427	int error;
 428
 429	error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
 430	if (!error) {
 431		kref_get(&r->res_ref);
 432		goto out;
 433	}
 434	error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
 435	if (error)
 436		goto out;
 437
 438	list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
 439
 440	if (dlm_no_directory(ls))
 441		goto out;
 442
 443	if (r->res_nodeid == -1) {
 444		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
 445		r->res_first_lkid = 0;
 446	} else if (r->res_nodeid > 0) {
 447		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
 448		r->res_first_lkid = 0;
 449	} else {
 450		DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
 451		DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
 452	}
 453 out:
 454	*r_ret = r;
 455	return error;
 456}
 457
 458/*
 459 * Find rsb in rsbtbl and potentially create/add one
 460 *
 461 * Delaying the release of rsb's has a similar benefit to applications keeping
 462 * NL locks on an rsb, but without the guarantee that the cached master value
 463 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 464 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 465 * to excessive master lookups and removals if we don't delay the release.
 466 *
 467 * Searching for an rsb means looking through both the normal list and toss
 468 * list.  When found on the toss list the rsb is moved to the normal list with
 469 * ref count of 1; when found on normal list the ref count is incremented.
 470 */
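/* A sketch of the resulting rsb lifecycle for a local user of this file:

       find_rsb(ls, name, len, R_CREATE, &r);    reference taken
       lock_rsb(r);  ...use r...  unlock_rsb(r);
       put_rsb(r);                               last ref -> toss_rsb()

   Once on the toss list the rsb sits idle until a later find_rsb() moves
   it back to the normal list, or shrink_bucket() frees it after
   dlm_config.ci_toss_secs have elapsed. */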
 471
 472static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
 473		    unsigned int flags, struct dlm_rsb **r_ret)
 474{
 475	struct dlm_rsb *r = NULL;
 476	uint32_t hash, bucket;
 477	int error;
 478
 479	if (namelen > DLM_RESNAME_MAXLEN) {
 480		error = -EINVAL;
 481		goto out;
 482	}
 483
 484	if (dlm_no_directory(ls))
 485		flags |= R_CREATE;
 486
 487	hash = jhash(name, namelen, 0);
 488	bucket = hash & (ls->ls_rsbtbl_size - 1);
 489
 490 retry:
 491	if (flags & R_CREATE) {
 492		error = pre_rsb_struct(ls);
 493		if (error < 0)
 494			goto out;
 495	}
 496
 497	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 498
 499	error = _search_rsb(ls, name, namelen, bucket, flags, &r);
 500	if (!error)
 501		goto out_unlock;
 502
 503	if (error == -EBADR && !(flags & R_CREATE))
 504		goto out_unlock;
 505
 506	/* the rsb was found but wasn't a master copy */
 507	if (error == -ENOTBLK)
 508		goto out_unlock;
 509
 510	error = get_rsb_struct(ls, name, namelen, &r);
 511	if (error == -EAGAIN) {
 512		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 513		goto retry;
 514	}
 515	if (error)
 516		goto out_unlock;
 517
 518	r->res_hash = hash;
 519	r->res_bucket = bucket;
 520	r->res_nodeid = -1;
 521	kref_init(&r->res_ref);
 522
 523	/* With no directory, the master can be set immediately */
 524	if (dlm_no_directory(ls)) {
 525		int nodeid = dlm_dir_nodeid(r);
 526		if (nodeid == dlm_our_nodeid())
 527			nodeid = 0;
 528		r->res_nodeid = nodeid;
 529	}
 530	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
 531	error = 0;
 532 out_unlock:
 533	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 534 out:
 535	*r_ret = r;
 536	return error;
 537}
 538
 539/* This is only called to add a reference when the code already holds
 540   a valid reference to the rsb, so there's no need for locking. */
 541
 542static inline void hold_rsb(struct dlm_rsb *r)
 543{
 544	kref_get(&r->res_ref);
 545}
 546
 547void dlm_hold_rsb(struct dlm_rsb *r)
 548{
 549	hold_rsb(r);
 550}
 551
 552static void toss_rsb(struct kref *kref)
 553{
 554	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
 555	struct dlm_ls *ls = r->res_ls;
 556
 557	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
 558	kref_init(&r->res_ref);
 559	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
 560	r->res_toss_time = jiffies;
 561	if (r->res_lvbptr) {
 562		dlm_free_lvb(r->res_lvbptr);
 563		r->res_lvbptr = NULL;
 564	}
 565}
 566
 567/* When all references to the rsb are gone it's transferred to
 568   the tossed list for later disposal. */
 569
 570static void put_rsb(struct dlm_rsb *r)
 571{
 572	struct dlm_ls *ls = r->res_ls;
 573	uint32_t bucket = r->res_bucket;
 574
 575	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 576	kref_put(&r->res_ref, toss_rsb);
 577	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 578}
 579
 580void dlm_put_rsb(struct dlm_rsb *r)
 581{
 582	put_rsb(r);
 583}
 584
 585/* See comment for unhold_lkb */
 586
 587static void unhold_rsb(struct dlm_rsb *r)
 588{
 589	int rv;
 590	rv = kref_put(&r->res_ref, toss_rsb);
 591	DLM_ASSERT(!rv, dlm_dump_rsb(r););
 592}
 593
 594static void kill_rsb(struct kref *kref)
 595{
 596	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
 597
 598	/* All work is done after the return from kref_put() so we
 599	   can release the write_lock before the remove and free. */
 600
 601	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
 602	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
 603	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
 604	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
 605	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
 606	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
 607}
 608
 609/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
 610   The rsb must exist as long as any lkb's for it do. */
 611
 612static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
 613{
 614	hold_rsb(r);
 615	lkb->lkb_resource = r;
 616}
 617
 618static void detach_lkb(struct dlm_lkb *lkb)
 619{
 620	if (lkb->lkb_resource) {
 621		put_rsb(lkb->lkb_resource);
 622		lkb->lkb_resource = NULL;
 623	}
 624}
 625
 626static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
 627{
 628	struct dlm_lkb *lkb;
 629	int rv, id;
 630
 631	lkb = dlm_allocate_lkb(ls);
 632	if (!lkb)
 633		return -ENOMEM;
 634
 635	lkb->lkb_nodeid = -1;
 636	lkb->lkb_grmode = DLM_LOCK_IV;
 637	kref_init(&lkb->lkb_ref);
 638	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
 639	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
 640	INIT_LIST_HEAD(&lkb->lkb_time_list);
 641	INIT_LIST_HEAD(&lkb->lkb_cb_list);
 642	mutex_init(&lkb->lkb_cb_mutex);
 643	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
 644
 645 retry:
 646	rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
 647	if (!rv)
 648		return -ENOMEM;
 649
 650	spin_lock(&ls->ls_lkbidr_spin);
 651	rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
 652	if (!rv)
 653		lkb->lkb_id = id;
 654	spin_unlock(&ls->ls_lkbidr_spin);
 655
 656	if (rv == -EAGAIN)
 657		goto retry;
 658
 659	if (rv < 0) {
 660		log_error(ls, "create_lkb idr error %d", rv);
 661		return rv;
 662	}
 663
 664	*lkb_ret = lkb;
 665	return 0;
 666}
 667
 668static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
 669{
 670	struct dlm_lkb *lkb;
 671
 672	spin_lock(&ls->ls_lkbidr_spin);
 673	lkb = idr_find(&ls->ls_lkbidr, lkid);
 674	if (lkb)
 675		kref_get(&lkb->lkb_ref);
 676	spin_unlock(&ls->ls_lkbidr_spin);
 677
 678	*lkb_ret = lkb;
 679	return lkb ? 0 : -ENOENT;
 680}
 681
 682static void kill_lkb(struct kref *kref)
 683{
 684	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
 685
 686	/* All work is done after the return from kref_put() so we
 687	   can release the write_lock before the detach_lkb */
 688
 689	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
 690}
 691
 692/* __put_lkb() is used when an lkb may not have an rsb attached to
 693   it so we need to provide the lockspace explicitly */
 694
 695static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
 696{
 697	uint32_t lkid = lkb->lkb_id;
 698
 699	spin_lock(&ls->ls_lkbidr_spin);
 700	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
 701		idr_remove(&ls->ls_lkbidr, lkid);
 702		spin_unlock(&ls->ls_lkbidr_spin);
 703
 704		detach_lkb(lkb);
 705
 706		/* for local/process lkbs, lvbptr points to caller's lksb */
 707		if (lkb->lkb_lvbptr && is_master_copy(lkb))
 708			dlm_free_lvb(lkb->lkb_lvbptr);
 709		dlm_free_lkb(lkb);
 710		return 1;
 711	} else {
 712		spin_unlock(&ls->ls_lkbidr_spin);
 713		return 0;
 714	}
 715}
 716
 717int dlm_put_lkb(struct dlm_lkb *lkb)
 718{
 719	struct dlm_ls *ls;
 720
 721	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
 722	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
 723
 724	ls = lkb->lkb_resource->res_ls;
 725	return __put_lkb(ls, lkb);
 726}
 727
 728/* This is only called to add a reference when the code already holds
 729   a valid reference to the lkb, so there's no need for locking. */
 730
 731static inline void hold_lkb(struct dlm_lkb *lkb)
 732{
 733	kref_get(&lkb->lkb_ref);
 734}
 735
 736/* This is called when we need to remove a reference and are certain
 737   it's not the last ref.  e.g. del_lkb is always called between a
 738   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
 739   put_lkb would work fine, but would involve unnecessary locking */
 740
 741static inline void unhold_lkb(struct dlm_lkb *lkb)
 742{
 743	int rv;
 744	rv = kref_put(&lkb->lkb_ref, kill_lkb);
 745	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
 746}
 747
 748static void lkb_add_ordered(struct list_head *new, struct list_head *head,
 749			    int mode)
 750{
 751	struct dlm_lkb *lkb = NULL;
 752
 753	list_for_each_entry(lkb, head, lkb_statequeue)
 754		if (lkb->lkb_rqmode < mode)
 755			break;
 756
 757	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
 758}
 759
 760/* add/remove lkb to rsb's grant/convert/wait queue */
 761
 762static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
 763{
 764	kref_get(&lkb->lkb_ref);
 765
 766	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
 767
 768	lkb->lkb_timestamp = ktime_get();
 769
 770	lkb->lkb_status = status;
 771
 772	switch (status) {
 773	case DLM_LKSTS_WAITING:
 774		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
 775			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
 776		else
 777			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
 778		break;
 779	case DLM_LKSTS_GRANTED:
 780		/* convention says granted locks kept in order of grmode */
 781		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
 782				lkb->lkb_grmode);
 783		break;
 784	case DLM_LKSTS_CONVERT:
 785		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
 786			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
 787		else
 788			list_add_tail(&lkb->lkb_statequeue,
 789				      &r->res_convertqueue);
 790		break;
 791	default:
 792		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
 793	}
 794}
 795
 796static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
 797{
 798	lkb->lkb_status = 0;
 799	list_del(&lkb->lkb_statequeue);
 800	unhold_lkb(lkb);
 801}
 802
 803static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
 804{
 805	hold_lkb(lkb);
 806	del_lkb(r, lkb);
 807	add_lkb(r, lkb, sts);
 808	unhold_lkb(lkb);
 809}
 810
 811static int msg_reply_type(int mstype)
 812{
 813	switch (mstype) {
 814	case DLM_MSG_REQUEST:
 815		return DLM_MSG_REQUEST_REPLY;
 816	case DLM_MSG_CONVERT:
 817		return DLM_MSG_CONVERT_REPLY;
 818	case DLM_MSG_UNLOCK:
 819		return DLM_MSG_UNLOCK_REPLY;
 820	case DLM_MSG_CANCEL:
 821		return DLM_MSG_CANCEL_REPLY;
 822	case DLM_MSG_LOOKUP:
 823		return DLM_MSG_LOOKUP_REPLY;
 824	}
 825	return -1;
 826}
 827
 828static int nodeid_warned(int nodeid, int num_nodes, int *warned)
 829{
 830	int i;
 831
 832	for (i = 0; i < num_nodes; i++) {
 833		if (!warned[i]) {
 834			warned[i] = nodeid;
 835			return 0;
 836		}
 837		if (warned[i] == nodeid)
 838			return 1;
 839	}
 840	return 0;
 841}
 842
 843void dlm_scan_waiters(struct dlm_ls *ls)
 844{
 845	struct dlm_lkb *lkb;
 846	ktime_t zero = ktime_set(0, 0);
 847	s64 us;
 848	s64 debug_maxus = 0;
 849	u32 debug_scanned = 0;
 850	u32 debug_expired = 0;
 851	int num_nodes = 0;
 852	int *warned = NULL;
 853
 854	if (!dlm_config.ci_waitwarn_us)
 855		return;
 856
 857	mutex_lock(&ls->ls_waiters_mutex);
 858
 859	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
 860		if (ktime_equal(lkb->lkb_wait_time, zero))
 861			continue;
 862
 863		debug_scanned++;
 864
 865		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
 866
 867		if (us < dlm_config.ci_waitwarn_us)
 868			continue;
 869
 870		lkb->lkb_wait_time = zero;
 871
 872		debug_expired++;
 873		if (us > debug_maxus)
 874			debug_maxus = us;
 875
 876		if (!num_nodes) {
 877			num_nodes = ls->ls_num_nodes;
 878			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
 879		}
 880		if (!warned)
 881			continue;
 882		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
 883			continue;
 884
 885		log_error(ls, "waitwarn %x %lld %d us check connection to "
 886			  "node %d", lkb->lkb_id, (long long)us,
 887			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
 888	}
 889	mutex_unlock(&ls->ls_waiters_mutex);
 890	kfree(warned);
 891
 892	if (debug_expired)
 893		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
 894			  debug_scanned, debug_expired,
 895			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
 896}
 897
 898/* add/remove lkb from global waiters list of lkb's waiting for
 899   a reply from a remote node */
 900
 901static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
 902{
 903	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 904	int error = 0;
 905
 906	mutex_lock(&ls->ls_waiters_mutex);
 907
 908	if (is_overlap_unlock(lkb) ||
 909	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
 910		error = -EINVAL;
 911		goto out;
 912	}
 913
 914	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
 915		switch (mstype) {
 916		case DLM_MSG_UNLOCK:
 917			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
 918			break;
 919		case DLM_MSG_CANCEL:
 920			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
 921			break;
 922		default:
 923			error = -EBUSY;
 924			goto out;
 925		}
 926		lkb->lkb_wait_count++;
 927		hold_lkb(lkb);
 928
 929		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
 930			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
 931			  lkb->lkb_wait_count, lkb->lkb_flags);
 932		goto out;
 933	}
 934
 935	DLM_ASSERT(!lkb->lkb_wait_count,
 936		   dlm_print_lkb(lkb);
 937		   printk("wait_count %d\n", lkb->lkb_wait_count););
 938
 939	lkb->lkb_wait_count++;
 940	lkb->lkb_wait_type = mstype;
 941	lkb->lkb_wait_time = ktime_get();
 942	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
 943	hold_lkb(lkb);
 944	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 945 out:
 946	if (error)
 947		log_error(ls, "addwait error %x %d flags %x %d %d %s",
 948			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
 949			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
 950	mutex_unlock(&ls->ls_waiters_mutex);
 951	return error;
 952}
 953
 954/* We clear the RESEND flag because we might be taking an lkb off the waiters
 955   list as part of process_requestqueue (e.g. a lookup that has an optimized
 956   request reply on the requestqueue) between dlm_recover_waiters_pre() which
 957   set RESEND and dlm_recover_waiters_post() */
 958
 959static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
 960				struct dlm_message *ms)
 961{
 962	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 963	int overlap_done = 0;
 964
 965	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
 966		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
 967		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
 968		overlap_done = 1;
 969		goto out_del;
 970	}
 971
 972	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
 973		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
 974		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
 975		overlap_done = 1;
 976		goto out_del;
 977	}
 978
 979	/* Cancel state was preemptively cleared by a successful convert,
 980	   see next comment, nothing to do. */
 981
 982	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
 983	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
 984		log_debug(ls, "remwait %x cancel_reply wait_type %d",
 985			  lkb->lkb_id, lkb->lkb_wait_type);
 986		return -1;
 987	}
 988
 989	/* Remove for the convert reply, and preemptively remove for the
 990	   cancel reply.  A convert has been granted while there's still
 991	   an outstanding cancel on it (the cancel is moot and the result
 992	   in the cancel reply should be 0).  We preempt the cancel reply
 993	   because the app gets the convert result and then can follow up
 994	   with another op, like convert.  This subsequent op would see the
 995	   lingering state of the cancel and fail with -EBUSY. */
 996
 997	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
 998	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
 999	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1000		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1001			  lkb->lkb_id);
1002		lkb->lkb_wait_type = 0;
1003		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1004		lkb->lkb_wait_count--;
1005		goto out_del;
1006	}
1007
1008	/* N.B. type of reply may not always correspond to type of original
1009	   msg due to lookup->request optimization, verify others? */
1010
1011	if (lkb->lkb_wait_type) {
1012		lkb->lkb_wait_type = 0;
1013		goto out_del;
1014	}
1015
1016	log_error(ls, "remwait error %x reply %d flags %x no wait_type",
1017		  lkb->lkb_id, mstype, lkb->lkb_flags);
1018	return -1;
1019
1020 out_del:
1021	/* the force-unlock/cancel has completed and we haven't recvd a reply
1022	   to the op that was in progress prior to the unlock/cancel; we
1023	   give up on any reply to the earlier op.  FIXME: not sure when/how
1024	   this would happen */
1025
1026	if (overlap_done && lkb->lkb_wait_type) {
1027		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1028			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1029		lkb->lkb_wait_count--;
1030		lkb->lkb_wait_type = 0;
1031	}
1032
1033	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1034
1035	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1036	lkb->lkb_wait_count--;
1037	if (!lkb->lkb_wait_count)
1038		list_del_init(&lkb->lkb_wait_reply);
1039	unhold_lkb(lkb);
1040	return 0;
1041}
1042
1043static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1044{
1045	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1046	int error;
1047
1048	mutex_lock(&ls->ls_waiters_mutex);
1049	error = _remove_from_waiters(lkb, mstype, NULL);
1050	mutex_unlock(&ls->ls_waiters_mutex);
1051	return error;
1052}
1053
1054/* Handles situations where we might be processing a "fake" or "stub" reply in
1055   which we can't try to take waiters_mutex again. */
1056
1057static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1058{
1059	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1060	int error;
1061
1062	if (ms->m_flags != DLM_IFL_STUB_MS)
1063		mutex_lock(&ls->ls_waiters_mutex);
1064	error = _remove_from_waiters(lkb, ms->m_type, ms);
1065	if (ms->m_flags != DLM_IFL_STUB_MS)
1066		mutex_unlock(&ls->ls_waiters_mutex);
1067	return error;
1068}
1069
1070static void dir_remove(struct dlm_rsb *r)
1071{
1072	int to_nodeid;
1073
1074	if (dlm_no_directory(r->res_ls))
1075		return;
1076
1077	to_nodeid = dlm_dir_nodeid(r);
1078	if (to_nodeid != dlm_our_nodeid())
1079		send_remove(r);
1080	else
1081		dlm_dir_remove_entry(r->res_ls, to_nodeid,
1082				     r->res_name, r->res_length);
1083}
1084
1085/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
1086   found since they are in order of newest to oldest? */
1087
1088static int shrink_bucket(struct dlm_ls *ls, int b)
1089{
1090	struct dlm_rsb *r;
1091	int count = 0, found;
1092
1093	for (;;) {
1094		found = 0;
1095		spin_lock(&ls->ls_rsbtbl[b].lock);
1096		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
1097					    res_hashchain) {
1098			if (!time_after_eq(jiffies, r->res_toss_time +
1099					   dlm_config.ci_toss_secs * HZ))
1100				continue;
1101			found = 1;
1102			break;
1103		}
1104
1105		if (!found) {
1106			spin_unlock(&ls->ls_rsbtbl[b].lock);
1107			break;
1108		}
1109
1110		if (kref_put(&r->res_ref, kill_rsb)) {
1111			list_del(&r->res_hashchain);
1112			spin_unlock(&ls->ls_rsbtbl[b].lock);
1113
1114			if (is_master(r))
1115				dir_remove(r);
1116			dlm_free_rsb(r);
1117			count++;
1118		} else {
1119			spin_unlock(&ls->ls_rsbtbl[b].lock);
1120			log_error(ls, "tossed rsb in use %s", r->res_name);
1121		}
1122	}
1123
1124	return count;
1125}
1126
1127void dlm_scan_rsbs(struct dlm_ls *ls)
1128{
1129	int i;
1130
1131	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1132		shrink_bucket(ls, i);
1133		if (dlm_locking_stopped(ls))
1134			break;
1135		cond_resched();
1136	}
1137}
1138
1139static void add_timeout(struct dlm_lkb *lkb)
1140{
1141	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1142
1143	if (is_master_copy(lkb))
1144		return;
1145
1146	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1147	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1148		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1149		goto add_it;
1150	}
1151	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1152		goto add_it;
1153	return;
1154
1155 add_it:
1156	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1157	mutex_lock(&ls->ls_timeout_mutex);
1158	hold_lkb(lkb);
1159	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1160	mutex_unlock(&ls->ls_timeout_mutex);
1161}
1162
1163static void del_timeout(struct dlm_lkb *lkb)
1164{
1165	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1166
1167	mutex_lock(&ls->ls_timeout_mutex);
1168	if (!list_empty(&lkb->lkb_time_list)) {
1169		list_del_init(&lkb->lkb_time_list);
1170		unhold_lkb(lkb);
1171	}
1172	mutex_unlock(&ls->ls_timeout_mutex);
1173}
1174
1175/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1176   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1177   and then lock rsb because of lock ordering in add_timeout.  We may need
1178   to specify some special timeout-related bits in the lkb that are just to
1179   be accessed under the timeout_mutex. */
1180
1181void dlm_scan_timeout(struct dlm_ls *ls)
1182{
1183	struct dlm_rsb *r;
1184	struct dlm_lkb *lkb;
1185	int do_cancel, do_warn;
1186	s64 wait_us;
1187
1188	for (;;) {
1189		if (dlm_locking_stopped(ls))
1190			break;
1191
1192		do_cancel = 0;
1193		do_warn = 0;
1194		mutex_lock(&ls->ls_timeout_mutex);
1195		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1196
1197			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1198					      		lkb->lkb_timestamp));
1199
1200			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1201			    wait_us >= (lkb->lkb_timeout_cs * 10000))
1202				do_cancel = 1;
1203
1204			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1205			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1206				do_warn = 1;
1207
1208			if (!do_cancel && !do_warn)
1209				continue;
1210			hold_lkb(lkb);
1211			break;
1212		}
1213		mutex_unlock(&ls->ls_timeout_mutex);
1214
1215		if (!do_cancel && !do_warn)
1216			break;
1217
1218		r = lkb->lkb_resource;
1219		hold_rsb(r);
1220		lock_rsb(r);
1221
1222		if (do_warn) {
1223			/* clear flag so we only warn once */
1224			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1225			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1226				del_timeout(lkb);
1227			dlm_timeout_warn(lkb);
1228		}
1229
1230		if (do_cancel) {
1231			log_debug(ls, "timeout cancel %x node %d %s",
1232				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1233			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1234			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1235			del_timeout(lkb);
1236			_cancel_lock(r, lkb);
1237		}
1238
1239		unlock_rsb(r);
1240		unhold_rsb(r);
1241		dlm_put_lkb(lkb);
1242	}
1243}
1244
1245/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1246   dlm_recoverd before checking/setting ls_recover_begin. */
1247
1248void dlm_adjust_timeouts(struct dlm_ls *ls)
1249{
1250	struct dlm_lkb *lkb;
1251	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1252
1253	ls->ls_recover_begin = 0;
1254	mutex_lock(&ls->ls_timeout_mutex);
1255	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1256		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1257	mutex_unlock(&ls->ls_timeout_mutex);
1258
1259	if (!dlm_config.ci_waitwarn_us)
1260		return;
1261
1262	mutex_lock(&ls->ls_waiters_mutex);
1263	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1264		if (ktime_to_us(lkb->lkb_wait_time))
1265			lkb->lkb_wait_time = ktime_get();
1266	}
1267	mutex_unlock(&ls->ls_waiters_mutex);
1268}
1269
1270/* lkb is master or local copy */
1271
1272static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1273{
1274	int b, len = r->res_ls->ls_lvblen;
1275
1276	/* b=1 lvb returned to caller
1277	   b=0 lvb written to rsb or invalidated
1278	   b=-1 do nothing */
1279
1280	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1281
1282	if (b == 1) {
1283		if (!lkb->lkb_lvbptr)
1284			return;
1285
1286		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1287			return;
1288
1289		if (!r->res_lvbptr)
1290			return;
1291
1292		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1293		lkb->lkb_lvbseq = r->res_lvbseq;
1294
1295	} else if (b == 0) {
1296		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1297			rsb_set_flag(r, RSB_VALNOTVALID);
1298			return;
1299		}
1300
1301		if (!lkb->lkb_lvbptr)
1302			return;
1303
1304		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1305			return;
1306
1307		if (!r->res_lvbptr)
1308			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1309
1310		if (!r->res_lvbptr)
1311			return;
1312
1313		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1314		r->res_lvbseq++;
1315		lkb->lkb_lvbseq = r->res_lvbseq;
1316		rsb_clear_flag(r, RSB_VALNOTVALID);
1317	}
1318
1319	if (rsb_flag(r, RSB_VALNOTVALID))
1320		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1321}
1322
1323static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1324{
1325	if (lkb->lkb_grmode < DLM_LOCK_PW)
1326		return;
1327
1328	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1329		rsb_set_flag(r, RSB_VALNOTVALID);
1330		return;
1331	}
1332
1333	if (!lkb->lkb_lvbptr)
1334		return;
1335
1336	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1337		return;
1338
1339	if (!r->res_lvbptr)
1340		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1341
1342	if (!r->res_lvbptr)
1343		return;
1344
1345	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1346	r->res_lvbseq++;
1347	rsb_clear_flag(r, RSB_VALNOTVALID);
1348}
1349
1350/* lkb is process copy (pc) */
1351
1352static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1353			    struct dlm_message *ms)
1354{
1355	int b;
1356
1357	if (!lkb->lkb_lvbptr)
1358		return;
1359
1360	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1361		return;
1362
1363	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1364	if (b == 1) {
1365		int len = receive_extralen(ms);
1366		if (len > DLM_RESNAME_MAXLEN)
1367			len = DLM_RESNAME_MAXLEN;
1368		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1369		lkb->lkb_lvbseq = ms->m_lvbseq;
1370	}
1371}
1372
1373/* Manipulate lkb's on rsb's convert/granted/waiting queues
1374   remove_lock -- used for unlock, removes lkb from granted
1375   revert_lock -- used for cancel, moves lkb from convert to granted
1376   grant_lock  -- used for request and convert, adds lkb to granted or
1377                  moves lkb from convert or waiting to granted
1378
1379   Each of these is used for master or local copy lkb's.  There is
1380   also a _pc() variation used to make the corresponding change on
1381   a process copy (pc) lkb. */
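/* Roughly, the callers further down in this file pair up as:

       do_unlock()               -> remove_lock()   (receive side: remove_lock_pc())
       do_cancel()               -> revert_lock()   (receive side: revert_lock_pc())
       do_request()/do_convert() -> grant_lock()    (receive side: grant_lock_pc())

   so a queue change made on the master copy is repeated on the process
   copy when the corresponding reply or grant message arrives. */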
1382
1383static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1384{
1385	del_lkb(r, lkb);
1386	lkb->lkb_grmode = DLM_LOCK_IV;
1387	/* this unhold undoes the original ref from create_lkb()
1388	   so this leads to the lkb being freed */
1389	unhold_lkb(lkb);
1390}
1391
1392static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1393{
1394	set_lvb_unlock(r, lkb);
1395	_remove_lock(r, lkb);
1396}
1397
1398static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1399{
1400	_remove_lock(r, lkb);
1401}
1402
1403/* returns: 0 did nothing
1404	    1 moved lock to granted
1405	   -1 removed lock */
1406
1407static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1408{
1409	int rv = 0;
1410
1411	lkb->lkb_rqmode = DLM_LOCK_IV;
1412
1413	switch (lkb->lkb_status) {
1414	case DLM_LKSTS_GRANTED:
1415		break;
1416	case DLM_LKSTS_CONVERT:
1417		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1418		rv = 1;
1419		break;
1420	case DLM_LKSTS_WAITING:
1421		del_lkb(r, lkb);
1422		lkb->lkb_grmode = DLM_LOCK_IV;
1423		/* this unhold undoes the original ref from create_lkb()
1424		   so this leads to the lkb being freed */
1425		unhold_lkb(lkb);
1426		rv = -1;
1427		break;
1428	default:
1429		log_print("invalid status for revert %d", lkb->lkb_status);
1430	}
1431	return rv;
1432}
1433
1434static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1435{
1436	return revert_lock(r, lkb);
1437}
1438
1439static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1440{
1441	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
1442		lkb->lkb_grmode = lkb->lkb_rqmode;
1443		if (lkb->lkb_status)
1444			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1445		else
1446			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
1447	}
1448
1449	lkb->lkb_rqmode = DLM_LOCK_IV;
1450}
1451
1452static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1453{
1454	set_lvb_lock(r, lkb);
1455	_grant_lock(r, lkb);
1456	lkb->lkb_highbast = 0;
1457}
1458
1459static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1460			  struct dlm_message *ms)
1461{
1462	set_lvb_lock_pc(r, lkb, ms);
1463	_grant_lock(r, lkb);
1464}
1465
1466/* called by grant_pending_locks() which means an async grant message must
1467   be sent to the requesting node in addition to granting the lock if the
1468   lkb belongs to a remote node. */
1469
1470static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1471{
1472	grant_lock(r, lkb);
1473	if (is_master_copy(lkb))
1474		send_grant(r, lkb);
1475	else
1476		queue_cast(r, lkb, 0);
1477}
1478
1479/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
1480   change the granted/requested modes.  We're munging things accordingly in
1481   the process copy.
1482   CONVDEADLK: our grmode may have been forced down to NL to resolve a
1483   conversion deadlock
1484   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
1485   compatible with other granted locks */
1486
1487static void munge_demoted(struct dlm_lkb *lkb)
1488{
1489	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
1490		log_print("munge_demoted %x invalid modes gr %d rq %d",
1491			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
1492		return;
1493	}
1494
1495	lkb->lkb_grmode = DLM_LOCK_NL;
1496}
1497
1498static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
1499{
1500	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
1501	    ms->m_type != DLM_MSG_GRANT) {
1502		log_print("munge_altmode %x invalid reply type %d",
1503			  lkb->lkb_id, ms->m_type);
1504		return;
1505	}
1506
1507	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
1508		lkb->lkb_rqmode = DLM_LOCK_PR;
1509	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
1510		lkb->lkb_rqmode = DLM_LOCK_CW;
1511	else {
1512		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
1513		dlm_print_lkb(lkb);
1514	}
1515}
1516
1517static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1518{
1519	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1520					   lkb_statequeue);
1521	if (lkb->lkb_id == first->lkb_id)
1522		return 1;
1523
1524	return 0;
1525}
1526
1527/* Check if the given lkb conflicts with another lkb on the queue. */
1528
1529static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1530{
1531	struct dlm_lkb *this;
1532
1533	list_for_each_entry(this, head, lkb_statequeue) {
1534		if (this == lkb)
1535			continue;
1536		if (!modes_compat(this, lkb))
1537			return 1;
1538	}
1539	return 0;
1540}
1541
1542/*
1543 * "A conversion deadlock arises with a pair of lock requests in the converting
1544 * queue for one resource.  The granted mode of each lock blocks the requested
1545 * mode of the other lock."
1546 *
1547 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
1548 * convert queue from being granted, then deadlk/demote lkb.
1549 *
1550 * Example:
1551 * Granted Queue: empty
1552 * Convert Queue: NL->EX (first lock)
1553 *                PR->EX (second lock)
1554 *
1555 * The first lock can't be granted because of the granted mode of the second
1556 * lock and the second lock can't be granted because it's not first in the
1557 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
1558 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
1559 * flag set and return DEMOTED in the lksb flags.
1560 *
1561 * Originally, this function detected conv-deadlk in a more limited scope:
1562 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
1563 * - if lkb1 was the first entry in the queue (not just earlier), and was
1564 *   blocked by the granted mode of lkb2, and there was nothing on the
1565 *   granted queue preventing lkb1 from being granted immediately, i.e.
1566 *   lkb2 was the only thing preventing lkb1 from being granted.
1567 *
1568 * That second condition meant we'd only say there was conv-deadlk if
1569 * resolving it (by demotion) would lead to the first lock on the convert
1570 * queue being granted right away.  It allowed conversion deadlocks to exist
1571 * between locks on the convert queue while they couldn't be granted anyway.
1572 *
1573 * Now, we detect and take action on conversion deadlocks immediately when
1574 * they're created, even if they may not be immediately consequential.  If
1575 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
1576 * mode that would prevent lkb1's conversion from being granted, we do a
1577 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
1578 * I think this means that the lkb_is_ahead condition below should always
1579 * be zero, i.e. there will never be conv-deadlk between two locks that are
1580 * both already on the convert queue.
1581 */
1582
1583static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
1584{
1585	struct dlm_lkb *lkb1;
1586	int lkb_is_ahead = 0;
1587
1588	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
1589		if (lkb1 == lkb2) {
1590			lkb_is_ahead = 1;
1591			continue;
1592		}
1593
1594		if (!lkb_is_ahead) {
1595			if (!modes_compat(lkb2, lkb1))
1596				return 1;
1597		} else {
1598			if (!modes_compat(lkb2, lkb1) &&
1599			    !modes_compat(lkb1, lkb2))
1600				return 1;
1601		}
1602	}
1603	return 0;
1604}
1605
1606/*
1607 * Return 1 if the lock can be granted, 0 otherwise.
1608 * Also detect and resolve conversion deadlocks.
1609 *
1610 * lkb is the lock to be granted
1611 *
1612 * now is 1 if the function is being called in the context of the
1613 * immediate request, it is 0 if called later, after the lock has been
1614 * queued.
1615 *
1616 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1617 */
1618
1619static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1620{
1621	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
1622
1623	/*
1624	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
1625	 * a new request for a NL mode lock being blocked.
1626	 *
1627	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
1628	 * request, then it would be granted.  In essence, the use of this flag
1629	 * tells the Lock Manager to expedite this request by not considering
1630	 * what may be in the CONVERTING or WAITING queues...  As of this
1631	 * writing, the EXPEDITE flag can be used only with new requests for NL
1632	 * mode locks.  This flag is not valid for conversion requests.
1633	 *
1634	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
1635	 * conversion or used with a non-NL requested mode.  We also know an
1636	 * EXPEDITE request is always granted immediately, so now must always
1637	 * be 1.  The full condition to grant an expedite request: (now &&
1638	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
1639	 * therefore be shortened to just checking the flag.
1640	 */
1641
1642	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
1643		return 1;
1644
1645	/*
1646	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
1647	 * added to the remaining conditions.
1648	 */
1649
1650	if (queue_conflict(&r->res_grantqueue, lkb))
1651		goto out;
1652
1653	/*
1654	 * 6-3: By default, a conversion request is immediately granted if the
1655	 * requested mode is compatible with the modes of all other granted
1656	 * locks
1657	 */
1658
1659	if (queue_conflict(&r->res_convertqueue, lkb))
1660		goto out;
1661
1662	/*
1663	 * 6-5: But the default algorithm for deciding whether to grant or
1664	 * queue conversion requests does not by itself guarantee that such
1665	 * requests are serviced on a "first come first serve" basis.  This, in
1666	 * turn, can lead to a phenomenon known as "indefinite postponement".
1667	 *
1668	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
1669	 * the system service employed to request a lock conversion.  This flag
1670	 * forces certain conversion requests to be queued, even if they are
1671	 * compatible with the granted modes of other locks on the same
1672	 * resource.  Thus, the use of this flag results in conversion requests
1673	 * being ordered on a "first come first serve" basis.
1674	 *
1675	 * DCT: This condition is all about new conversions being able to occur
1676	 * "in place" while the lock remains on the granted queue (assuming
1677	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
1678	 * doesn't _have_ to go onto the convert queue where it's processed in
1679	 * order.  The "now" variable is necessary to distinguish converts
1680	 * being received and processed for the first time now, because once a
1681	 * convert is moved to the conversion queue the condition below applies
1682	 * requiring fifo granting.
1683	 */
1684
1685	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
1686		return 1;
1687
1688	/*
1689	 * The NOORDER flag is set to avoid the standard vms rules on grant
1690	 * order.
1691	 */
1692
1693	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
1694		return 1;
1695
1696	/*
1697	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
1698	 * granted until all other conversion requests ahead of it are granted
1699	 * and/or canceled.
1700	 */
1701
1702	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
1703		return 1;
1704
1705	/*
1706	 * 6-4: By default, a new request is immediately granted only if all
1707	 * three of the following conditions are satisfied when the request is
1708	 * issued:
1709	 * - The queue of ungranted conversion requests for the resource is
1710	 *   empty.
1711	 * - The queue of ungranted new requests for the resource is empty.
1712	 * - The mode of the new request is compatible with the most
1713	 *   restrictive mode of all granted locks on the resource.
1714	 */
1715
1716	if (now && !conv && list_empty(&r->res_convertqueue) &&
1717	    list_empty(&r->res_waitqueue))
1718		return 1;
1719
1720	/*
1721	 * 6-4: Once a lock request is in the queue of ungranted new requests,
1722	 * it cannot be granted until the queue of ungranted conversion
1723	 * requests is empty, all ungranted new requests ahead of it are
1724	 * granted and/or canceled, and it is compatible with the granted mode
1725	 * of the most restrictive lock granted on the resource.
1726	 */
1727
1728	if (!now && !conv && list_empty(&r->res_convertqueue) &&
1729	    first_in_list(lkb, &r->res_waitqueue))
1730		return 1;
1731 out:
1732	return 0;
1733}
1734
1735static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
1736			  int *err)
1737{
1738	int rv;
1739	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1740	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
1741
1742	if (err)
1743		*err = 0;
1744
1745	rv = _can_be_granted(r, lkb, now);
1746	if (rv)
1747		goto out;
1748
1749	/*
1750	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
1751	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
1752	 * cancels one of the locks.
1753	 */
1754
1755	if (is_convert && can_be_queued(lkb) &&
1756	    conversion_deadlock_detect(r, lkb)) {
1757		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
1758			lkb->lkb_grmode = DLM_LOCK_NL;
1759			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1760		} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1761			if (err)
1762				*err = -EDEADLK;
1763			else {
1764				log_print("can_be_granted deadlock %x now %d",
1765					  lkb->lkb_id, now);
1766				dlm_dump_rsb(r);
1767			}
1768		}
1769		goto out;
1770	}
1771
1772	/*
1773	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
1774	 * to grant a request in a mode other than the normal rqmode.  It's a
1775	 * simple way to provide a big optimization to applications that can
1776	 * use them.
1777	 */
1778
1779	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
1780		alt = DLM_LOCK_PR;
1781	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
1782		alt = DLM_LOCK_CW;
1783
1784	if (alt) {
1785		lkb->lkb_rqmode = alt;
1786		rv = _can_be_granted(r, lkb, now);
1787		if (rv)
1788			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1789		else
1790			lkb->lkb_rqmode = rqmode;
1791	}
1792 out:
1793	return rv;
1794}
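
/* Illustrative note on the alternate-mode flags above (a sketch of the
   caller's view, not taken verbatim from this file): a request made with
   e.g. DLM_LKF_ALTCW says "if my requested mode can't be granted, try
   DLM_LOCK_CW instead."  When the alternate mode is what actually gets
   granted, DLM_SBF_ALTMODE is set in the status block flags so the caller
   can tell which mode it ended up holding. */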
1795
1796/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
1797   for locks pending on the convert list.  Once verified (watch for these
1798   log_prints), we should be able to just call _can_be_granted() and not
1799   bother with the demote/deadlock cases here (and there's no easy way to
1800   deal with a deadlock here; we'd have to generate something like grant_lock
1801   with the deadlock error.) */
1802
1803/* Returns the highest requested mode of all blocked conversions; sets
1804   cw if there's a blocked conversion to DLM_LOCK_CW. */
1805
1806static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
1807{
1808	struct dlm_lkb *lkb, *s;
1809	int hi, demoted, quit, grant_restart, demote_restart;
1810	int deadlk;
1811
1812	quit = 0;
1813 restart:
1814	grant_restart = 0;
1815	demote_restart = 0;
1816	hi = DLM_LOCK_IV;
1817
1818	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1819		demoted = is_demoted(lkb);
1820		deadlk = 0;
1821
1822		if (can_be_granted(r, lkb, 0, &deadlk)) {
1823			grant_lock_pending(r, lkb);
1824			grant_restart = 1;
1825			continue;
1826		}
1827
1828		if (!demoted && is_demoted(lkb)) {
1829			log_print("WARN: pending demoted %x node %d %s",
1830				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1831			demote_restart = 1;
1832			continue;
1833		}
1834
1835		if (deadlk) {
1836			log_print("WARN: pending deadlock %x node %d %s",
1837				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1838			dlm_dump_rsb(r);
1839			continue;
1840		}
1841
1842		hi = max_t(int, lkb->lkb_rqmode, hi);
1843
1844		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
1845			*cw = 1;
1846	}
1847
1848	if (grant_restart)
1849		goto restart;
1850	if (demote_restart && !quit) {
1851		quit = 1;
1852		goto restart;
1853	}
1854
1855	return max_t(int, high, hi);
1856}
1857
1858static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
1859{
1860	struct dlm_lkb *lkb, *s;
1861
1862	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1863		if (can_be_granted(r, lkb, 0, NULL))
1864			grant_lock_pending(r, lkb);
1865		else {
1866			high = max_t(int, lkb->lkb_rqmode, high);
1867			if (lkb->lkb_rqmode == DLM_LOCK_CW)
1868				*cw = 1;
1869		}
1870	}
1871
1872	return high;
1873}
1874
1875/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
1876   on either the convert or waiting queue.
1877   high is the largest rqmode of all locks blocked on the convert or
1878   waiting queue. */
1879
1880static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
1881{
1882	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
1883		if (gr->lkb_highbast < DLM_LOCK_EX)
1884			return 1;
1885		return 0;
1886	}
1887
1888	if (gr->lkb_highbast < high &&
1889	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
1890		return 1;
1891	return 0;
1892}
1893
1894static void grant_pending_locks(struct dlm_rsb *r)
1895{
1896	struct dlm_lkb *lkb, *s;
1897	int high = DLM_LOCK_IV;
1898	int cw = 0;
1899
1900	DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
1901
1902	high = grant_pending_convert(r, high, &cw);
1903	high = grant_pending_wait(r, high, &cw);
1904
1905	if (high == DLM_LOCK_IV)
1906		return;
1907
1908	/*
1909	 * If there are locks left on the wait/convert queue then send blocking
1910	 * ASTs to granted locks based on the largest requested mode (high)
1911	 * found above.
1912	 */
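	/* For example: if an EX request is blocked, high is DLM_LOCK_EX and
	   each granted lock whose mode conflicts with EX, and that has not
	   already been sent a bast for EX (lkb_highbast < DLM_LOCK_EX),
	   gets a bast now. */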
1913
1914	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1915		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
1916			if (cw && high == DLM_LOCK_PR &&
1917			    lkb->lkb_grmode == DLM_LOCK_PR)
1918				queue_bast(r, lkb, DLM_LOCK_CW);
1919			else
1920				queue_bast(r, lkb, high);
1921			lkb->lkb_highbast = high;
1922		}
1923	}
1924}
1925
1926static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
1927{
1928	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
1929	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
1930		if (gr->lkb_highbast < DLM_LOCK_EX)
1931			return 1;
1932		return 0;
1933	}
1934
1935	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
1936		return 1;
1937	return 0;
1938}
1939
1940static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1941			    struct dlm_lkb *lkb)
1942{
1943	struct dlm_lkb *gr;
1944
1945	list_for_each_entry(gr, head, lkb_statequeue) {
1946		/* skip self when sending basts to convertqueue */
1947		if (gr == lkb)
1948			continue;
1949		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
1950			queue_bast(r, gr, lkb->lkb_rqmode);
1951			gr->lkb_highbast = lkb->lkb_rqmode;
1952		}
1953	}
1954}
1955
1956static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1957{
1958	send_bast_queue(r, &r->res_grantqueue, lkb);
1959}
1960
1961static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1962{
1963	send_bast_queue(r, &r->res_grantqueue, lkb);
1964	send_bast_queue(r, &r->res_convertqueue, lkb);
1965}
1966
1967/* set_master(r, lkb) -- set the master nodeid of a resource
1968
1969   The purpose of this function is to set the nodeid field in the given
1970   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
1971   known, it can just be copied to the lkb and the function will return
1972   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
1973   before it can be copied to the lkb.
1974
1975   When the rsb nodeid is being looked up remotely, the initial lkb
1976   causing the lookup is kept on the ls_waiters list waiting for the
1977   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
1978   on the rsb's res_lookup list until the master is verified.
1979
1980   Return values:
1981   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1982   1: the rsb master is not available and the lkb has been placed on
1983      a wait queue
1984*/
1985
1986static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1987{
1988	struct dlm_ls *ls = r->res_ls;
1989	int i, error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1990
1991	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1992		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1993		r->res_first_lkid = lkb->lkb_id;
1994		lkb->lkb_nodeid = r->res_nodeid;
1995		return 0;
1996	}
1997
1998	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1999		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2000		return 1;
2001	}
2002
2003	if (r->res_nodeid == 0) {
2004		lkb->lkb_nodeid = 0;
2005		return 0;
2006	}
2007
2008	if (r->res_nodeid > 0) {
2009		lkb->lkb_nodeid = r->res_nodeid;
2010		return 0;
2011	}
2012
2013	DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
2014
2015	dir_nodeid = dlm_dir_nodeid(r);
2016
2017	if (dir_nodeid != our_nodeid) {
2018		r->res_first_lkid = lkb->lkb_id;
2019		send_lookup(r, lkb);
2020		return 1;
2021	}
2022
2023	for (i = 0; i < 2; i++) {
2024		/* It's possible for dlm_scand to remove an old rsb for
2025		   this same resource from the toss list while we create
2026		   a new one, look up the master locally, and find the
2027		   entry already exists just before dlm_scand does the
2028		   dir_remove() on the previous rsb. */
2029
2030		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
2031				       r->res_length, &ret_nodeid);
2032		if (!error)
2033			break;
2034		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
2035		schedule();
2036	}
2037	if (error && error != -EEXIST)
2038		return error;
2039
2040	if (ret_nodeid == our_nodeid) {
2041		r->res_first_lkid = 0;
2042		r->res_nodeid = 0;
2043		lkb->lkb_nodeid = 0;
2044	} else {
2045		r->res_first_lkid = lkb->lkb_id;
2046		r->res_nodeid = ret_nodeid;
2047		lkb->lkb_nodeid = ret_nodeid;
2048	}
2049	return 0;
2050}
2051
2052static void process_lookup_list(struct dlm_rsb *r)
2053{
2054	struct dlm_lkb *lkb, *safe;
2055
2056	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2057		list_del_init(&lkb->lkb_rsb_lookup);
2058		_request_lock(r, lkb);
2059		schedule();
2060	}
2061}
2062
2063/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2064
2065static void confirm_master(struct dlm_rsb *r, int error)
2066{
2067	struct dlm_lkb *lkb;
2068
2069	if (!r->res_first_lkid)
2070		return;
2071
2072	switch (error) {
2073	case 0:
2074	case -EINPROGRESS:
2075		r->res_first_lkid = 0;
2076		process_lookup_list(r);
2077		break;
2078
2079	case -EAGAIN:
2080	case -EBADR:
2081	case -ENOTBLK:
2082		/* the remote request failed and won't be retried (it was
2083		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2084		   lkb the first_lkid */
2085
2086		r->res_first_lkid = 0;
2087
2088		if (!list_empty(&r->res_lookup)) {
2089			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2090					 lkb_rsb_lookup);
2091			list_del_init(&lkb->lkb_rsb_lookup);
2092			r->res_first_lkid = lkb->lkb_id;
2093			_request_lock(r, lkb);
2094		}
2095		break;
2096
2097	default:
2098		log_error(r->res_ls, "confirm_master unknown error %d", error);
2099	}
2100}
2101
2102static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2103			 int namelen, unsigned long timeout_cs,
2104			 void (*ast) (void *astparam),
2105			 void *astparam,
2106			 void (*bast) (void *astparam, int mode),
2107			 struct dlm_args *args)
2108{
2109	int rv = -EINVAL;
2110
2111	/* check for invalid arg usage */
2112
2113	if (mode < 0 || mode > DLM_LOCK_EX)
2114		goto out;
2115
2116	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2117		goto out;
2118
2119	if (flags & DLM_LKF_CANCEL)
2120		goto out;
2121
2122	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2123		goto out;
2124
2125	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2126		goto out;
2127
2128	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2129		goto out;
2130
2131	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2132		goto out;
2133
2134	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2135		goto out;
2136
2137	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2138		goto out;
2139
2140	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2141		goto out;
2142
2143	if (!ast || !lksb)
2144		goto out;
2145
2146	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2147		goto out;
2148
2149	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2150		goto out;
2151
2152	/* these args will be copied to the lkb in validate_lock_args;
2153	   it cannot be done now because, when converting locks, fields in
2154	   an active lkb cannot be modified before locking the rsb */
2155
2156	args->flags = flags;
2157	args->astfn = ast;
2158	args->astparam = astparam;
2159	args->bastfn = bast;
2160	args->timeout = timeout_cs;
2161	args->mode = mode;
2162	args->lksb = lksb;
2163	rv = 0;
2164 out:
2165	return rv;
2166}
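
/* Examples of combinations the checks above reject with -EINVAL:
   DLM_LKF_EXPEDITE with a mode other than DLM_LOCK_NL, DLM_LKF_QUECVT or
   DLM_LKF_CONVDEADLK without DLM_LKF_CONVERT, DLM_LKF_VALBLK without an
   lvb buffer in the lksb, and DLM_LKF_CONVERT without a valid sb_lkid. */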
2167
2168static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2169{
2170	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2171		      DLM_LKF_FORCEUNLOCK))
2172		return -EINVAL;
2173
2174	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2175		return -EINVAL;
2176
2177	args->flags = flags;
2178	args->astparam = astarg;
2179	return 0;
2180}
2181
2182static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2183			      struct dlm_args *args)
2184{
2185	int rv = -EINVAL;
2186
2187	if (args->flags & DLM_LKF_CONVERT) {
2188		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2189			goto out;
2190
2191		if (args->flags & DLM_LKF_QUECVT &&
2192		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2193			goto out;
2194
2195		rv = -EBUSY;
2196		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2197			goto out;
2198
2199		if (lkb->lkb_wait_type)
2200			goto out;
2201
2202		if (is_overlap(lkb))
2203			goto out;
2204	}
2205
2206	lkb->lkb_exflags = args->flags;
2207	lkb->lkb_sbflags = 0;
2208	lkb->lkb_astfn = args->astfn;
2209	lkb->lkb_astparam = args->astparam;
2210	lkb->lkb_bastfn = args->bastfn;
2211	lkb->lkb_rqmode = args->mode;
2212	lkb->lkb_lksb = args->lksb;
2213	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2214	lkb->lkb_ownpid = (int) current->pid;
2215	lkb->lkb_timeout_cs = args->timeout;
2216	rv = 0;
2217 out:
2218	if (rv)
2219		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2220			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2221			  lkb->lkb_status, lkb->lkb_wait_type,
2222			  lkb->lkb_resource->res_name);
2223	return rv;
2224}
2225
2226/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2227   for success */
2228
2229/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2230   because there may be a lookup in progress and it's valid to do a
2231   cancel or forced unlock on it */
2232
2233static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2234{
2235	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2236	int rv = -EINVAL;
2237
2238	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2239		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2240		dlm_print_lkb(lkb);
2241		goto out;
2242	}
2243
2244	/* an lkb may still exist even though the lock is EOL'ed due to a
2245	   cancel, unlock or failed noqueue request; an app can't use these
2246	   locks; return same error as if the lkid had not been found at all */
2247
2248	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2249		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2250		rv = -ENOENT;
2251		goto out;
2252	}
2253
2254	/* an lkb may be waiting for an rsb lookup to complete where the
2255	   lookup was initiated by another lock */
2256
2257	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2258		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2259			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2260			list_del_init(&lkb->lkb_rsb_lookup);
2261			queue_cast(lkb->lkb_resource, lkb,
2262				   args->flags & DLM_LKF_CANCEL ?
2263				   -DLM_ECANCEL : -DLM_EUNLOCK);
2264			unhold_lkb(lkb); /* undoes create_lkb() */
2265		}
2266		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2267		rv = -EBUSY;
2268		goto out;
2269	}
2270
2271	/* cancel not allowed with another cancel/unlock in progress */
2272
2273	if (args->flags & DLM_LKF_CANCEL) {
2274		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2275			goto out;
2276
2277		if (is_overlap(lkb))
2278			goto out;
2279
2280		/* don't let scand try to do a cancel */
2281		del_timeout(lkb);
2282
2283		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2284			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2285			rv = -EBUSY;
2286			goto out;
2287		}
2288
2289		/* there's nothing to cancel */
2290		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2291		    !lkb->lkb_wait_type) {
2292			rv = -EBUSY;
2293			goto out;
2294		}
2295
2296		switch (lkb->lkb_wait_type) {
2297		case DLM_MSG_LOOKUP:
2298		case DLM_MSG_REQUEST:
2299			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2300			rv = -EBUSY;
2301			goto out;
2302		case DLM_MSG_UNLOCK:
2303		case DLM_MSG_CANCEL:
2304			goto out;
2305		}
2306		/* add_to_waiters() will set OVERLAP_CANCEL */
2307		goto out_ok;
2308	}
2309
2310	/* do we need to allow a force-unlock if there's a normal unlock
2311	   already in progress?  in what conditions could the normal unlock
2312	   fail such that we'd want to send a force-unlock to be sure? */
2313
2314	if (args->flags & DLM_LKF_FORCEUNLOCK) {
2315		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2316			goto out;
2317
2318		if (is_overlap_unlock(lkb))
2319			goto out;
2320
2321		/* don't let scand try to do a cancel */
2322		del_timeout(lkb);
2323
2324		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2325			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2326			rv = -EBUSY;
2327			goto out;
2328		}
2329
2330		switch (lkb->lkb_wait_type) {
2331		case DLM_MSG_LOOKUP:
2332		case DLM_MSG_REQUEST:
2333			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2334			rv = -EBUSY;
2335			goto out;
2336		case DLM_MSG_UNLOCK:
2337			goto out;
2338		}
2339		/* add_to_waiters() will set OVERLAP_UNLOCK */
2340		goto out_ok;
2341	}
2342
2343	/* normal unlock not allowed if there's any op in progress */
2344	rv = -EBUSY;
2345	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2346		goto out;
2347
2348 out_ok:
2349	/* an overlapping op shouldn't blow away exflags from other op */
2350	lkb->lkb_exflags |= args->flags;
2351	lkb->lkb_sbflags = 0;
2352	lkb->lkb_astparam = args->astparam;
2353	rv = 0;
2354 out:
2355	if (rv)
2356		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2357			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2358			  args->flags, lkb->lkb_wait_type,
2359			  lkb->lkb_resource->res_name);
2360	return rv;
2361}
2362
2363/*
2364 * Four stage 4 varieties:
2365 * do_request(), do_convert(), do_unlock(), do_cancel()
2366 * These are called on the master node for the given lock and
2367 * from the central locking logic.
2368 */
2369
2370static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2371{
2372	int error = 0;
2373
2374	if (can_be_granted(r, lkb, 1, NULL)) {
2375		grant_lock(r, lkb);
2376		queue_cast(r, lkb, 0);
2377		goto out;
2378	}
2379
2380	if (can_be_queued(lkb)) {
2381		error = -EINPROGRESS;
2382		add_lkb(r, lkb, DLM_LKSTS_WAITING);
2383		add_timeout(lkb);
2384		goto out;
2385	}
2386
2387	error = -EAGAIN;
2388	queue_cast(r, lkb, -EAGAIN);
2389 out:
2390	return error;
2391}
2392
2393static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2394			       int error)
2395{
2396	switch (error) {
2397	case -EAGAIN:
2398		if (force_blocking_asts(lkb))
2399			send_blocking_asts_all(r, lkb);
2400		break;
2401	case -EINPROGRESS:
2402		send_blocking_asts(r, lkb);
2403		break;
2404	}
2405}
2406
2407static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2408{
2409	int error = 0;
2410	int deadlk = 0;
2411
2412	/* changing an existing lock may allow others to be granted */
2413
2414	if (can_be_granted(r, lkb, 1, &deadlk)) {
2415		grant_lock(r, lkb);
2416		queue_cast(r, lkb, 0);
2417		goto out;
2418	}
2419
2420	/* can_be_granted() detected that this lock would block in a conversion
2421	   deadlock, so we leave it on the granted queue and return EDEADLK in
2422	   the ast for the convert. */
2423
2424	if (deadlk) {
2425		/* it's left on the granted queue */
2426		revert_lock(r, lkb);
2427		queue_cast(r, lkb, -EDEADLK);
2428		error = -EDEADLK;
2429		goto out;
2430	}
2431
2432	/* is_demoted() means the can_be_granted() above set the grmode
2433	   to NL, and left us on the granted queue.  This auto-demotion
2434	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
2435	   now grantable.  We have to try to grant other converting locks
2436	   before we try again to grant this one. */
2437
2438	if (is_demoted(lkb)) {
2439		grant_pending_convert(r, DLM_LOCK_IV, NULL);
2440		if (_can_be_granted(r, lkb, 1)) {
2441			grant_lock(r, lkb);
2442			queue_cast(r, lkb, 0);
2443			goto out;
2444		}
2445		/* else fall through and move to convert queue */
2446	}
2447
2448	if (can_be_queued(lkb)) {
2449		error = -EINPROGRESS;
2450		del_lkb(r, lkb);
2451		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2452		add_timeout(lkb);
2453		goto out;
2454	}
2455
2456	error = -EAGAIN;
2457	queue_cast(r, lkb, -EAGAIN);
2458 out:
2459	return error;
2460}
2461
2462static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2463			       int error)
2464{
2465	switch (error) {
2466	case 0:
2467		grant_pending_locks(r);
2468		/* grant_pending_locks also sends basts */
2469		break;
2470	case -EAGAIN:
2471		if (force_blocking_asts(lkb))
2472			send_blocking_asts_all(r, lkb);
2473		break;
2474	case -EINPROGRESS:
2475		send_blocking_asts(r, lkb);
2476		break;
2477	}
2478}
2479
2480static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2481{
2482	remove_lock(r, lkb);
2483	queue_cast(r, lkb, -DLM_EUNLOCK);
2484	return -DLM_EUNLOCK;
2485}
2486
2487static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2488			      int error)
2489{
2490	grant_pending_locks(r);
2491}
2492
2493/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2494 
2495static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2496{
2497	int error;
2498
2499	error = revert_lock(r, lkb);
2500	if (error) {
2501		queue_cast(r, lkb, -DLM_ECANCEL);
2502		return -DLM_ECANCEL;
2503	}
2504	return 0;
2505}
2506
2507static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2508			      int error)
2509{
2510	if (error)
2511		grant_pending_locks(r);
2512}
2513
2514/*
2515 * Four stage 3 varieties:
2516 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
2517 */
2518
2519/* add a new lkb to a possibly new rsb, called by requesting process */
2520
2521static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2522{
2523	int error;
2524
2525	/* set_master: sets lkb nodeid from r */
2526
2527	error = set_master(r, lkb);
2528	if (error < 0)
2529		goto out;
2530	if (error) {
2531		error = 0;
2532		goto out;
2533	}
2534
2535	if (is_remote(r)) {
2536		/* receive_request() calls do_request() on remote node */
2537		error = send_request(r, lkb);
2538	} else {
2539		error = do_request(r, lkb);
2540		/* for remote locks the request_reply is sent
2541		   between do_request and do_request_effects */
2542		do_request_effects(r, lkb, error);
2543	}
2544 out:
2545	return error;
2546}
2547
2548/* change some property of an existing lkb, e.g. mode */
2549
2550static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2551{
2552	int error;
2553
2554	if (is_remote(r)) {
2555		/* receive_convert() calls do_convert() on remote node */
2556		error = send_convert(r, lkb);
2557	} else {
2558		error = do_convert(r, lkb);
2559		/* for remote locks the convert_reply is sent
2560		   between do_convert and do_convert_effects */
2561		do_convert_effects(r, lkb, error);
2562	}
2563
2564	return error;
2565}
2566
2567/* remove an existing lkb from the granted queue */
2568
2569static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2570{
2571	int error;
2572
2573	if (is_remote(r)) {
2574		/* receive_unlock() calls do_unlock() on remote node */
2575		error = send_unlock(r, lkb);
2576	} else {
2577		error = do_unlock(r, lkb);
2578		/* for remote locks the unlock_reply is sent
2579		   between do_unlock and do_unlock_effects */
2580		do_unlock_effects(r, lkb, error);
2581	}
2582
2583	return error;
2584}
2585
2586/* remove an existing lkb from the convert or wait queue */
2587
2588static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2589{
2590	int error;
2591
2592	if (is_remote(r)) {
2593		/* receive_cancel() calls do_cancel() on remote node */
2594		error = send_cancel(r, lkb);
2595	} else {
2596		error = do_cancel(r, lkb);
2597		/* for remote locks the cancel_reply is sent
2598		   between do_cancel and do_cancel_effects */
2599		do_cancel_effects(r, lkb, error);
2600	}
2601
2602	return error;
2603}
2604
2605/*
2606 * Four stage 2 varieties:
2607 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
2608 */
2609
2610static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
2611			int len, struct dlm_args *args)
2612{
2613	struct dlm_rsb *r;
2614	int error;
2615
2616	error = validate_lock_args(ls, lkb, args);
2617	if (error)
2618		goto out;
2619
2620	error = find_rsb(ls, name, len, R_CREATE, &r);
2621	if (error)
2622		goto out;
2623
2624	lock_rsb(r);
2625
2626	attach_lkb(r, lkb);
2627	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
2628
2629	error = _request_lock(r, lkb);
2630
2631	unlock_rsb(r);
2632	put_rsb(r);
2633
2634 out:
2635	return error;
2636}
2637
2638static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2639			struct dlm_args *args)
2640{
2641	struct dlm_rsb *r;
2642	int error;
2643
2644	r = lkb->lkb_resource;
2645
2646	hold_rsb(r);
2647	lock_rsb(r);
2648
2649	error = validate_lock_args(ls, lkb, args);
2650	if (error)
2651		goto out;
2652
2653	error = _convert_lock(r, lkb);
2654 out:
2655	unlock_rsb(r);
2656	put_rsb(r);
2657	return error;
2658}
2659
2660static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2661		       struct dlm_args *args)
2662{
2663	struct dlm_rsb *r;
2664	int error;
2665
2666	r = lkb->lkb_resource;
2667
2668	hold_rsb(r);
2669	lock_rsb(r);
2670
2671	error = validate_unlock_args(lkb, args);
2672	if (error)
2673		goto out;
2674
2675	error = _unlock_lock(r, lkb);
2676 out:
2677	unlock_rsb(r);
2678	put_rsb(r);
2679	return error;
2680}
2681
2682static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2683		       struct dlm_args *args)
2684{
2685	struct dlm_rsb *r;
2686	int error;
2687
2688	r = lkb->lkb_resource;
2689
2690	hold_rsb(r);
2691	lock_rsb(r);
2692
2693	error = validate_unlock_args(lkb, args);
2694	if (error)
2695		goto out;
2696
2697	error = _cancel_lock(r, lkb);
2698 out:
2699	unlock_rsb(r);
2700	put_rsb(r);
2701	return error;
2702}
2703
2704/*
2705 * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
2706 */
2707
2708int dlm_lock(dlm_lockspace_t *lockspace,
2709	     int mode,
2710	     struct dlm_lksb *lksb,
2711	     uint32_t flags,
2712	     void *name,
2713	     unsigned int namelen,
2714	     uint32_t parent_lkid,
2715	     void (*ast) (void *astarg),
2716	     void *astarg,
2717	     void (*bast) (void *astarg, int mode))
2718{
2719	struct dlm_ls *ls;
2720	struct dlm_lkb *lkb;
2721	struct dlm_args args;
2722	int error, convert = flags & DLM_LKF_CONVERT;
2723
2724	ls = dlm_find_lockspace_local(lockspace);
2725	if (!ls)
2726		return -EINVAL;
2727
2728	dlm_lock_recovery(ls);
2729
2730	if (convert)
2731		error = find_lkb(ls, lksb->sb_lkid, &lkb);
2732	else
2733		error = create_lkb(ls, &lkb);
2734
2735	if (error)
2736		goto out;
2737
2738	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
2739			      astarg, bast, &args);
2740	if (error)
2741		goto out_put;
2742
2743	if (convert)
2744		error = convert_lock(ls, lkb, &args);
2745	else
2746		error = request_lock(ls, lkb, name, namelen, &args);
2747
2748	if (error == -EINPROGRESS)
2749		error = 0;
2750 out_put:
2751	if (convert || error)
2752		__put_lkb(ls, lkb);
2753	if (error == -EAGAIN || error == -EDEADLK)
2754		error = 0;
2755 out:
2756	dlm_unlock_recovery(ls);
2757	dlm_put_lockspace(ls);
2758	return error;
2759}
2760
2761int dlm_unlock(dlm_lockspace_t *lockspace,
2762	       uint32_t lkid,
2763	       uint32_t flags,
2764	       struct dlm_lksb *lksb,
2765	       void *astarg)
2766{
2767	struct dlm_ls *ls;
2768	struct dlm_lkb *lkb;
2769	struct dlm_args args;
2770	int error;
2771
2772	ls = dlm_find_lockspace_local(lockspace);
2773	if (!ls)
2774		return -EINVAL;
2775
2776	dlm_lock_recovery(ls);
2777
2778	error = find_lkb(ls, lkid, &lkb);
2779	if (error)
2780		goto out;
2781
2782	error = set_unlock_args(flags, astarg, &args);
2783	if (error)
2784		goto out_put;
2785
2786	if (flags & DLM_LKF_CANCEL)
2787		error = cancel_lock(ls, lkb, &args);
2788	else
2789		error = unlock_lock(ls, lkb, &args);
2790
2791	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
2792		error = 0;
2793	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
2794		error = 0;
2795 out_put:
2796	dlm_put_lkb(lkb);
2797 out:
2798	dlm_unlock_recovery(ls);
2799	dlm_put_lockspace(ls);
2800	return error;
2801}
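
/*
 * Illustrative usage sketch (not part of this file; the helper names and
 * the completion are assumptions).  dlm_lock()/dlm_unlock() complete
 * asynchronously: the registered ast callback runs when the operation
 * finishes, so a caller typically waits before reading lksb.sb_status.
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static void my_ast(void *arg)
 *	{
 *		complete(&my_done);
 *	}
 *
 *	static void my_bast(void *arg, int mode)
 *	{
 *		pr_debug("another node is blocked wanting mode %d\n", mode);
 *	}
 *
 *	int example(dlm_lockspace_t *ls)
 *	{
 *		struct dlm_lksb lksb;
 *		int error;
 *
 *		memset(&lksb, 0, sizeof(lksb));
 *
 *		error = dlm_lock(ls, DLM_LOCK_PR, &lksb, 0, "myres", 5, 0,
 *				 my_ast, &lksb, my_bast);
 *		if (error)
 *			return error;
 *		wait_for_completion(&my_done);
 *		if (lksb.sb_status)
 *			return lksb.sb_status;
 *
 *		error = dlm_lock(ls, DLM_LOCK_EX, &lksb, DLM_LKF_CONVERT,
 *				 NULL, 0, 0, my_ast, &lksb, my_bast);
 *		if (error)
 *			return error;
 *		wait_for_completion(&my_done);
 *
 *		error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &lksb);
 *		if (error)
 *			return error;
 *		wait_for_completion(&my_done);
 *		return 0;
 *	}
 */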
2802
2803/*
2804 * send/receive routines for remote operations and replies
2805 *
2806 * send_args
2807 * send_common
2808 * send_request			receive_request
2809 * send_convert			receive_convert
2810 * send_unlock			receive_unlock
2811 * send_cancel			receive_cancel
2812 * send_grant			receive_grant
2813 * send_bast			receive_bast
2814 * send_lookup			receive_lookup
2815 * send_remove			receive_remove
2816 *
2817 * 				send_common_reply
2818 * receive_request_reply	send_request_reply
2819 * receive_convert_reply	send_convert_reply
2820 * receive_unlock_reply		send_unlock_reply
2821 * receive_cancel_reply		send_cancel_reply
2822 * receive_lookup_reply		send_lookup_reply
2823 */
2824
2825static int _create_message(struct dlm_ls *ls, int mb_len,
2826			   int to_nodeid, int mstype,
2827			   struct dlm_message **ms_ret,
2828			   struct dlm_mhandle **mh_ret)
2829{
2830	struct dlm_message *ms;
2831	struct dlm_mhandle *mh;
2832	char *mb;
2833
2834	/* get_buffer gives us a message handle (mh) that we need to
2835	   pass into lowcomms_commit and a message buffer (mb) that we
2836	   write our data into */
2837
2838	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
2839	if (!mh)
2840		return -ENOBUFS;
2841
2842	memset(mb, 0, mb_len);
2843
2844	ms = (struct dlm_message *) mb;
2845
2846	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2847	ms->m_header.h_lockspace = ls->ls_global_id;
2848	ms->m_header.h_nodeid = dlm_our_nodeid();
2849	ms->m_header.h_length = mb_len;
2850	ms->m_header.h_cmd = DLM_MSG;
2851
2852	ms->m_type = mstype;
2853
2854	*mh_ret = mh;
2855	*ms_ret = ms;
2856	return 0;
2857}
2858
2859static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2860			  int to_nodeid, int mstype,
2861			  struct dlm_message **ms_ret,
2862			  struct dlm_mhandle **mh_ret)
2863{
2864	int mb_len = sizeof(struct dlm_message);
2865
2866	switch (mstype) {
2867	case DLM_MSG_REQUEST:
2868	case DLM_MSG_LOOKUP:
2869	case DLM_MSG_REMOVE:
2870		mb_len += r->res_length;
2871		break;
2872	case DLM_MSG_CONVERT:
2873	case DLM_MSG_UNLOCK:
2874	case DLM_MSG_REQUEST_REPLY:
2875	case DLM_MSG_CONVERT_REPLY:
2876	case DLM_MSG_GRANT:
2877		if (lkb && lkb->lkb_lvbptr)
2878			mb_len += r->res_ls->ls_lvblen;
2879		break;
2880	}
2881
2882	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
2883			       ms_ret, mh_ret);
2884}
2885
2886/* further lowcomms enhancements or alternate implementations may make
2887   the return value from this function useful at some point */
2888
2889static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2890{
2891	dlm_message_out(ms);
2892	dlm_lowcomms_commit_buffer(mh);
2893	return 0;
2894}
2895
2896static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2897		      struct dlm_message *ms)
2898{
2899	ms->m_nodeid   = lkb->lkb_nodeid;
2900	ms->m_pid      = lkb->lkb_ownpid;
2901	ms->m_lkid     = lkb->lkb_id;
2902	ms->m_remid    = lkb->lkb_remid;
2903	ms->m_exflags  = lkb->lkb_exflags;
2904	ms->m_sbflags  = lkb->lkb_sbflags;
2905	ms->m_flags    = lkb->lkb_flags;
2906	ms->m_lvbseq   = lkb->lkb_lvbseq;
2907	ms->m_status   = lkb->lkb_status;
2908	ms->m_grmode   = lkb->lkb_grmode;
2909	ms->m_rqmode   = lkb->lkb_rqmode;
2910	ms->m_hash     = r->res_hash;
2911
2912	/* m_result and m_bastmode are set from function args,
2913	   not from lkb fields */
2914
2915	if (lkb->lkb_bastfn)
2916		ms->m_asts |= DLM_CB_BAST;
2917	if (lkb->lkb_astfn)
2918		ms->m_asts |= DLM_CB_CAST;
2919
2920	/* compare with switch in create_message; send_remove() doesn't
2921	   use send_args() */
2922
2923	switch (ms->m_type) {
2924	case DLM_MSG_REQUEST:
2925	case DLM_MSG_LOOKUP:
2926		memcpy(ms->m_extra, r->res_name, r->res_length);
2927		break;
2928	case DLM_MSG_CONVERT:
2929	case DLM_MSG_UNLOCK:
2930	case DLM_MSG_REQUEST_REPLY:
2931	case DLM_MSG_CONVERT_REPLY:
2932	case DLM_MSG_GRANT:
2933		if (!lkb->lkb_lvbptr)
2934			break;
2935		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2936		break;
2937	}
2938}
2939
2940static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2941{
2942	struct dlm_message *ms;
2943	struct dlm_mhandle *mh;
2944	int to_nodeid, error;
2945
2946	to_nodeid = r->res_nodeid;
2947
2948	error = add_to_waiters(lkb, mstype, to_nodeid);
2949	if (error)
2950		return error;
2951
2952	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2953	if (error)
2954		goto fail;
2955
2956	send_args(r, lkb, ms);
2957
2958	error = send_message(mh, ms);
2959	if (error)
2960		goto fail;
2961	return 0;
2962
2963 fail:
2964	remove_from_waiters(lkb, msg_reply_type(mstype));
2965	return error;
2966}
2967
2968static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2969{
2970	return send_common(r, lkb, DLM_MSG_REQUEST);
2971}
2972
2973static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2974{
2975	int error;
2976
2977	error = send_common(r, lkb, DLM_MSG_CONVERT);
2978
2979	/* down conversions go without a reply from the master */
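	/* the stub reply lets a down conversion complete through the normal
	   __receive_convert_reply() path without a round trip to the master;
	   DLM_IFL_STUB_MS tells receive_flags_reply() to ignore the flags in
	   this locally-built message */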
2980	if (!error && down_conversion(lkb)) {
2981		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2982		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
2983		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2984		r->res_ls->ls_stub_ms.m_result = 0;
2985		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2986	}
2987
2988	return error;
2989}
2990
2991/* FIXME: if this lkb is the only lock we hold on the rsb, then set
2992   MASTER_UNCERTAIN to force the next request on the rsb to confirm
2993   that the master is still correct. */
2994
2995static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2996{
2997	return send_common(r, lkb, DLM_MSG_UNLOCK);
2998}
2999
3000static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3001{
3002	return send_common(r, lkb, DLM_MSG_CANCEL);
3003}
3004
3005static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3006{
3007	struct dlm_message *ms;
3008	struct dlm_mhandle *mh;
3009	int to_nodeid, error;
3010
3011	to_nodeid = lkb->lkb_nodeid;
3012
3013	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3014	if (error)
3015		goto out;
3016
3017	send_args(r, lkb, ms);
3018
3019	ms->m_result = 0;
3020
3021	error = send_message(mh, ms);
3022 out:
3023	return error;
3024}
3025
3026static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3027{
3028	struct dlm_message *ms;
3029	struct dlm_mhandle *mh;
3030	int to_nodeid, error;
3031
3032	to_nodeid = lkb->lkb_nodeid;
3033
3034	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3035	if (error)
3036		goto out;
3037
3038	send_args(r, lkb, ms);
3039
3040	ms->m_bastmode = mode;
3041
3042	error = send_message(mh, ms);
3043 out:
3044	return error;
3045}
3046
3047static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3048{
3049	struct dlm_message *ms;
3050	struct dlm_mhandle *mh;
3051	int to_nodeid, error;
3052
3053	to_nodeid = dlm_dir_nodeid(r);
3054
3055	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3056	if (error)
3057		return error;
3058
3059	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3060	if (error)
3061		goto fail;
3062
3063	send_args(r, lkb, ms);
3064
3065	error = send_message(mh, ms);
3066	if (error)
3067		goto fail;
3068	return 0;
3069
3070 fail:
3071	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3072	return error;
3073}
3074
3075static int send_remove(struct dlm_rsb *r)
3076{
3077	struct dlm_message *ms;
3078	struct dlm_mhandle *mh;
3079	int to_nodeid, error;
3080
3081	to_nodeid = dlm_dir_nodeid(r);
3082
3083	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3084	if (error)
3085		goto out;
3086
3087	memcpy(ms->m_extra, r->res_name, r->res_length);
3088	ms->m_hash = r->res_hash;
3089
3090	error = send_message(mh, ms);
3091 out:
3092	return error;
3093}
3094
3095static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3096			     int mstype, int rv)
3097{
3098	struct dlm_message *ms;
3099	struct dlm_mhandle *mh;
3100	int to_nodeid, error;
3101
3102	to_nodeid = lkb->lkb_nodeid;
3103
3104	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3105	if (error)
3106		goto out;
3107
3108	send_args(r, lkb, ms);
3109
3110	ms->m_result = rv;
3111
3112	error = send_message(mh, ms);
3113 out:
3114	return error;
3115}
3116
3117static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3118{
3119	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3120}
3121
3122static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3123{
3124	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3125}
3126
3127static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3128{
3129	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3130}
3131
3132static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3133{
3134	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3135}
3136
3137static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3138			     int ret_nodeid, int rv)
3139{
3140	struct dlm_rsb *r = &ls->ls_stub_rsb;
3141	struct dlm_message *ms;
3142	struct dlm_mhandle *mh;
3143	int error, nodeid = ms_in->m_header.h_nodeid;
3144
3145	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3146	if (error)
3147		goto out;
3148
3149	ms->m_lkid = ms_in->m_lkid;
3150	ms->m_result = rv;
3151	ms->m_nodeid = ret_nodeid;
3152
3153	error = send_message(mh, ms);
3154 out:
3155	return error;
3156}
3157
3158/* which args we save from a received message depends heavily on the type
3159   of message, unlike the send side where we can safely send everything about
3160   the lkb for any type of message */
3161
3162static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3163{
3164	lkb->lkb_exflags = ms->m_exflags;
3165	lkb->lkb_sbflags = ms->m_sbflags;
3166	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3167		         (ms->m_flags & 0x0000FFFF);
3168}
3169
3170static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3171{
3172	if (ms->m_flags == DLM_IFL_STUB_MS)
3173		return;
3174
3175	lkb->lkb_sbflags = ms->m_sbflags;
3176	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3177		         (ms->m_flags & 0x0000FFFF);
3178}
3179
3180static int receive_extralen(struct dlm_message *ms)
3181{
3182	return (ms->m_header.h_length - sizeof(struct dlm_message));
3183}
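
/* e.g. for a DLM_MSG_REQUEST the resource name is carried in m_extra and
   h_length is sizeof(struct dlm_message) + res_length (see create_message),
   so the value computed above is the length of the name */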
3184
3185static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3186		       struct dlm_message *ms)
3187{
3188	int len;
3189
3190	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3191		if (!lkb->lkb_lvbptr)
3192			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3193		if (!lkb->lkb_lvbptr)
3194			return -ENOMEM;
3195		len = receive_extralen(ms);
3196		if (len > DLM_RESNAME_MAXLEN)
3197			len = DLM_RESNAME_MAXLEN;
3198		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3199	}
3200	return 0;
3201}
3202
3203static void fake_bastfn(void *astparam, int mode)
3204{
3205	log_print("fake_bastfn should not be called");
3206}
3207
3208static void fake_astfn(void *astparam)
3209{
3210	log_print("fake_astfn should not be called");
3211}
3212
3213static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3214				struct dlm_message *ms)
3215{
3216	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3217	lkb->lkb_ownpid = ms->m_pid;
3218	lkb->lkb_remid = ms->m_lkid;
3219	lkb->lkb_grmode = DLM_LOCK_IV;
3220	lkb->lkb_rqmode = ms->m_rqmode;
3221
3222	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3223	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3224
3225	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3226		/* lkb was just created so there won't be an lvb yet */
3227		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3228		if (!lkb->lkb_lvbptr)
3229			return -ENOMEM;
3230	}
3231
3232	return 0;
3233}
3234
3235static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3236				struct dlm_message *ms)
3237{
3238	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3239		return -EBUSY;
3240
3241	if (receive_lvb(ls, lkb, ms))
3242		return -ENOMEM;
3243
3244	lkb->lkb_rqmode = ms->m_rqmode;
3245	lkb->lkb_lvbseq = ms->m_lvbseq;
3246
3247	return 0;
3248}
3249
3250static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3251			       struct dlm_message *ms)
3252{
3253	if (receive_lvb(ls, lkb, ms))
3254		return -ENOMEM;
3255	return 0;
3256}
3257
3258/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3259   uses to send a reply and that the remote end uses to process the reply. */
3260
3261static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3262{
3263	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3264	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3265	lkb->lkb_remid = ms->m_lkid;
3266}
3267
3268/* This is called after the rsb is locked so that we can safely inspect
3269   fields in the lkb. */
3270
3271static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3272{
3273	int from = ms->m_header.h_nodeid;
3274	int error = 0;
3275
3276	switch (ms->m_type) {
3277	case DLM_MSG_CONVERT:
3278	case DLM_MSG_UNLOCK:
3279	case DLM_MSG_CANCEL:
3280		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3281			error = -EINVAL;
3282		break;
3283
3284	case DLM_MSG_CONVERT_REPLY:
3285	case DLM_MSG_UNLOCK_REPLY:
3286	case DLM_MSG_CANCEL_REPLY:
3287	case DLM_MSG_GRANT:
3288	case DLM_MSG_BAST:
3289		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3290			error = -EINVAL;
3291		break;
3292
3293	case DLM_MSG_REQUEST_REPLY:
3294		if (!is_process_copy(lkb))
3295			error = -EINVAL;
3296		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3297			error = -EINVAL;
3298		break;
3299
3300	default:
3301		error = -EINVAL;
3302	}
3303
3304	if (error)
3305		log_error(lkb->lkb_resource->res_ls,
3306			  "ignore invalid message %d from %d %x %x %x %d",
3307			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
3308			  lkb->lkb_flags, lkb->lkb_nodeid);
3309	return error;
3310}
3311
3312static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3313{
3314	struct dlm_lkb *lkb;
3315	struct dlm_rsb *r;
3316	int error, namelen;
3317
3318	error = create_lkb(ls, &lkb);
3319	if (error)
3320		goto fail;
3321
3322	receive_flags(lkb, ms);
3323	lkb->lkb_flags |= DLM_IFL_MSTCPY;
3324	error = receive_request_args(ls, lkb, ms);
3325	if (error) {
3326		__put_lkb(ls, lkb);
3327		goto fail;
3328	}
3329
3330	namelen = receive_extralen(ms);
3331
3332	error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
3333	if (error) {
3334		__put_lkb(ls, lkb);
3335		goto fail;
3336	}
3337
3338	lock_rsb(r);
3339
3340	attach_lkb(r, lkb);
3341	error = do_request(r, lkb);
3342	send_request_reply(r, lkb, error);
3343	do_request_effects(r, lkb, error);
3344
3345	unlock_rsb(r);
3346	put_rsb(r);
3347
3348	if (error == -EINPROGRESS)
3349		error = 0;
3350	if (error)
3351		dlm_put_lkb(lkb);
3352	return;
3353
3354 fail:
3355	setup_stub_lkb(ls, ms);
3356	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3357}
3358
3359static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3360{
3361	struct dlm_lkb *lkb;
3362	struct dlm_rsb *r;
3363	int error, reply = 1;
3364
3365	error = find_lkb(ls, ms->m_remid, &lkb);
3366	if (error)
3367		goto fail;
3368
3369	r = lkb->lkb_resource;
3370
3371	hold_rsb(r);
3372	lock_rsb(r);
3373
3374	error = validate_message(lkb, ms);
3375	if (error)
3376		goto out;
3377
3378	receive_flags(lkb, ms);
3379
3380	error = receive_convert_args(ls, lkb, ms);
3381	if (error) {
3382		send_convert_reply(r, lkb, error);
3383		goto out;
3384	}
3385
3386	reply = !down_conversion(lkb);
3387
3388	error = do_convert(r, lkb);
3389	if (reply)
3390		send_convert_reply(r, lkb, error);
3391	do_convert_effects(r, lkb, error);
3392 out:
3393	unlock_rsb(r);
3394	put_rsb(r);
3395	dlm_put_lkb(lkb);
3396	return;
3397
3398 fail:
3399	setup_stub_lkb(ls, ms);
3400	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3401}
3402
3403static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3404{
3405	struct dlm_lkb *lkb;
3406	struct dlm_rsb *r;
3407	int error;
3408
3409	error = find_lkb(ls, ms->m_remid, &lkb);
3410	if (error)
3411		goto fail;
3412
3413	r = lkb->lkb_resource;
3414
3415	hold_rsb(r);
3416	lock_rsb(r);
3417
3418	error = validate_message(lkb, ms);
3419	if (error)
3420		goto out;
3421
3422	receive_flags(lkb, ms);
3423
3424	error = receive_unlock_args(ls, lkb, ms);
3425	if (error) {
3426		send_unlock_reply(r, lkb, error);
3427		goto out;
3428	}
3429
3430	error = do_unlock(r, lkb);
3431	send_unlock_reply(r, lkb, error);
3432	do_unlock_effects(r, lkb, error);
3433 out:
3434	unlock_rsb(r);
3435	put_rsb(r);
3436	dlm_put_lkb(lkb);
3437	return;
3438
3439 fail:
3440	setup_stub_lkb(ls, ms);
3441	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3442}
3443
3444static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
3445{
3446	struct dlm_lkb *lkb;
3447	struct dlm_rsb *r;
3448	int error;
3449
3450	error = find_lkb(ls, ms->m_remid, &lkb);
3451	if (error)
3452		goto fail;
3453
3454	receive_flags(lkb, ms);
3455
3456	r = lkb->lkb_resource;
3457
3458	hold_rsb(r);
3459	lock_rsb(r);
3460
3461	error = validate_message(lkb, ms);
3462	if (error)
3463		goto out;
3464
3465	error = do_cancel(r, lkb);
3466	send_cancel_reply(r, lkb, error);
3467	do_cancel_effects(r, lkb, error);
3468 out:
3469	unlock_rsb(r);
3470	put_rsb(r);
3471	dlm_put_lkb(lkb);
3472	return;
3473
3474 fail:
3475	setup_stub_lkb(ls, ms);
3476	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3477}
3478
3479static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
3480{
3481	struct dlm_lkb *lkb;
3482	struct dlm_rsb *r;
3483	int error;
3484
3485	error = find_lkb(ls, ms->m_remid, &lkb);
3486	if (error) {
3487		log_debug(ls, "receive_grant from %d no lkb %x",
3488			  ms->m_header.h_nodeid, ms->m_remid);
3489		return;
3490	}
3491
3492	r = lkb->lkb_resource;
3493
3494	hold_rsb(r);
3495	lock_rsb(r);
3496
3497	error = validate_message(lkb, ms);
3498	if (error)
3499		goto out;
3500
3501	receive_flags_reply(lkb, ms);
3502	if (is_altmode(lkb))
3503		munge_altmode(lkb, ms);
3504	grant_lock_pc(r, lkb, ms);
3505	queue_cast(r, lkb, 0);
3506 out:
3507	unlock_rsb(r);
3508	put_rsb(r);
3509	dlm_put_lkb(lkb);
3510}
3511
3512static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
3513{
3514	struct dlm_lkb *lkb;
3515	struct dlm_rsb *r;
3516	int error;
3517
3518	error = find_lkb(ls, ms->m_remid, &lkb);
3519	if (error) {
3520		log_debug(ls, "receive_bast from %d no lkb %x",
3521			  ms->m_header.h_nodeid, ms->m_remid);
3522		return;
3523	}
3524
3525	r = lkb->lkb_resource;
3526
3527	hold_rsb(r);
3528	lock_rsb(r);
3529
3530	error = validate_message(lkb, ms);
3531	if (error)
3532		goto out;
3533
3534	queue_bast(r, lkb, ms->m_bastmode);
3535 out:
3536	unlock_rsb(r);
3537	put_rsb(r);
3538	dlm_put_lkb(lkb);
3539}
3540
3541static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
3542{
3543	int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
3544
3545	from_nodeid = ms->m_header.h_nodeid;
3546	our_nodeid = dlm_our_nodeid();
3547
3548	len = receive_extralen(ms);
3549
3550	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3551	if (dir_nodeid != our_nodeid) {
3552		log_error(ls, "lookup dir_nodeid %d from %d",
3553			  dir_nodeid, from_nodeid);
3554		error = -EINVAL;
3555		ret_nodeid = -1;
3556		goto out;
3557	}
3558
3559	error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
3560
3561	/* Optimization: we're master so treat lookup as a request */
3562	if (!error && ret_nodeid == our_nodeid) {
3563		receive_request(ls, ms);
3564		return;
3565	}
3566 out:
3567	send_lookup_reply(ls, ms, ret_nodeid, error);
3568}
3569
3570static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
3571{
3572	int len, dir_nodeid, from_nodeid;
3573
3574	from_nodeid = ms->m_header.h_nodeid;
3575
3576	len = receive_extralen(ms);
3577
3578	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3579	if (dir_nodeid != dlm_our_nodeid()) {
3580		log_error(ls, "remove dir entry dir_nodeid %d from %d",
3581			  dir_nodeid, from_nodeid);
3582		return;
3583	}
3584
3585	dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
3586}
3587
3588static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
3589{
3590	do_purge(ls, ms->m_nodeid, ms->m_pid);
3591}
3592
3593static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
3594{
3595	struct dlm_lkb *lkb;
3596	struct dlm_rsb *r;
3597	int error, mstype, result;
3598
3599	error = find_lkb(ls, ms->m_remid, &lkb);
3600	if (error) {
3601		log_debug(ls, "receive_request_reply from %d no lkb %x",
3602			  ms->m_header.h_nodeid, ms->m_remid);
3603		return;
3604	}
3605
3606	r = lkb->lkb_resource;
3607	hold_rsb(r);
3608	lock_rsb(r);
3609
3610	error = validate_message(lkb, ms);
3611	if (error)
3612		goto out;
3613
3614	mstype = lkb->lkb_wait_type;
3615	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
3616	if (error)
3617		goto out;
3618
3619	/* Optimization: the dir node was also the master, so it took our
3620	   lookup as a request and sent request reply instead of lookup reply */
3621	if (mstype == DLM_MSG_LOOKUP) {
3622		r->res_nodeid = ms->m_header.h_nodeid;
3623		lkb->lkb_nodeid = r->res_nodeid;
3624	}
3625
3626	/* this is the value returned from do_request() on the master */
3627	result = ms->m_result;
3628
3629	switch (result) {
3630	case -EAGAIN:
3631		/* request would block (be queued) on remote master */
3632		queue_cast(r, lkb, -EAGAIN);
3633		confirm_master(r, -EAGAIN);
3634		unhold_lkb(lkb); /* undoes create_lkb() */
3635		break;
3636
3637	case -EINPROGRESS:
3638	case 0:
3639		/* request was queued or granted on remote master */
3640		receive_flags_reply(lkb, ms);
3641		lkb->lkb_remid = ms->m_lkid;
3642		if (is_altmode(lkb))
3643			munge_altmode(lkb, ms);
3644		if (result) {
3645			add_lkb(r, lkb, DLM_LKSTS_WAITING);
3646			add_timeout(lkb);
3647		} else {
3648			grant_lock_pc(r, lkb, ms);
3649			queue_cast(r, lkb, 0);
3650		}
3651		confirm_master(r, result);
3652		break;
3653
3654	case -EBADR:
3655	case -ENOTBLK:
3656		/* find_rsb failed to find rsb or rsb wasn't master */
3657		log_debug(ls, "receive_request_reply %x %x master diff %d %d",
3658			  lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
3659		r->res_nodeid = -1;
3660		lkb->lkb_nodeid = -1;
3661
3662		if (is_overlap(lkb)) {
3663			/* we'll ignore error in cancel/unlock reply */
3664			queue_cast_overlap(r, lkb);
3665			confirm_master(r, result);
3666			unhold_lkb(lkb); /* undoes create_lkb() */
3667		} else
3668			_request_lock(r, lkb);
3669		break;
3670
3671	default:
3672		log_error(ls, "receive_request_reply %x error %d",
3673			  lkb->lkb_id, result);
3674	}
3675
3676	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
3677		log_debug(ls, "receive_request_reply %x result %d unlock",
3678			  lkb->lkb_id, result);
3679		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3680		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3681		send_unlock(r, lkb);
3682	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
3683		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
3684		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3685		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3686		send_cancel(r, lkb);
3687	} else {
3688		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3689		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3690	}
3691 out:
3692	unlock_rsb(r);
3693	put_rsb(r);
3694	dlm_put_lkb(lkb);
3695}
3696
3697static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3698				    struct dlm_message *ms)
3699{
3700	/* this is the value returned from do_convert() on the master */
3701	switch (ms->m_result) {
3702	case -EAGAIN:
3703		/* convert would block (be queued) on remote master */
3704		queue_cast(r, lkb, -EAGAIN);
3705		break;
3706
3707	case -EDEADLK:
3708		receive_flags_reply(lkb, ms);
3709		revert_lock_pc(r, lkb);
3710		queue_cast(r, lkb, -EDEADLK);
3711		break;
3712
3713	case -EINPROGRESS:
3714		/* convert was queued on remote master */
3715		receive_flags_reply(lkb, ms);
3716		if (is_demoted(lkb))
3717			munge_demoted(lkb);
3718		del_lkb(r, lkb);
3719		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3720		add_timeout(lkb);
3721		break;
3722
3723	case 0:
3724		/* convert was granted on remote master */
3725		receive_flags_reply(lkb, ms);
3726		if (is_demoted(lkb))
3727			munge_demoted(lkb);
3728		grant_lock_pc(r, lkb, ms);
3729		queue_cast(r, lkb, 0);
3730		break;
3731
3732	default:
3733		log_error(r->res_ls, "receive_convert_reply %x error %d",
3734			  lkb->lkb_id, ms->m_result);
3735	}
3736}
3737
3738static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3739{
3740	struct dlm_rsb *r = lkb->lkb_resource;
3741	int error;
3742
3743	hold_rsb(r);
3744	lock_rsb(r);
3745
3746	error = validate_message(lkb, ms);
3747	if (error)
3748		goto out;
3749
3750	/* stub reply can happen with waiters_mutex held */
3751	error = remove_from_waiters_ms(lkb, ms);
3752	if (error)
3753		goto out;
3754
3755	__receive_convert_reply(r, lkb, ms);
3756 out:
3757	unlock_rsb(r);
3758	put_rsb(r);
3759}
3760
3761static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
3762{
3763	struct dlm_lkb *lkb;
3764	int error;
3765
3766	error = find_lkb(ls, ms->m_remid, &lkb);
3767	if (error) {
3768		log_debug(ls, "receive_convert_reply from %d no lkb %x",
3769			  ms->m_header.h_nodeid, ms->m_remid);
3770		return;
3771	}
3772
3773	_receive_convert_reply(lkb, ms);
3774	dlm_put_lkb(lkb);
3775}
3776
3777static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3778{
3779	struct dlm_rsb *r = lkb->lkb_resource;
3780	int error;
3781
3782	hold_rsb(r);
3783	lock_rsb(r);
3784
3785	error = validate_message(lkb, ms);
3786	if (error)
3787		goto out;
3788
3789	/* stub reply can happen with waiters_mutex held */
3790	error = remove_from_waiters_ms(lkb, ms);
3791	if (error)
3792		goto out;
3793
3794	/* this is the value returned from do_unlock() on the master */
3795
3796	switch (ms->m_result) {
3797	case -DLM_EUNLOCK:
3798		receive_flags_reply(lkb, ms);
3799		remove_lock_pc(r, lkb);
3800		queue_cast(r, lkb, -DLM_EUNLOCK);
3801		break;
3802	case -ENOENT:
3803		break;
3804	default:
3805		log_error(r->res_ls, "receive_unlock_reply %x error %d",
3806			  lkb->lkb_id, ms->m_result);
3807	}
3808 out:
3809	unlock_rsb(r);
3810	put_rsb(r);
3811}
3812
3813static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
3814{
3815	struct dlm_lkb *lkb;
3816	int error;
3817
3818	error = find_lkb(ls, ms->m_remid, &lkb);
3819	if (error) {
3820		log_debug(ls, "receive_unlock_reply from %d no lkb %x",
3821			  ms->m_header.h_nodeid, ms->m_remid);
3822		return;
3823	}
3824
3825	_receive_unlock_reply(lkb, ms);
3826	dlm_put_lkb(lkb);
3827}
3828
3829static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3830{
3831	struct dlm_rsb *r = lkb->lkb_resource;
3832	int error;
3833
3834	hold_rsb(r);
3835	lock_rsb(r);
3836
3837	error = validate_message(lkb, ms);
3838	if (error)
3839		goto out;
3840
3841	/* stub reply can happen with waiters_mutex held */
3842	error = remove_from_waiters_ms(lkb, ms);
3843	if (error)
3844		goto out;
3845
3846	/* this is the value returned from do_cancel() on the master */
3847
3848	switch (ms->m_result) {
3849	case -DLM_ECANCEL:
3850		receive_flags_reply(lkb, ms);
3851		revert_lock_pc(r, lkb);
3852		queue_cast(r, lkb, -DLM_ECANCEL);
3853		break;
3854	case 0:
3855		break;
3856	default:
3857		log_error(r->res_ls, "receive_cancel_reply %x error %d",
3858			  lkb->lkb_id, ms->m_result);
3859	}
3860 out:
3861	unlock_rsb(r);
3862	put_rsb(r);
3863}
3864
3865static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
3866{
3867	struct dlm_lkb *lkb;
3868	int error;
3869
3870	error = find_lkb(ls, ms->m_remid, &lkb);
3871	if (error) {
3872		log_debug(ls, "receive_cancel_reply from %d no lkb %x",
3873			  ms->m_header.h_nodeid, ms->m_remid);
3874		return;
3875	}
3876
3877	_receive_cancel_reply(lkb, ms);
3878	dlm_put_lkb(lkb);
3879}
3880
3881static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
3882{
3883	struct dlm_lkb *lkb;
3884	struct dlm_rsb *r;
3885	int error, ret_nodeid;
3886
3887	error = find_lkb(ls, ms->m_lkid, &lkb);
3888	if (error) {
3889		log_error(ls, "receive_lookup_reply no lkb");
3890		return;
3891	}
3892
3893	/* ms->m_result is the value returned by dlm_dir_lookup on dir node
3894	   FIXME: will a non-zero error ever be returned? */
3895
3896	r = lkb->lkb_resource;
3897	hold_rsb(r);
3898	lock_rsb(r);
3899
3900	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3901	if (error)
3902		goto out;
3903
3904	ret_nodeid = ms->m_nodeid;
3905	if (ret_nodeid == dlm_our_nodeid()) {
3906		r->res_nodeid = 0;
3907		ret_nodeid = 0;
3908		r->res_first_lkid = 0;
3909	} else {
3910		/* set_master() will copy res_nodeid to lkb_nodeid */
3911		r->res_nodeid = ret_nodeid;
3912	}
3913
3914	if (is_overlap(lkb)) {
3915		log_debug(ls, "receive_lookup_reply %x unlock %x",
3916			  lkb->lkb_id, lkb->lkb_flags);
3917		queue_cast_overlap(r, lkb);
3918		unhold_lkb(lkb); /* undoes create_lkb() */
3919		goto out_list;
3920	}
3921
3922	_request_lock(r, lkb);
3923
3924 out_list:
3925	if (!ret_nodeid)
3926		process_lookup_list(r);
3927 out:
3928	unlock_rsb(r);
3929	put_rsb(r);
3930	dlm_put_lkb(lkb);
3931}
3932
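/* Dispatch one locking message to its handler, ignoring messages from nodes
   that are no longer members of the lockspace. */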
3933static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
3934{
3935	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
3936		log_debug(ls, "ignore non-member message %d from %d %x %x %d",
3937			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
3938			  ms->m_remid, ms->m_result);
3939		return;
3940	}
3941
3942	switch (ms->m_type) {
3943
3944	/* messages sent to a master node */
3945
3946	case DLM_MSG_REQUEST:
3947		receive_request(ls, ms);
3948		break;
3949
3950	case DLM_MSG_CONVERT:
3951		receive_convert(ls, ms);
3952		break;
3953
3954	case DLM_MSG_UNLOCK:
3955		receive_unlock(ls, ms);
3956		break;
3957
3958	case DLM_MSG_CANCEL:
3959		receive_cancel(ls, ms);
3960		break;
3961
3962	/* messages sent from a master node (replies to above) */
3963
3964	case DLM_MSG_REQUEST_REPLY:
3965		receive_request_reply(ls, ms);
3966		break;
3967
3968	case DLM_MSG_CONVERT_REPLY:
3969		receive_convert_reply(ls, ms);
3970		break;
3971
3972	case DLM_MSG_UNLOCK_REPLY:
3973		receive_unlock_reply(ls, ms);
3974		break;
3975
3976	case DLM_MSG_CANCEL_REPLY:
3977		receive_cancel_reply(ls, ms);
3978		break;
3979
3980	/* messages sent from a master node (only two types of async msg) */
3981
3982	case DLM_MSG_GRANT:
3983		receive_grant(ls, ms);
3984		break;
3985
3986	case DLM_MSG_BAST:
3987		receive_bast(ls, ms);
3988		break;
3989
3990	/* messages sent to a dir node */
3991
3992	case DLM_MSG_LOOKUP:
3993		receive_lookup(ls, ms);
3994		break;
3995
3996	case DLM_MSG_REMOVE:
3997		receive_remove(ls, ms);
3998		break;
3999
4000	/* messages sent from a dir node (remove has no reply) */
4001
4002	case DLM_MSG_LOOKUP_REPLY:
4003		receive_lookup_reply(ls, ms);
4004		break;
4005
4006	/* other messages */
4007
4008	case DLM_MSG_PURGE:
4009		receive_purge(ls, ms);
4010		break;
4011
4012	default:
4013		log_error(ls, "unknown message type %d", ms->m_type);
4014	}
4015}
4016
4017/* If the lockspace is in recovery mode (locking stopped), then normal
4018   messages are saved on the requestqueue for processing after recovery is
4019   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4020   messages off the requestqueue before we process new ones. This occurs right
4021   after recovery completes when we transition from saving all messages on
4022   requestqueue, to processing all the saved messages, to processing new
4023   messages as they arrive. */
4024
4025static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4026				int nodeid)
4027{
4028	if (dlm_locking_stopped(ls)) {
4029		dlm_add_requestqueue(ls, nodeid, ms);
4030	} else {
4031		dlm_wait_requestqueue(ls);
4032		_receive_message(ls, ms);
4033	}
4034}
4035
4036/* This is called by dlm_recoverd to process messages that were saved on
4037   the requestqueue. */
4038
4039void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
4040{
4041	_receive_message(ls, ms);
4042}
4043
4044/* This is called by the midcomms layer when something is received for
4045   the lockspace.  It could be either a MSG (normal message sent as part of
4046   standard locking activity) or an RCOM (recovery message sent as part of
4047   lockspace recovery). */
4048
4049void dlm_receive_buffer(union dlm_packet *p, int nodeid)
4050{
4051	struct dlm_header *hd = &p->header;
4052	struct dlm_ls *ls;
4053	int type = 0;
4054
4055	switch (hd->h_cmd) {
4056	case DLM_MSG:
4057		dlm_message_in(&p->message);
4058		type = p->message.m_type;
4059		break;
4060	case DLM_RCOM:
4061		dlm_rcom_in(&p->rcom);
4062		type = p->rcom.rc_type;
4063		break;
4064	default:
4065		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
4066		return;
4067	}
4068
4069	if (hd->h_nodeid != nodeid) {
4070		log_print("invalid h_nodeid %d from %d lockspace %x",
4071			  hd->h_nodeid, nodeid, hd->h_lockspace);
4072		return;
4073	}
4074
4075	ls = dlm_find_lockspace_global(hd->h_lockspace);
4076	if (!ls) {
4077		if (dlm_config.ci_log_debug)
4078			log_print("invalid lockspace %x from %d cmd %d type %d",
4079				  hd->h_lockspace, nodeid, hd->h_cmd, type);
4080
4081		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
4082			dlm_send_ls_not_ready(nodeid, &p->rcom);
4083		return;
4084	}
4085
4086	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
4087	   be inactive (in this ls) before transitioning to recovery mode */
4088
4089	down_read(&ls->ls_recv_active);
4090	if (hd->h_cmd == DLM_MSG)
4091		dlm_receive_message(ls, &p->message, nodeid);
4092	else
4093		dlm_receive_rcom(ls, &p->rcom, nodeid);
4094	up_read(&ls->ls_recv_active);
4095
4096	dlm_put_lockspace(ls);
4097}
4098
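/* Recover a lock whose convert reply is outstanding.  A PR<->CW (middle)
   conversion gets a faked -EINPROGRESS reply and the rsb is flagged
   RECOVER_CONVERT so the real granted mode can be worked out once all locks
   are rebuilt; an up-conversion is simply flagged for resend. */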
4099static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
4100				   struct dlm_message *ms_stub)
4101{
4102	if (middle_conversion(lkb)) {
4103		hold_lkb(lkb);
4104		memset(ms_stub, 0, sizeof(struct dlm_message));
4105		ms_stub->m_flags = DLM_IFL_STUB_MS;
4106		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
4107		ms_stub->m_result = -EINPROGRESS;
4108		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4109		_receive_convert_reply(lkb, ms_stub);
4110
4111		/* Same special case as in receive_rcom_lock_args() */
4112		lkb->lkb_grmode = DLM_LOCK_IV;
4113		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
4114		unhold_lkb(lkb);
4115
4116	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
4117		lkb->lkb_flags |= DLM_IFL_RESEND;
4118	}
4119
4120	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
4121	   conversions are async; there's no reply from the remote master */
4122}
4123
4124/* A waiting lkb needs recovery if the master node has failed, or
4125   the master node is changing (only when no directory is used) */
4126
4127static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
4128{
4129	if (dlm_is_removed(ls, lkb->lkb_nodeid))
4130		return 1;
4131
4132	if (!dlm_no_directory(ls))
4133		return 0;
4134
4135	if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
4136		return 1;
4137
4138	return 0;
4139}
4140
4141/* Recovery for locks that are waiting for replies from nodes that are now
4142   gone.  We can just complete unlocks and cancels by faking a reply from the
4143   dead node.  Requests and up-conversions we flag to be resent after
4144   recovery.  Down-conversions can just be completed with a fake reply like
4145   unlocks.  Conversions between PR and CW need special attention. */
4146
4147void dlm_recover_waiters_pre(struct dlm_ls *ls)
4148{
4149	struct dlm_lkb *lkb, *safe;
4150	struct dlm_message *ms_stub;
4151	int wait_type, stub_unlock_result, stub_cancel_result;
4152
4153	ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
4154	if (!ms_stub) {
4155		log_error(ls, "dlm_recover_waiters_pre no mem");
4156		return;
4157	}
4158
4159	mutex_lock(&ls->ls_waiters_mutex);
4160
4161	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
4162
4163		/* exclude debug messages about unlocks because there can be so
4164		   many and they aren't very interesting */
4165
4166		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
4167			log_debug(ls, "recover_waiter %x nodeid %d "
4168				  "msg %d to %d", lkb->lkb_id, lkb->lkb_nodeid,
4169				  lkb->lkb_wait_type, lkb->lkb_wait_nodeid);
4170		}
4171
4172		/* all outstanding lookups, regardless of destination  will be
4173		   resent after recovery is done */
4174
4175		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
4176			lkb->lkb_flags |= DLM_IFL_RESEND;
4177			continue;
4178		}
4179
4180		if (!waiter_needs_recovery(ls, lkb))
4181			continue;
4182
4183		wait_type = lkb->lkb_wait_type;
4184		stub_unlock_result = -DLM_EUNLOCK;
4185		stub_cancel_result = -DLM_ECANCEL;
4186
4187		/* Main reply may have been received leaving a zero wait_type,
4188		   but a reply for the overlapping op may not have been
4189		   received.  In that case we need to fake the appropriate
4190		   reply for the overlap op. */
4191
4192		if (!wait_type) {
4193			if (is_overlap_cancel(lkb)) {
4194				wait_type = DLM_MSG_CANCEL;
4195				if (lkb->lkb_grmode == DLM_LOCK_IV)
4196					stub_cancel_result = 0;
4197			}
4198			if (is_overlap_unlock(lkb)) {
4199				wait_type = DLM_MSG_UNLOCK;
4200				if (lkb->lkb_grmode == DLM_LOCK_IV)
4201					stub_unlock_result = -ENOENT;
4202			}
4203
4204			log_debug(ls, "rwpre overlap %x %x %d %d %d",
4205				  lkb->lkb_id, lkb->lkb_flags, wait_type,
4206				  stub_cancel_result, stub_unlock_result);
4207		}
4208
4209		switch (wait_type) {
4210
4211		case DLM_MSG_REQUEST:
4212			lkb->lkb_flags |= DLM_IFL_RESEND;
4213			break;
4214
4215		case DLM_MSG_CONVERT:
4216			recover_convert_waiter(ls, lkb, ms_stub);
4217			break;
4218
4219		case DLM_MSG_UNLOCK:
4220			hold_lkb(lkb);
4221			memset(ms_stub, 0, sizeof(struct dlm_message));
4222			ms_stub->m_flags = DLM_IFL_STUB_MS;
4223			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
4224			ms_stub->m_result = stub_unlock_result;
4225			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4226			_receive_unlock_reply(lkb, ms_stub);
4227			dlm_put_lkb(lkb);
4228			break;
4229
4230		case DLM_MSG_CANCEL:
4231			hold_lkb(lkb);
4232			memset(ms_stub, 0, sizeof(struct dlm_message));
4233			ms_stub->m_flags = DLM_IFL_STUB_MS;
4234			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
4235			ms_stub->m_result = stub_cancel_result;
4236			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4237			_receive_cancel_reply(lkb, ms_stub);
4238			dlm_put_lkb(lkb);
4239			break;
4240
4241		default:
4242			log_error(ls, "invalid lkb wait_type %d %d",
4243				  lkb->lkb_wait_type, wait_type);
4244		}
4245		schedule();
4246	}
4247	mutex_unlock(&ls->ls_waiters_mutex);
4248	kfree(ms_stub);
4249}
4250
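/* Find the next lkb on the waiters list that dlm_recover_waiters_pre()
   flagged for resend; take a reference on it and return it, or NULL when
   none remain. */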
4251static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
4252{
4253	struct dlm_lkb *lkb;
4254	int found = 0;
4255
4256	mutex_lock(&ls->ls_waiters_mutex);
4257	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
4258		if (lkb->lkb_flags & DLM_IFL_RESEND) {
4259			hold_lkb(lkb);
4260			found = 1;
4261			break;
4262		}
4263	}
4264	mutex_unlock(&ls->ls_waiters_mutex);
4265
4266	if (!found)
4267		lkb = NULL;
4268	return lkb;
4269}
4270
4271/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
4272   master or dir-node for r.  Processing the lkb may result in it being placed
4273   back on waiters. */
4274
4275/* We do this after normal locking has been enabled and any saved messages
4276   (in requestqueue) have been processed.  We should be confident that at
4277   this point we won't get or process a reply to any of these waiting
4278   operations.  But, new ops may be coming in on the rsbs/locks here from
4279   userspace or remotely. */
4280
4281/* there may have been an overlap unlock/cancel prior to recovery or after
4282   recovery.  if before, the lkb may still have a positive wait_count; if after,
4283   the overlap flag would just have been set and nothing new sent.  we can be
4284   confident here that any replies to either the initial op or overlap ops
4285   prior to recovery have been received. */
4286
4287int dlm_recover_waiters_post(struct dlm_ls *ls)
4288{
4289	struct dlm_lkb *lkb;
4290	struct dlm_rsb *r;
4291	int error = 0, mstype, err, oc, ou;
4292
4293	while (1) {
4294		if (dlm_locking_stopped(ls)) {
4295			log_debug(ls, "recover_waiters_post aborted");
4296			error = -EINTR;
4297			break;
4298		}
4299
4300		lkb = find_resend_waiter(ls);
4301		if (!lkb)
4302			break;
4303
4304		r = lkb->lkb_resource;
4305		hold_rsb(r);
4306		lock_rsb(r);
4307
4308		mstype = lkb->lkb_wait_type;
4309		oc = is_overlap_cancel(lkb);
4310		ou = is_overlap_unlock(lkb);
4311		err = 0;
4312
4313		log_debug(ls, "recover_waiter %x nodeid %d msg %d r_nodeid %d",
4314			  lkb->lkb_id, lkb->lkb_nodeid, mstype, r->res_nodeid);
4315
4316		/* At this point we assume that we won't get a reply to any
4317		   previous op or overlap op on this lock.  First, do a big
4318		   remove_from_waiters() for all previous ops. */
4319
4320		lkb->lkb_flags &= ~DLM_IFL_RESEND;
4321		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4322		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4323		lkb->lkb_wait_type = 0;
4324		lkb->lkb_wait_count = 0;
4325		mutex_lock(&ls->ls_waiters_mutex);
4326		list_del_init(&lkb->lkb_wait_reply);
4327		mutex_unlock(&ls->ls_waiters_mutex);
4328		unhold_lkb(lkb); /* for waiters list */
4329
4330		if (oc || ou) {
4331			/* do an unlock or cancel instead of resending */
4332			switch (mstype) {
4333			case DLM_MSG_LOOKUP:
4334			case DLM_MSG_REQUEST:
4335				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
4336							-DLM_ECANCEL);
4337				unhold_lkb(lkb); /* undoes create_lkb() */
4338				break;
4339			case DLM_MSG_CONVERT:
4340				if (oc) {
4341					queue_cast(r, lkb, -DLM_ECANCEL);
4342				} else {
4343					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
4344					_unlock_lock(r, lkb);
4345				}
4346				break;
4347			default:
4348				err = 1;
4349			}
4350		} else {
4351			switch (mstype) {
4352			case DLM_MSG_LOOKUP:
4353			case DLM_MSG_REQUEST:
4354				_request_lock(r, lkb);
4355				if (is_master(r))
4356					confirm_master(r, 0);
4357				break;
4358			case DLM_MSG_CONVERT:
4359				_convert_lock(r, lkb);
4360				break;
4361			default:
4362				err = 1;
4363			}
4364		}
4365
4366		if (err)
4367			log_error(ls, "recover_waiters_post %x %d %x %d %d",
4368			  	  lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
4369		unlock_rsb(r);
4370		put_rsb(r);
4371		dlm_put_lkb(lkb);
4372	}
4373
4374	return error;
4375}
4376
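/* Remove from the given rsb queue each lkb that the test function matches,
   dropping the reference that should free it, and flag the rsb LOCKS_PURGED
   so dlm_grant_after_purge() will revisit it. */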
4377static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
4378			int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
4379{
4380	struct dlm_ls *ls = r->res_ls;
4381	struct dlm_lkb *lkb, *safe;
4382
4383	list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
4384		if (test(ls, lkb)) {
4385			rsb_set_flag(r, RSB_LOCKS_PURGED);
4386			del_lkb(r, lkb);
4387			/* this put should free the lkb */
4388			if (!dlm_put_lkb(lkb))
4389				log_error(ls, "purged lkb not released");
4390		}
4391	}
4392}
4393
4394static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4395{
4396	return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
4397}
4398
4399static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4400{
4401	return is_master_copy(lkb);
4402}
4403
4404static void purge_dead_locks(struct dlm_rsb *r)
4405{
4406	purge_queue(r, &r->res_grantqueue, &purge_dead_test);
4407	purge_queue(r, &r->res_convertqueue, &purge_dead_test);
4408	purge_queue(r, &r->res_waitqueue, &purge_dead_test);
4409}
4410
4411void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
4412{
4413	purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
4414	purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
4415	purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
4416}
4417
4418/* Get rid of locks held by nodes that are gone. */
4419
4420int dlm_purge_locks(struct dlm_ls *ls)
4421{
4422	struct dlm_rsb *r;
4423
4424	log_debug(ls, "dlm_purge_locks");
4425
4426	down_write(&ls->ls_root_sem);
4427	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
4428		hold_rsb(r);
4429		lock_rsb(r);
4430		if (is_master(r))
4431			purge_dead_locks(r);
4432		unlock_rsb(r);
4433		unhold_rsb(r);
4434
4435		schedule();
4436	}
4437	up_write(&ls->ls_root_sem);
4438
4439	return 0;
4440}
4441
4442static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
4443{
4444	struct dlm_rsb *r, *r_ret = NULL;
4445
4446	spin_lock(&ls->ls_rsbtbl[bucket].lock);
4447	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
4448		if (!rsb_flag(r, RSB_LOCKS_PURGED))
4449			continue;
4450		hold_rsb(r);
4451		rsb_clear_flag(r, RSB_LOCKS_PURGED);
4452		r_ret = r;
4453		break;
4454	}
4455	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
4456	return r_ret;
4457}
4458
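/* After dead nodes' locks have been purged, walk every hash bucket looking
   for rsbs flagged LOCKS_PURGED and, where we are the master, grant any
   pending locks that the purge unblocked. */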
4459void dlm_grant_after_purge(struct dlm_ls *ls)
4460{
4461	struct dlm_rsb *r;
4462	int bucket = 0;
4463
4464	while (1) {
4465		r = find_purged_rsb(ls, bucket);
4466		if (!r) {
4467			if (bucket == ls->ls_rsbtbl_size - 1)
4468				break;
4469			bucket++;
4470			continue;
4471		}
4472		lock_rsb(r);
4473		if (is_master(r)) {
4474			grant_pending_locks(r);
4475			confirm_master(r, 0);
4476		}
4477		unlock_rsb(r);
4478		put_rsb(r);
4479		schedule();
4480	}
4481}
4482
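/* During recovery the master rebuilds remote (master-copy) locks from rcom
   messages.  These helpers look for an existing lkb with the given owner
   nodeid and remote lkid on the rsb's grant, convert and wait queues, so a
   lock resent after an aborted recovery isn't added twice. */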
4483static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
4484					 uint32_t remid)
4485{
4486	struct dlm_lkb *lkb;
4487
4488	list_for_each_entry(lkb, head, lkb_statequeue) {
4489		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
4490			return lkb;
4491	}
4492	return NULL;
4493}
4494
4495static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
4496				    uint32_t remid)
4497{
4498	struct dlm_lkb *lkb;
4499
4500	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
4501	if (lkb)
4502		return lkb;
4503	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
4504	if (lkb)
4505		return lkb;
4506	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
4507	if (lkb)
4508		return lkb;
4509	return NULL;
4510}
4511
4512/* needs at least dlm_rcom + rcom_lock */
4513static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4514				  struct dlm_rsb *r, struct dlm_rcom *rc)
4515{
4516	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4517
4518	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
4519	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
4520	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
4521	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
4522	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
4523	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4524	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
4525	lkb->lkb_rqmode = rl->rl_rqmode;
4526	lkb->lkb_grmode = rl->rl_grmode;
4527	/* don't set lkb_status because add_lkb wants to itself */
4528
4529	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
4530	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
4531
4532	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
4533		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4534			 sizeof(struct rcom_lock);
4535		if (lvblen > ls->ls_lvblen)
4536			return -EINVAL;
4537		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
4538		if (!lkb->lkb_lvbptr)
4539			return -ENOMEM;
4540		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
4541	}
4542
4543	/* Conversions between PR and CW (middle modes) need special handling.
4544	   The real granted mode of these converting locks cannot be determined
4545	   until all locks have been rebuilt on the rsb (recover_conversion) */
4546
4547	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
4548	    middle_conversion(lkb)) {
4549		rl->rl_status = DLM_LKSTS_CONVERT;
4550		lkb->lkb_grmode = DLM_LOCK_IV;
4551		rsb_set_flag(r, RSB_RECOVER_CONVERT);
4552	}
4553
4554	return 0;
4555}
4556
4557/* This lkb may have been recovered in a previous aborted recovery so we need
4558   to check if the rsb already has an lkb with the given remote nodeid/lkid.
4559   If so we just send back a standard reply.  If not, we create a new lkb with
4560   the given values and send back our lkid.  We send back our lkid by sending
4561   back the rcom_lock struct we got but with the remid field filled in. */
4562
4563/* needs at least dlm_rcom + rcom_lock */
4564int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4565{
4566	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4567	struct dlm_rsb *r;
4568	struct dlm_lkb *lkb;
4569	int error;
4570
4571	if (rl->rl_parent_lkid) {
4572		error = -EOPNOTSUPP;
4573		goto out;
4574	}
4575
4576	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
4577			 R_MASTER, &r);
4578	if (error)
4579		goto out;
4580
4581	lock_rsb(r);
4582
4583	lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid));
4584	if (lkb) {
4585		error = -EEXIST;
4586		goto out_remid;
4587	}
4588
4589	error = create_lkb(ls, &lkb);
4590	if (error)
4591		goto out_unlock;
4592
4593	error = receive_rcom_lock_args(ls, lkb, r, rc);
4594	if (error) {
4595		__put_lkb(ls, lkb);
4596		goto out_unlock;
4597	}
4598
4599	attach_lkb(r, lkb);
4600	add_lkb(r, lkb, rl->rl_status);
4601	error = 0;
4602
4603 out_remid:
4604	/* this is the new value returned to the lock holder for
4605	   saving in its process-copy lkb */
4606	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
4607
4608 out_unlock:
4609	unlock_rsb(r);
4610	put_rsb(r);
4611 out:
4612	if (error)
4613		log_debug(ls, "recover_master_copy %d %x", error,
4614			  le32_to_cpu(rl->rl_lkid));
4615	rl->rl_result = cpu_to_le32(error);
4616	return error;
4617}
4618
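/* Handle the reply from the new master to a lock we sent in
   dlm_recover_locks(): save the master's lkid as our remid, resend the lock
   if the master wasn't ready for it (-EBADR), and ack the lock so
   dlm_recover_locks() can stop waiting for it. */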
4619/* needs at least dlm_rcom + rcom_lock */
4620int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4621{
4622	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4623	struct dlm_rsb *r;
4624	struct dlm_lkb *lkb;
4625	int error;
4626
4627	error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb);
4628	if (error) {
4629		log_error(ls, "recover_process_copy no lkid %x",
4630				le32_to_cpu(rl->rl_lkid));
4631		return error;
4632	}
4633
4634	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
4635
4636	error = le32_to_cpu(rl->rl_result);
4637
4638	r = lkb->lkb_resource;
4639	hold_rsb(r);
4640	lock_rsb(r);
4641
4642	switch (error) {
4643	case -EBADR:
4644		/* There's a chance the new master received our lock before
4645		   dlm_recover_master_reply(); this wouldn't happen if we did
4646		   a barrier between recover_masters and recover_locks. */
4647		log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
4648			  (unsigned long)r, r->res_name);
4649		dlm_send_rcom_lock(r, lkb);
4650		goto out;
4651	case -EEXIST:
4652		log_debug(ls, "master copy exists %x", lkb->lkb_id);
4653		/* fall through */
4654	case 0:
4655		lkb->lkb_remid = le32_to_cpu(rl->rl_remid);
4656		break;
4657	default:
4658		log_error(ls, "dlm_recover_process_copy unknown error %d %x",
4659			  error, lkb->lkb_id);
4660	}
4661
4662	/* an ack for dlm_recover_locks() which waits for replies from
4663	   all the locks it sends to new masters */
4664	dlm_recovered_lock(r);
4665 out:
4666	unlock_rsb(r);
4667	put_rsb(r);
4668	dlm_put_lkb(lkb);
4669
4670	return 0;
4671}
4672
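/* Lock request on behalf of a userspace process (reached from the misc
   device write path in user.c): create the lkb, allocate a user lvb if
   requested, run the normal request path, and on success or -EINPROGRESS
   keep the lkb on the process's list of owned locks. */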
4673int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
4674		     int mode, uint32_t flags, void *name, unsigned int namelen,
4675		     unsigned long timeout_cs)
4676{
4677	struct dlm_lkb *lkb;
4678	struct dlm_args args;
4679	int error;
4680
4681	dlm_lock_recovery(ls);
4682
4683	error = create_lkb(ls, &lkb);
4684	if (error) {
4685		kfree(ua);
4686		goto out;
4687	}
4688
4689	if (flags & DLM_LKF_VALBLK) {
4690		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
4691		if (!ua->lksb.sb_lvbptr) {
4692			kfree(ua);
4693			__put_lkb(ls, lkb);
4694			error = -ENOMEM;
4695			goto out;
4696		}
4697	}
4698
4699	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
4700	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
4701	   lock and that lkb_astparam is the dlm_user_args structure. */
4702
4703	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
4704			      fake_astfn, ua, fake_bastfn, &args);
4705	lkb->lkb_flags |= DLM_IFL_USER;
4706
4707	if (error) {
4708		__put_lkb(ls, lkb);
4709		goto out;
4710	}
4711
4712	error = request_lock(ls, lkb, name, namelen, &args);
4713
4714	switch (error) {
4715	case 0:
4716		break;
4717	case -EINPROGRESS:
4718		error = 0;
4719		break;
4720	case -EAGAIN:
4721		error = 0;
4722		/* fall through */
4723	default:
4724		__put_lkb(ls, lkb);
4725		goto out;
4726	}
4727
4728	/* add this new lkb to the per-process list of locks */
4729	spin_lock(&ua->proc->locks_spin);
4730	hold_lkb(lkb);
4731	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
4732	spin_unlock(&ua->proc->locks_spin);
4733 out:
4734	dlm_unlock_recovery(ls);
4735	return error;
4736}
4737
4738int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4739		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
4740		     unsigned long timeout_cs)
4741{
4742	struct dlm_lkb *lkb;
4743	struct dlm_args args;
4744	struct dlm_user_args *ua;
4745	int error;
4746
4747	dlm_lock_recovery(ls);
4748
4749	error = find_lkb(ls, lkid, &lkb);
4750	if (error)
4751		goto out;
4752
4753	/* user can change the params on its lock when it converts it, or
4754	   add an lvb that didn't exist before */
4755
4756	ua = lkb->lkb_ua;
4757
4758	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
4759		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
4760		if (!ua->lksb.sb_lvbptr) {
4761			error = -ENOMEM;
4762			goto out_put;
4763		}
4764	}
4765	if (lvb_in && ua->lksb.sb_lvbptr)
4766		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4767
4768	ua->xid = ua_tmp->xid;
4769	ua->castparam = ua_tmp->castparam;
4770	ua->castaddr = ua_tmp->castaddr;
4771	ua->bastparam = ua_tmp->bastparam;
4772	ua->bastaddr = ua_tmp->bastaddr;
4773	ua->user_lksb = ua_tmp->user_lksb;
4774
4775	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
4776			      fake_astfn, ua, fake_bastfn, &args);
4777	if (error)
4778		goto out_put;
4779
4780	error = convert_lock(ls, lkb, &args);
4781
4782	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
4783		error = 0;
4784 out_put:
4785	dlm_put_lkb(lkb);
4786 out:
4787	dlm_unlock_recovery(ls);
4788	kfree(ua_tmp);
4789	return error;
4790}
4791
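/* Unlock (or force-unlock) a lock on behalf of a userspace process; on
   success the lkb is moved to the process's unlocking list so the final
   callback can complete the cleanup. */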
4792int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4793		    uint32_t flags, uint32_t lkid, char *lvb_in)
4794{
4795	struct dlm_lkb *lkb;
4796	struct dlm_args args;
4797	struct dlm_user_args *ua;
4798	int error;
4799
4800	dlm_lock_recovery(ls);
4801
4802	error = find_lkb(ls, lkid, &lkb);
4803	if (error)
4804		goto out;
4805
4806	ua = lkb->lkb_ua;
4807
4808	if (lvb_in && ua->lksb.sb_lvbptr)
4809		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4810	if (ua_tmp->castparam)
4811		ua->castparam = ua_tmp->castparam;
4812	ua->user_lksb = ua_tmp->user_lksb;
4813
4814	error = set_unlock_args(flags, ua, &args);
4815	if (error)
4816		goto out_put;
4817
4818	error = unlock_lock(ls, lkb, &args);
4819
4820	if (error == -DLM_EUNLOCK)
4821		error = 0;
4822	/* from validate_unlock_args() */
4823	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
4824		error = 0;
4825	if (error)
4826		goto out_put;
4827
4828	spin_lock(&ua->proc->locks_spin);
4829	/* dlm_user_add_cb() may have already taken lkb off the proc list */
4830	if (!list_empty(&lkb->lkb_ownqueue))
4831		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
4832	spin_unlock(&ua->proc->locks_spin);
4833 out_put:
4834	dlm_put_lkb(lkb);
4835 out:
4836	dlm_unlock_recovery(ls);
4837	kfree(ua_tmp);
4838	return error;
4839}
4840
4841int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4842		    uint32_t flags, uint32_t lkid)
4843{
4844	struct dlm_lkb *lkb;
4845	struct dlm_args args;
4846	struct dlm_user_args *ua;
4847	int error;
4848
4849	dlm_lock_recovery(ls);
4850
4851	error = find_lkb(ls, lkid, &lkb);
4852	if (error)
4853		goto out;
4854
4855	ua = lkb->lkb_ua;
4856	if (ua_tmp->castparam)
4857		ua->castparam = ua_tmp->castparam;
4858	ua->user_lksb = ua_tmp->user_lksb;
4859
4860	error = set_unlock_args(flags, ua, &args);
4861	if (error)
4862		goto out_put;
4863
4864	error = cancel_lock(ls, lkb, &args);
4865
4866	if (error == -DLM_ECANCEL)
4867		error = 0;
4868	/* from validate_unlock_args() */
4869	if (error == -EBUSY)
4870		error = 0;
4871 out_put:
4872	dlm_put_lkb(lkb);
4873 out:
4874	dlm_unlock_recovery(ls);
4875	kfree(ua_tmp);
4876	return error;
4877}
4878
4879int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
4880{
4881	struct dlm_lkb *lkb;
4882	struct dlm_args args;
4883	struct dlm_user_args *ua;
4884	struct dlm_rsb *r;
4885	int error;
4886
4887	dlm_lock_recovery(ls);
4888
4889	error = find_lkb(ls, lkid, &lkb);
4890	if (error)
4891		goto out;
4892
4893	ua = lkb->lkb_ua;
4894
4895	error = set_unlock_args(flags, ua, &args);
4896	if (error)
4897		goto out_put;
4898
4899	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
4900
4901	r = lkb->lkb_resource;
4902	hold_rsb(r);
4903	lock_rsb(r);
4904
4905	error = validate_unlock_args(lkb, &args);
4906	if (error)
4907		goto out_r;
4908	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
4909
4910	error = _cancel_lock(r, lkb);
4911 out_r:
4912	unlock_rsb(r);
4913	put_rsb(r);
4914
4915	if (error == -DLM_ECANCEL)
4916		error = 0;
4917	/* from validate_unlock_args() */
4918	if (error == -EBUSY)
4919		error = 0;
4920 out_put:
4921	dlm_put_lkb(lkb);
4922 out:
4923	dlm_unlock_recovery(ls);
4924	return error;
4925}
4926
4927/* lkb's that are removed from the waiters list by revert are just left on the
4928   orphans list with the granted orphan locks, to be freed by purge */
4929
4930static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4931{
4932	struct dlm_args args;
4933	int error;
4934
4935	hold_lkb(lkb);
4936	mutex_lock(&ls->ls_orphans_mutex);
4937	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
4938	mutex_unlock(&ls->ls_orphans_mutex);
4939
4940	set_unlock_args(0, lkb->lkb_ua, &args);
4941
4942	error = cancel_lock(ls, lkb, &args);
4943	if (error == -DLM_ECANCEL)
4944		error = 0;
4945	return error;
4946}
4947
4948/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
4949   Regardless of what rsb queue the lock is on, it's removed and freed. */
4950
4951static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4952{
4953	struct dlm_args args;
4954	int error;
4955
4956	set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
4957
4958	error = unlock_lock(ls, lkb, &args);
4959	if (error == -DLM_EUNLOCK)
4960		error = 0;
4961	return error;
4962}
4963
4964/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
4965   (which does lock_rsb) due to deadlock with receiving a message that does
4966   lock_rsb followed by dlm_user_add_cb() */
4967
4968static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
4969				     struct dlm_user_proc *proc)
4970{
4971	struct dlm_lkb *lkb = NULL;
4972
4973	mutex_lock(&ls->ls_clear_proc_locks);
4974	if (list_empty(&proc->locks))
4975		goto out;
4976
4977	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
4978	list_del_init(&lkb->lkb_ownqueue);
4979
4980	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
4981		lkb->lkb_flags |= DLM_IFL_ORPHAN;
4982	else
4983		lkb->lkb_flags |= DLM_IFL_DEAD;
4984 out:
4985	mutex_unlock(&ls->ls_clear_proc_locks);
4986	return lkb;
4987}
4988
4989/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
4990   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
4991   which we clear here. */
4992
4993/* proc CLOSING flag is set so no more device_reads should look at proc->asts
4994   list, and no more device_writes should add lkb's to proc->locks list; so we
4995   shouldn't need to take asts_spin or locks_spin here.  this assumes that
4996   device reads/writes/closes are serialized -- FIXME: we may need to serialize
4997   them ourselves. */
4998
4999void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5000{
5001	struct dlm_lkb *lkb, *safe;
5002
5003	dlm_lock_recovery(ls);
5004
5005	while (1) {
5006		lkb = del_proc_lock(ls, proc);
5007		if (!lkb)
5008			break;
5009		del_timeout(lkb);
5010		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5011			orphan_proc_lock(ls, lkb);
5012		else
5013			unlock_proc_lock(ls, lkb);
5014
5015		/* this removes the reference for the proc->locks list
5016		   added by dlm_user_request, it may result in the lkb
5017		   being freed */
5018
5019		dlm_put_lkb(lkb);
5020	}
5021
5022	mutex_lock(&ls->ls_clear_proc_locks);
5023
5024	/* in-progress unlocks */
5025	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5026		list_del_init(&lkb->lkb_ownqueue);
5027		lkb->lkb_flags |= DLM_IFL_DEAD;
5028		dlm_put_lkb(lkb);
5029	}
5030
5031	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5032		memset(&lkb->lkb_callbacks, 0,
5033		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5034		list_del_init(&lkb->lkb_cb_list);
5035		dlm_put_lkb(lkb);
5036	}
5037
5038	mutex_unlock(&ls->ls_clear_proc_locks);
5039	dlm_unlock_recovery(ls);
5040}
5041
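/* Used by dlm_user_purge() when a process purges its own locks: force-unlock
   everything still on the process's lock list, then clear any in-progress
   unlocks and pending callbacks. */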
5042static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5043{
5044	struct dlm_lkb *lkb, *safe;
5045
5046	while (1) {
5047		lkb = NULL;
5048		spin_lock(&proc->locks_spin);
5049		if (!list_empty(&proc->locks)) {
5050			lkb = list_entry(proc->locks.next, struct dlm_lkb,
5051					 lkb_ownqueue);
5052			list_del_init(&lkb->lkb_ownqueue);
5053		}
5054		spin_unlock(&proc->locks_spin);
5055
5056		if (!lkb)
5057			break;
5058
5059		lkb->lkb_flags |= DLM_IFL_DEAD;
5060		unlock_proc_lock(ls, lkb);
5061		dlm_put_lkb(lkb); /* ref from proc->locks list */
5062	}
5063
5064	spin_lock(&proc->locks_spin);
5065	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5066		list_del_init(&lkb->lkb_ownqueue);
5067		lkb->lkb_flags |= DLM_IFL_DEAD;
5068		dlm_put_lkb(lkb);
5069	}
5070	spin_unlock(&proc->locks_spin);
5071
5072	spin_lock(&proc->asts_spin);
5073	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5074		memset(&lkb->lkb_callbacks, 0,
5075		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5076		list_del_init(&lkb->lkb_cb_list);
5077		dlm_put_lkb(lkb);
5078	}
5079	spin_unlock(&proc->asts_spin);
5080}
5081
5082/* pid of 0 means purge all orphans */
5083
5084static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
5085{
5086	struct dlm_lkb *lkb, *safe;
5087
5088	mutex_lock(&ls->ls_orphans_mutex);
5089	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
5090		if (pid && lkb->lkb_ownpid != pid)
5091			continue;
5092		unlock_proc_lock(ls, lkb);
5093		list_del_init(&lkb->lkb_ownqueue);
5094		dlm_put_lkb(lkb);
5095	}
5096	mutex_unlock(&ls->ls_orphans_mutex);
5097}
5098
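/* Ask another node to free the orphan locks owned by the given pid by
   sending it a DLM_MSG_PURGE message (handled by receive_purge() on the
   receiving side). */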
5099static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
5100{
5101	struct dlm_message *ms;
5102	struct dlm_mhandle *mh;
5103	int error;
5104
5105	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
5106				DLM_MSG_PURGE, &ms, &mh);
5107	if (error)
5108		return error;
5109	ms->m_nodeid = nodeid;
5110	ms->m_pid = pid;
5111
5112	return send_message(mh, ms);
5113}
5114
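/* Purge orphan locks for a pid: if the target node is remote, send it a
   purge message; locally, a process purging its own pid also gets its active
   locks cleared, otherwise only the orphans for that pid are freed. */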
5115int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
5116		   int nodeid, int pid)
5117{
5118	int error = 0;
5119
5120	if (nodeid != dlm_our_nodeid()) {
5121		error = send_purge(ls, nodeid, pid);
5122	} else {
5123		dlm_lock_recovery(ls);
5124		if (pid == current->pid)
5125			purge_proc_locks(ls, proc);
5126		else
5127			do_purge(ls, nodeid, pid);
5128		dlm_unlock_recovery(ls);
5129	}
5130	return error;
5131}
5132
  92static void del_timeout(struct dlm_lkb *lkb);
  93static void toss_rsb(struct kref *kref);
  94
  95/*
  96 * Lock compatibilty matrix - thanks Steve
  97 * UN = Unlocked state. Not really a state, used as a flag
  98 * PD = Padding. Used to make the matrix a nice power of two in size
  99 * Other states are the same as the VMS DLM.
 100 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 101 */
 102
 103static const int __dlm_compat_matrix[8][8] = {
 104      /* UN NL CR CW PR PW EX PD */
 105        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
 106        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
 107        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
 108        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
 109        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
 110        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
 111        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
 112        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 113};
 114
 115/*
 116 * This defines the direction of transfer of LVB data.
 117 * Granted mode is the row; requested mode is the column.
 118 * Usage: matrix[grmode+1][rqmode+1]
 119 * 1 = LVB is returned to the caller
 120 * 0 = LVB is written to the resource
 121 * -1 = nothing happens to the LVB
 122 */
 123
 124const int dlm_lvb_operations[8][8] = {
 125        /* UN   NL  CR  CW  PR  PW  EX  PD*/
 126        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
 127        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
 128        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
 129        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
 130        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
 131        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
 132        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
 133        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
 134};
 135
 136#define modes_compat(gr, rq) \
 137	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
 138
 139int dlm_modes_compat(int mode1, int mode2)
 140{
 141	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
 142}
 143
 144/*
 145 * Compatibility matrix for conversions with QUECVT set.
 146 * Granted mode is the row; requested mode is the column.
 147 * Usage: matrix[grmode+1][rqmode+1]
 148 */
 149
 150static const int __quecvt_compat_matrix[8][8] = {
 151      /* UN NL CR CW PR PW EX PD */
 152        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
 153        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
 154        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
 155        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
 156        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
 157        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
 158        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
 159        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 160};
 161
 162void dlm_print_lkb(struct dlm_lkb *lkb)
 163{
 164	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
 165	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
 166	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
 167	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
 168	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
 169	       (unsigned long long)lkb->lkb_recover_seq);
 170}
 171
 172static void dlm_print_rsb(struct dlm_rsb *r)
 173{
 174	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
 175	       "rlc %d name %s\n",
 176	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
 177	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
 178	       r->res_name);
 179}
 180
 181void dlm_dump_rsb(struct dlm_rsb *r)
 182{
 183	struct dlm_lkb *lkb;
 184
 185	dlm_print_rsb(r);
 186
 187	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
 188	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
 189	printk(KERN_ERR "rsb lookup list\n");
 190	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
 191		dlm_print_lkb(lkb);
 192	printk(KERN_ERR "rsb grant queue:\n");
 193	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
 194		dlm_print_lkb(lkb);
 195	printk(KERN_ERR "rsb convert queue:\n");
 196	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
 197		dlm_print_lkb(lkb);
 198	printk(KERN_ERR "rsb wait queue:\n");
 199	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
 200		dlm_print_lkb(lkb);
 201}
 202
 203/* Threads cannot use the lockspace while it's being recovered */
 204
 205static inline void dlm_lock_recovery(struct dlm_ls *ls)
 206{
 207	down_read(&ls->ls_in_recovery);
 208}
 209
 210void dlm_unlock_recovery(struct dlm_ls *ls)
 211{
 212	up_read(&ls->ls_in_recovery);
 213}
 214
 215int dlm_lock_recovery_try(struct dlm_ls *ls)
 216{
 217	return down_read_trylock(&ls->ls_in_recovery);
 218}
 219
 220static inline int can_be_queued(struct dlm_lkb *lkb)
 221{
 222	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
 223}
 224
 225static inline int force_blocking_asts(struct dlm_lkb *lkb)
 226{
 227	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
 228}
 229
 230static inline int is_demoted(struct dlm_lkb *lkb)
 231{
 232	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
 233}
 234
 235static inline int is_altmode(struct dlm_lkb *lkb)
 236{
 237	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
 238}
 239
 240static inline int is_granted(struct dlm_lkb *lkb)
 241{
 242	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
 243}
 244
 245static inline int is_remote(struct dlm_rsb *r)
 246{
 247	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
 248	return !!r->res_nodeid;
 249}
 250
 251static inline int is_process_copy(struct dlm_lkb *lkb)
 252{
 253	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
 254}
 255
 256static inline int is_master_copy(struct dlm_lkb *lkb)
 257{
 
 
 258	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
 259}
 260
 261static inline int middle_conversion(struct dlm_lkb *lkb)
 262{
 263	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
 264	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
 265		return 1;
 266	return 0;
 267}
 268
 269static inline int down_conversion(struct dlm_lkb *lkb)
 270{
 271	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
 272}
 273
 274static inline int is_overlap_unlock(struct dlm_lkb *lkb)
 275{
 276	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
 277}
 278
 279static inline int is_overlap_cancel(struct dlm_lkb *lkb)
 280{
 281	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
 282}
 283
 284static inline int is_overlap(struct dlm_lkb *lkb)
 285{
 286	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
 287				  DLM_IFL_OVERLAP_CANCEL));
 288}
 289
 290static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
 291{
 292	if (is_master_copy(lkb))
 293		return;
 294
 295	del_timeout(lkb);
 296
 297	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
 298
 299	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
 300	   timeout caused the cancel then return -ETIMEDOUT */
 301	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
 302		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
 303		rv = -ETIMEDOUT;
 304	}
 305
 306	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
 307		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
 308		rv = -EDEADLK;
 309	}
 310
 311	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
 312}
 313
 314static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
 315{
 316	queue_cast(r, lkb,
 317		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
 318}
 319
 320static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
 321{
 322	if (is_master_copy(lkb)) {
 323		send_bast(r, lkb, rqmode);
 324	} else {
 325		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
 326	}
 327}
 328
 329/*
 330 * Basic operations on rsb's and lkb's
 331 */
 332
 333/* This is only called to add a reference when the code already holds
 334   a valid reference to the rsb, so there's no need for locking. */
 335
 336static inline void hold_rsb(struct dlm_rsb *r)
 337{
 338	kref_get(&r->res_ref);
 339}
 340
 341void dlm_hold_rsb(struct dlm_rsb *r)
 342{
 343	hold_rsb(r);
 344}
 345
 346/* When all references to the rsb are gone it's transferred to
 347   the tossed list for later disposal. */
 348
 349static void put_rsb(struct dlm_rsb *r)
 350{
 351	struct dlm_ls *ls = r->res_ls;
 352	uint32_t bucket = r->res_bucket;
 353
 354	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 355	kref_put(&r->res_ref, toss_rsb);
 356	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 357}
 358
 359void dlm_put_rsb(struct dlm_rsb *r)
 360{
 361	put_rsb(r);
 362}
 363
 364static int pre_rsb_struct(struct dlm_ls *ls)
 365{
 366	struct dlm_rsb *r1, *r2;
 367	int count = 0;
 368
 369	spin_lock(&ls->ls_new_rsb_spin);
 370	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
 371		spin_unlock(&ls->ls_new_rsb_spin);
 372		return 0;
 373	}
 374	spin_unlock(&ls->ls_new_rsb_spin);
 375
 376	r1 = dlm_allocate_rsb(ls);
 377	r2 = dlm_allocate_rsb(ls);
 378
 379	spin_lock(&ls->ls_new_rsb_spin);
 380	if (r1) {
 381		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
 382		ls->ls_new_rsb_count++;
 383	}
 384	if (r2) {
 385		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
 386		ls->ls_new_rsb_count++;
 387	}
 388	count = ls->ls_new_rsb_count;
 389	spin_unlock(&ls->ls_new_rsb_spin);
 390
 391	if (!count)
 392		return -ENOMEM;
 393	return 0;
 394}
 395
 396/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
 397   unlock any spinlocks, go back and call pre_rsb_struct again.
 398   Otherwise, take an rsb off the list and return it. */
 399
 400static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
 401			  struct dlm_rsb **r_ret)
 402{
 403	struct dlm_rsb *r;
 404	int count;
 405
 406	spin_lock(&ls->ls_new_rsb_spin);
 407	if (list_empty(&ls->ls_new_rsb)) {
 408		count = ls->ls_new_rsb_count;
 409		spin_unlock(&ls->ls_new_rsb_spin);
 410		log_debug(ls, "find_rsb retry %d %d %s",
 411			  count, dlm_config.ci_new_rsb_count, name);
 412		return -EAGAIN;
 413	}
 414
 415	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
 416	list_del(&r->res_hashchain);
 417	/* Convert the empty list_head to a NULL rb_node for tree usage: */
 418	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
 419	ls->ls_new_rsb_count--;
 420	spin_unlock(&ls->ls_new_rsb_spin);
 421
 422	r->res_ls = ls;
 423	r->res_length = len;
 424	memcpy(r->res_name, name, len);
 425	mutex_init(&r->res_mutex);
 426
 
 427	INIT_LIST_HEAD(&r->res_lookup);
 428	INIT_LIST_HEAD(&r->res_grantqueue);
 429	INIT_LIST_HEAD(&r->res_convertqueue);
 430	INIT_LIST_HEAD(&r->res_waitqueue);
 431	INIT_LIST_HEAD(&r->res_root_list);
 432	INIT_LIST_HEAD(&r->res_recover_list);
 433
 434	*r_ret = r;
 435	return 0;
 436}
 437
 438static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
 439{
 440	char maxname[DLM_RESNAME_MAXLEN];
 441
 442	memset(maxname, 0, DLM_RESNAME_MAXLEN);
 443	memcpy(maxname, name, nlen);
 444	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
 445}
 446
 447int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
 448			struct dlm_rsb **r_ret)
 449{
 450	struct rb_node *node = tree->rb_node;
 451	struct dlm_rsb *r;
 452	int rc;
 453
 454	while (node) {
 455		r = rb_entry(node, struct dlm_rsb, res_hashnode);
 456		rc = rsb_cmp(r, name, len);
 457		if (rc < 0)
 458			node = node->rb_left;
 459		else if (rc > 0)
 460			node = node->rb_right;
 461		else
 462			goto found;
 463	}
 464	*r_ret = NULL;
 465	return -EBADR;
 466
 467 found:
 
 
 468	*r_ret = r;
 469	return 0;
 470}
 471
 472static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
 
 473{
 474	struct rb_node **newn = &tree->rb_node;
 475	struct rb_node *parent = NULL;
 476	int rc;
 477
 478	while (*newn) {
 479		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
 480					       res_hashnode);
 481
 482		parent = *newn;
 483		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
 484		if (rc < 0)
 485			newn = &parent->rb_left;
 486		else if (rc > 0)
 487			newn = &parent->rb_right;
 488		else {
 489			log_print("rsb_insert match");
 490			dlm_dump_rsb(rsb);
 491			dlm_dump_rsb(cur);
 492			return -EEXIST;
 493		}
 494	}
 
 
 
 
 
 
 
 
 495
 496	rb_link_node(&rsb->res_hashnode, parent, newn);
 497	rb_insert_color(&rsb->res_hashnode, tree);
 498	return 0;
 
 
 
 
 
 
 
 
 
 
 499}
 500
 501/*
 502 * Find rsb in rsbtbl and potentially create/add one
 503 *
 504 * Delaying the release of rsb's has a similar benefit to applications keeping
 505 * NL locks on an rsb, but without the guarantee that the cached master value
 506 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 507 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 508 * to excessive master lookups and removals if we don't delay the release.
 509 *
 510 * Searching for an rsb means looking through both the normal list and toss
 511 * list.  When found on the toss list the rsb is moved to the normal list with
 512 * ref count of 1; when found on normal list the ref count is incremented.
 513 *
 514 * rsb's on the keep list are being used locally and refcounted.
 515 * rsb's on the toss list are not being used locally, and are not refcounted.
 516 *
 517 * The toss list rsb's were either
 518 * - previously used locally but not any more (were on keep list, then
 519 *   moved to toss list when last refcount dropped)
 520 * - created and put on toss list as a directory record for a lookup
 521 *   (we are the dir node for the res, but are not using the res right now,
 522 *   but some other node is)
 523 *
 524 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 525 * So, if the given rsb is on the toss list, it is moved to the keep list
 526 * before being returned.
 527 *
 528 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 529 * more refcounts exist, so the rsb is moved from the keep list to the
 530 * toss list.
 531 *
 532 * rsb's on both keep and toss lists are used for doing a name to master
 533 * lookups.  rsb's that are in use locally (and being refcounted) are on
 534 * the keep list, rsb's that are not in use locally (not refcounted) and
 535 * only exist for name/master lookups are on the toss list.
 536 *
 537 * rsb's on the toss list who's dir_nodeid is not local can have stale
 538 * name/master mappings.  So, remote requests on such rsb's can potentially
 539 * return with an error, which means the mapping is stale and needs to
 540 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 541 * first_lkid is to keep only a single outstanding request on an rsb
 542 * while that rsb has a potentially stale master.)
 543 */
 544
 545static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
 546			uint32_t hash, uint32_t b,
 547			int dir_nodeid, int from_nodeid,
 548			unsigned int flags, struct dlm_rsb **r_ret)
 549{
 550	struct dlm_rsb *r = NULL;
 551	int our_nodeid = dlm_our_nodeid();
 552	int from_local = 0;
 553	int from_other = 0;
 554	int from_dir = 0;
 555	int create = 0;
 556	int error;
 557
 558	if (flags & R_RECEIVE_REQUEST) {
 559		if (from_nodeid == dir_nodeid)
 560			from_dir = 1;
 561		else
 562			from_other = 1;
 563	} else if (flags & R_REQUEST) {
 564		from_local = 1;
 565	}
 566
 567	/*
 568	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
 569	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
 570	 * we're the new master.  Our local recovery may not have set
 571	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
 572	 * create the rsb; dlm_recover_process_copy() will handle EBADR
 573	 * by resending.
 574	 *
 575	 * If someone sends us a request, we are the dir node, and we do
 576	 * not find the rsb anywhere, then recreate it.  This happens if
 577	 * someone sends us a request after we have removed/freed an rsb
 578	 * from our toss list.  (They sent a request instead of lookup
 579	 * because they are using an rsb from their toss list.)
 580	 */
 581
 582	if (from_local || from_dir ||
 583	    (from_other && (dir_nodeid == our_nodeid))) {
 584		create = 1;
 585	}
 586
 587 retry:
 588	if (create) {
 589		error = pre_rsb_struct(ls);
 590		if (error < 0)
 591			goto out;
 592	}
 593
 594	spin_lock(&ls->ls_rsbtbl[b].lock);
 595
 596	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 597	if (error)
 598		goto do_toss;
 599	
 600	/*
 601	 * rsb is active, so we can't check master_nodeid without lock_rsb.
 602	 */
 603
 604	kref_get(&r->res_ref);
 605	error = 0;
 606	goto out_unlock;
 607
 608
 609 do_toss:
 610	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 611	if (error)
 612		goto do_new;
 613
 614	/*
 615	 * rsb found inactive (master_nodeid may be out of date unless
 616	 * we are the dir_nodeid or were the master)  No other thread
 617	 * is using this rsb because it's on the toss list, so we can
 618	 * look at or update res_master_nodeid without lock_rsb.
 619	 */
 620
 621	if ((r->res_master_nodeid != our_nodeid) && from_other) {
 622		/* our rsb was not master, and another node (not the dir node)
 623		   has sent us a request */
 624		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
 625			  from_nodeid, r->res_master_nodeid, dir_nodeid,
 626			  r->res_name);
 627		error = -ENOTBLK;
 628		goto out_unlock;
 629	}
 630
 631	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
 632		/* don't think this should ever happen */
 633		log_error(ls, "find_rsb toss from_dir %d master %d",
 634			  from_nodeid, r->res_master_nodeid);
 635		dlm_print_rsb(r);
 636		/* fix it and go on */
 637		r->res_master_nodeid = our_nodeid;
 638		r->res_nodeid = 0;
 639		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
 640		r->res_first_lkid = 0;
 641	}
 642
 643	if (from_local && (r->res_master_nodeid != our_nodeid)) {
 644		/* Because we have held no locks on this rsb,
 645		   res_master_nodeid could have become stale. */
 646		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
 647		r->res_first_lkid = 0;
 648	}
 649
 650	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
 651	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 652	goto out_unlock;
 653
 654
 655 do_new:
 656	/*
 657	 * rsb not found
 658	 */
 659
 660	if (error == -EBADR && !create)
 661		goto out_unlock;
 662
 663	error = get_rsb_struct(ls, name, len, &r);
 664	if (error == -EAGAIN) {
 665		spin_unlock(&ls->ls_rsbtbl[b].lock);
 666		goto retry;
 667	}
 668	if (error)
 669		goto out_unlock;
 670
 671	r->res_hash = hash;
 672	r->res_bucket = b;
 673	r->res_dir_nodeid = dir_nodeid;
 674	kref_init(&r->res_ref);
 675
 676	if (from_dir) {
 677		/* want to see how often this happens */
 678		log_debug(ls, "find_rsb new from_dir %d recreate %s",
 679			  from_nodeid, r->res_name);
 680		r->res_master_nodeid = our_nodeid;
 681		r->res_nodeid = 0;
 682		goto out_add;
 683	}
 684
 685	if (from_other && (dir_nodeid != our_nodeid)) {
 686		/* should never happen */
 687		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
 688			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
 689		dlm_free_rsb(r);
 690		r = NULL;
 691		error = -ENOTBLK;
 692		goto out_unlock;
 693	}
 694
 695	if (from_other) {
 696		log_debug(ls, "find_rsb new from_other %d dir %d %s",
 697			  from_nodeid, dir_nodeid, r->res_name);
 698	}
 699
 700	if (dir_nodeid == our_nodeid) {
 701		/* When we are the dir nodeid, we can set the master
 702		   node immediately */
 703		r->res_master_nodeid = our_nodeid;
 704		r->res_nodeid = 0;
 705	} else {
 706		/* set_master will send_lookup to dir_nodeid */
 707		r->res_master_nodeid = 0;
 708		r->res_nodeid = -1;
 709	}
 710
 711 out_add:
 712	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 713 out_unlock:
 714	spin_unlock(&ls->ls_rsbtbl[b].lock);
 715 out:
 716	*r_ret = r;
 717	return error;
 718}
 719
 720/* During recovery, other nodes can send us new MSTCPY locks (from
 721   dlm_recover_locks) before we've made ourselves master (in
 722   dlm_recover_masters). */
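/* When the lockspace runs without a resource directory, the node that the
   resource name hashes to (dir_nodeid) simply acts as master for a newly
   created rsb, as the code below sets. */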
 723
 724static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
 725			  uint32_t hash, uint32_t b,
 726			  int dir_nodeid, int from_nodeid,
 727			  unsigned int flags, struct dlm_rsb **r_ret)
 728{
 729	struct dlm_rsb *r = NULL;
 730	int our_nodeid = dlm_our_nodeid();
 731	int recover = (flags & R_RECEIVE_RECOVER);
 732	int error;
 733
 734 retry:
 735	error = pre_rsb_struct(ls);
 736	if (error < 0)
 737		goto out;
 738
 739	spin_lock(&ls->ls_rsbtbl[b].lock);
 740
 741	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 742	if (error)
 743		goto do_toss;
 744
 745	/*
 746	 * rsb is active, so we can't check master_nodeid without lock_rsb.
 747	 */
 748
 749	kref_get(&r->res_ref);
 750	goto out_unlock;
 751
 752
 753 do_toss:
 754	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 755	if (error)
 756		goto do_new;
 757
 758	/*
 759	 * rsb found inactive. No other thread is using this rsb because
 760	 * it's on the toss list, so we can look at or update
 761	 * res_master_nodeid without lock_rsb.
 762	 */
 763
 764	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
 765		/* our rsb is not master, and another node has sent us a
 766		   request; this should never happen */
 767		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
 768			  from_nodeid, r->res_master_nodeid, dir_nodeid);
 769		dlm_print_rsb(r);
 770		error = -ENOTBLK;
 771		goto out_unlock;
 772	}
 773
 774	if (!recover && (r->res_master_nodeid != our_nodeid) &&
 775	    (dir_nodeid == our_nodeid)) {
 776		/* our rsb is not master, and we are dir; may as well fix it;
 777		   this should never happen */
 778		log_error(ls, "find_rsb toss our %d master %d dir %d",
 779			  our_nodeid, r->res_master_nodeid, dir_nodeid);
 780		dlm_print_rsb(r);
 781		r->res_master_nodeid = our_nodeid;
 782		r->res_nodeid = 0;
 783	}
 784
 785	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
 786	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 787	goto out_unlock;
 788
 789
 790 do_new:
 791	/*
 792	 * rsb not found
 793	 */
 794
 795	error = get_rsb_struct(ls, name, len, &r);
 796	if (error == -EAGAIN) {
 797		spin_unlock(&ls->ls_rsbtbl[b].lock);
 798		goto retry;
 799	}
 800	if (error)
 801		goto out_unlock;
 802
 803	r->res_hash = hash;
 804	r->res_bucket = b;
 805	r->res_dir_nodeid = dir_nodeid;
 806	r->res_master_nodeid = dir_nodeid;
 807	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
 808	kref_init(&r->res_ref);
 809
 810	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 811 out_unlock:
 812	spin_unlock(&ls->ls_rsbtbl[b].lock);
 813 out:
 814	*r_ret = r;
 815	return error;
 816}
 817
 818static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
 819		    unsigned int flags, struct dlm_rsb **r_ret)
 820{
 821	uint32_t hash, b;
 822	int dir_nodeid;
 823
 824	if (len > DLM_RESNAME_MAXLEN)
 825		return -EINVAL;
 826
 827	hash = jhash(name, len, 0);
 828	b = hash & (ls->ls_rsbtbl_size - 1);
 829
 830	dir_nodeid = dlm_hash2nodeid(ls, hash);
 831
 832	if (dlm_no_directory(ls))
 833		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
 834				      from_nodeid, flags, r_ret);
 835	else
 836		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
 837				      from_nodeid, flags, r_ret);
 838}
 839
 840/* we have received a request and found that res_master_nodeid != our_nodeid,
 841   so we need to return an error or make ourselves the master */
 842
 843static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
 844				  int from_nodeid)
 845{
 846	if (dlm_no_directory(ls)) {
 847		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
 848			  from_nodeid, r->res_master_nodeid,
 849			  r->res_dir_nodeid);
 850		dlm_print_rsb(r);
 851		return -ENOTBLK;
 852	}
 853
 854	if (from_nodeid != r->res_dir_nodeid) {
 855		/* our rsb is not master, and another node (not the dir node)
 856	   	   has sent us a request.  this is much more common when our
 857	   	   master_nodeid is zero, so limit debug to non-zero.  */
 858
 859		if (r->res_master_nodeid) {
 860			log_debug(ls, "validate master from_other %d master %d "
 861				  "dir %d first %x %s", from_nodeid,
 862				  r->res_master_nodeid, r->res_dir_nodeid,
 863				  r->res_first_lkid, r->res_name);
 864		}
 865		return -ENOTBLK;
 866	} else {
 867		/* our rsb is not master, but the dir nodeid has sent us a
 868	   	   request; this could happen with master 0 / res_nodeid -1 */
 869
 870		if (r->res_master_nodeid) {
 871			log_error(ls, "validate master from_dir %d master %d "
 872				  "first %x %s",
 873				  from_nodeid, r->res_master_nodeid,
 874				  r->res_first_lkid, r->res_name);
 875		}
 876
 877		r->res_master_nodeid = dlm_our_nodeid();
 878		r->res_nodeid = 0;
 879		return 0;
 880	}
 881}
 882
 883/*
 884 * We're the dir node for this res and another node wants to know the
 885 * master nodeid.  During normal operation (non recovery) this is only
 886 * called from receive_lookup(); master lookups when the local node is
 887 * the dir node are done by find_rsb().
 888 *
 889 * normal operation, we are the dir node for a resource
 890 * . _request_lock
 891 * . set_master
 892 * . send_lookup
 893 * . receive_lookup
 894 * . dlm_master_lookup flags 0
 895 *
 896 * recover directory, we are rebuilding dir for all resources
 897 * . dlm_recover_directory
 898 * . dlm_rcom_names
 899 *   remote node sends back the rsb names it is master of and we are dir of
 900 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 901 *   we either create new rsb setting remote node as master, or find existing
 902 *   rsb and set master to be the remote node.
 903 *
 904 * recover masters, we are finding the new master for resources
 905 * . dlm_recover_masters
 906 * . recover_master
 907 * . dlm_send_rcom_lookup
 908 * . receive_rcom_lookup
 909 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 910 */
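/*
 * Roughly, in terms of the flags used above: flags == 0 is an ordinary lookup
 * from a node taking its first lock on the resource; DLM_LU_RECOVER_DIR means
 * we are rebuilding the directory and should record from_nodeid as master of
 * the rsb; DLM_LU_RECOVER_MASTER means a failed master is being replaced, so
 * a master nodeid that has been removed may be overridden by from_nodeid.
 */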
 911
 912int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
 913		      unsigned int flags, int *r_nodeid, int *result)
 914{
 915	struct dlm_rsb *r = NULL;
 916	uint32_t hash, b;
 917	int from_master = (flags & DLM_LU_RECOVER_DIR);
 918	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
 919	int our_nodeid = dlm_our_nodeid();
 920	int dir_nodeid, error, toss_list = 0;
 921
 922	if (len > DLM_RESNAME_MAXLEN)
 923		return -EINVAL;
 924
 925	if (from_nodeid == our_nodeid) {
 926		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
 927			  our_nodeid, flags);
 928		return -EINVAL;
 929	}
 930
 931	hash = jhash(name, len, 0);
 932	b = hash & (ls->ls_rsbtbl_size - 1);
 933
 934	dir_nodeid = dlm_hash2nodeid(ls, hash);
 935	if (dir_nodeid != our_nodeid) {
 936		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
 937			  from_nodeid, dir_nodeid, our_nodeid, hash,
 938			  ls->ls_num_nodes);
 939		*r_nodeid = -1;
 940		return -EINVAL;
 941	}
 942
 943 retry:
 944	error = pre_rsb_struct(ls);
 945	if (error < 0)
 946		return error;
 947
 948	spin_lock(&ls->ls_rsbtbl[b].lock);
 949	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 950	if (!error) {
 951		/* because the rsb is active, we need to lock_rsb before
 952		   checking/changing res_master_nodeid */
 953
 954		hold_rsb(r);
 955		spin_unlock(&ls->ls_rsbtbl[b].lock);
 956		lock_rsb(r);
 957		goto found;
 958	}
 959
 960	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 961	if (error)
 962		goto not_found;
 963
 964	/* because the rsb is inactive (on toss list), it's not refcounted
 965	   and lock_rsb is not used, but is protected by the rsbtbl lock */
 966
 967	toss_list = 1;
 968 found:
 969	if (r->res_dir_nodeid != our_nodeid) {
 970		/* should not happen, but may as well fix it and carry on */
 971		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
 972			  r->res_dir_nodeid, our_nodeid, r->res_name);
 973		r->res_dir_nodeid = our_nodeid;
 974	}
 975
 976	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
 977		/* Recovery uses this function to set a new master when
 978		   the previous master failed.  Setting NEW_MASTER will
 979		   force dlm_recover_masters to call recover_master on this
 980		   rsb even though the res_nodeid is no longer removed. */
 981
 982		r->res_master_nodeid = from_nodeid;
 983		r->res_nodeid = from_nodeid;
 984		rsb_set_flag(r, RSB_NEW_MASTER);
 985
 986		if (toss_list) {
 987			/* I don't think we should ever find it on toss list. */
 988			log_error(ls, "dlm_master_lookup fix_master on toss");
 989			dlm_dump_rsb(r);
 990		}
 991	}
 992
 993	if (from_master && (r->res_master_nodeid != from_nodeid)) {
 994		/* this will happen if from_nodeid became master during
 995		   a previous recovery cycle, and we aborted the previous
 996		   cycle before recovering this master value */
 997
 998		log_limit(ls, "dlm_master_lookup from_master %d "
 999			  "master_nodeid %d res_nodeid %d first %x %s",
1000			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
1001			  r->res_first_lkid, r->res_name);
1002
1003		if (r->res_master_nodeid == our_nodeid) {
1004			log_error(ls, "from_master %d our_master", from_nodeid);
1005			dlm_dump_rsb(r);
1006			dlm_send_rcom_lookup_dump(r, from_nodeid);
1007			goto out_found;
1008		}
1009
1010		r->res_master_nodeid = from_nodeid;
1011		r->res_nodeid = from_nodeid;
1012		rsb_set_flag(r, RSB_NEW_MASTER);
1013	}
1014
1015	if (!r->res_master_nodeid) {
1016		/* this will happen if recovery happens while we're looking
1017		   up the master for this rsb */
1018
1019		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
1020			  from_nodeid, r->res_first_lkid, r->res_name);
1021		r->res_master_nodeid = from_nodeid;
1022		r->res_nodeid = from_nodeid;
1023	}
1024
1025	if (!from_master && !fix_master &&
1026	    (r->res_master_nodeid == from_nodeid)) {
1027		/* this can happen when the master sends remove, the dir node
1028		   finds the rsb on the keep list and ignores the remove,
1029		   and the former master sends a lookup */
1030
1031		log_limit(ls, "dlm_master_lookup from master %d flags %x "
1032			  "first %x %s", from_nodeid, flags,
1033			  r->res_first_lkid, r->res_name);
1034	}
1035
1036 out_found:
1037	*r_nodeid = r->res_master_nodeid;
1038	if (result)
1039		*result = DLM_LU_MATCH;
1040
1041	if (toss_list) {
1042		r->res_toss_time = jiffies;
1043		/* the rsb was inactive (on toss list) */
1044		spin_unlock(&ls->ls_rsbtbl[b].lock);
1045	} else {
1046		/* the rsb was active */
1047		unlock_rsb(r);
1048		put_rsb(r);
1049	}
1050	return 0;
1051
1052 not_found:
1053	error = get_rsb_struct(ls, name, len, &r);
1054	if (error == -EAGAIN) {
1055		spin_unlock(&ls->ls_rsbtbl[b].lock);
1056		goto retry;
1057	}
1058	if (error)
1059		goto out_unlock;
1060
1061	r->res_hash = hash;
1062	r->res_bucket = b;
1063	r->res_dir_nodeid = our_nodeid;
1064	r->res_master_nodeid = from_nodeid;
1065	r->res_nodeid = from_nodeid;
1066	kref_init(&r->res_ref);
1067	r->res_toss_time = jiffies;
1068
1069	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1070	if (error) {
1071		/* should never happen */
1072		dlm_free_rsb(r);
1073		spin_unlock(&ls->ls_rsbtbl[b].lock);
1074		goto retry;
1075	}
1076
1077	if (result)
1078		*result = DLM_LU_ADD;
1079	*r_nodeid = from_nodeid;
1080	error = 0;
1081 out_unlock:
1082	spin_unlock(&ls->ls_rsbtbl[b].lock);
1083	return error;
1084}
1085
1086static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1087{
1088	struct rb_node *n;
1089	struct dlm_rsb *r;
1090	int i;
1091
1092	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1093		spin_lock(&ls->ls_rsbtbl[i].lock);
1094		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1095			r = rb_entry(n, struct dlm_rsb, res_hashnode);
1096			if (r->res_hash == hash)
1097				dlm_dump_rsb(r);
1098		}
1099		spin_unlock(&ls->ls_rsbtbl[i].lock);
1100	}
1101}
1102
1103void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
1104{
1105	struct dlm_rsb *r = NULL;
1106	uint32_t hash, b;
1107	int error;
1108
1109	hash = jhash(name, len, 0);
1110	b = hash & (ls->ls_rsbtbl_size - 1);
1111
1112	spin_lock(&ls->ls_rsbtbl[b].lock);
1113	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1114	if (!error)
1115		goto out_dump;
1116
1117	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1118	if (error)
1119		goto out;
1120 out_dump:
1121	dlm_dump_rsb(r);
1122 out:
1123	spin_unlock(&ls->ls_rsbtbl[b].lock);
1124}
1125
1126static void toss_rsb(struct kref *kref)
1127{
1128	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1129	struct dlm_ls *ls = r->res_ls;
1130
1131	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
1132	kref_init(&r->res_ref);
1133	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1134	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1135	r->res_toss_time = jiffies;
1136	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
1137	if (r->res_lvbptr) {
1138		dlm_free_lvb(r->res_lvbptr);
1139		r->res_lvbptr = NULL;
1140	}
1141}
1142
1143/* See comment for unhold_lkb */
1144
1145static void unhold_rsb(struct dlm_rsb *r)
1146{
1147	int rv;
1148	rv = kref_put(&r->res_ref, toss_rsb);
1149	DLM_ASSERT(!rv, dlm_dump_rsb(r););
1150}
1151
1152static void kill_rsb(struct kref *kref)
1153{
1154	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1155
1156	/* All work is done after the return from kref_put() so we
1157	   can release the write_lock before the remove and free. */
1158
1159	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1160	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1161	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1162	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1163	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1164	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1165}
1166
1167/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1168   The rsb must exist as long as any lkb's for it do. */
1169
1170static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1171{
1172	hold_rsb(r);
1173	lkb->lkb_resource = r;
1174}
1175
1176static void detach_lkb(struct dlm_lkb *lkb)
1177{
1178	if (lkb->lkb_resource) {
1179		put_rsb(lkb->lkb_resource);
1180		lkb->lkb_resource = NULL;
1181	}
1182}
1183
1184static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1185{
1186	struct dlm_lkb *lkb;
1187	int rv;
1188
1189	lkb = dlm_allocate_lkb(ls);
1190	if (!lkb)
1191		return -ENOMEM;
1192
1193	lkb->lkb_nodeid = -1;
1194	lkb->lkb_grmode = DLM_LOCK_IV;
1195	kref_init(&lkb->lkb_ref);
1196	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1197	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1198	INIT_LIST_HEAD(&lkb->lkb_time_list);
1199	INIT_LIST_HEAD(&lkb->lkb_cb_list);
1200	mutex_init(&lkb->lkb_cb_mutex);
1201	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1202
1203	idr_preload(GFP_NOFS);
1204	spin_lock(&ls->ls_lkbidr_spin);
1205	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
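	/* start 1, end 0: idr_alloc() returns the lowest unused id >= 1 with
	   no upper bound, so a zero lkid never occurs and can presumably be
	   treated as "no lock" by callers */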
1206	if (rv >= 0)
1207		lkb->lkb_id = rv;
1208	spin_unlock(&ls->ls_lkbidr_spin);
1209	idr_preload_end();
1210
1211	if (rv < 0) {
1212		log_error(ls, "create_lkb idr error %d", rv);
1213		return rv;
1214	}
1215
1216	*lkb_ret = lkb;
1217	return 0;
1218}
1219
1220static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1221{
1222	struct dlm_lkb *lkb;
1223
1224	spin_lock(&ls->ls_lkbidr_spin);
1225	lkb = idr_find(&ls->ls_lkbidr, lkid);
1226	if (lkb)
1227		kref_get(&lkb->lkb_ref);
1228	spin_unlock(&ls->ls_lkbidr_spin);
1229
1230	*lkb_ret = lkb;
1231	return lkb ? 0 : -ENOENT;
1232}
1233
1234static void kill_lkb(struct kref *kref)
1235{
1236	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1237
1238	/* All work is done after the return from kref_put() so we
1239	   can release the write_lock before the detach_lkb */
1240
1241	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1242}
1243
1244/* __put_lkb() is used when an lkb may not have an rsb attached to
1245   it so we need to provide the lockspace explicitly */
1246
1247static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1248{
1249	uint32_t lkid = lkb->lkb_id;
1250
1251	spin_lock(&ls->ls_lkbidr_spin);
1252	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
1253		idr_remove(&ls->ls_lkbidr, lkid);
1254		spin_unlock(&ls->ls_lkbidr_spin);
1255
1256		detach_lkb(lkb);
1257
1258		/* for local/process lkbs, lvbptr points to caller's lksb */
1259		if (lkb->lkb_lvbptr && is_master_copy(lkb))
1260			dlm_free_lvb(lkb->lkb_lvbptr);
1261		dlm_free_lkb(lkb);
1262		return 1;
1263	} else {
1264		spin_unlock(&ls->ls_lkbidr_spin);
1265		return 0;
1266	}
1267}
1268
1269int dlm_put_lkb(struct dlm_lkb *lkb)
1270{
1271	struct dlm_ls *ls;
1272
1273	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1274	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1275
1276	ls = lkb->lkb_resource->res_ls;
1277	return __put_lkb(ls, lkb);
1278}
1279
1280/* This is only called to add a reference when the code already holds
1281   a valid reference to the lkb, so there's no need for locking. */
1282
1283static inline void hold_lkb(struct dlm_lkb *lkb)
1284{
1285	kref_get(&lkb->lkb_ref);
1286}
1287
1288/* This is called when we need to remove a reference and are certain
1289   it's not the last ref.  e.g. del_lkb is always called between a
1290   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1291   put_lkb would work fine, but would involve unnecessary locking */
1292
1293static inline void unhold_lkb(struct dlm_lkb *lkb)
1294{
1295	int rv;
1296	rv = kref_put(&lkb->lkb_ref, kill_lkb);
1297	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
1298}
1299
1300static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1301			    int mode)
1302{
1303	struct dlm_lkb *lkb = NULL;
1304
1305	list_for_each_entry(lkb, head, lkb_statequeue)
1306		if (lkb->lkb_rqmode < mode)
1307			break;
1308
1309	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
1310}
1311
1312/* add/remove lkb to rsb's grant/convert/wait queue */
1313
1314static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1315{
1316	kref_get(&lkb->lkb_ref);
1317
1318	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1319
1320	lkb->lkb_timestamp = ktime_get();
1321
1322	lkb->lkb_status = status;
1323
1324	switch (status) {
1325	case DLM_LKSTS_WAITING:
1326		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1327			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1328		else
1329			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1330		break;
1331	case DLM_LKSTS_GRANTED:
1332		/* convention says granted locks kept in order of grmode */
1333		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1334				lkb->lkb_grmode);
1335		break;
1336	case DLM_LKSTS_CONVERT:
1337		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1338			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1339		else
1340			list_add_tail(&lkb->lkb_statequeue,
1341				      &r->res_convertqueue);
1342		break;
1343	default:
1344		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1345	}
1346}
1347
1348static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1349{
1350	lkb->lkb_status = 0;
1351	list_del(&lkb->lkb_statequeue);
1352	unhold_lkb(lkb);
1353}
1354
1355static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1356{
1357	hold_lkb(lkb);
1358	del_lkb(r, lkb);
1359	add_lkb(r, lkb, sts);
1360	unhold_lkb(lkb);
1361}
1362
1363static int msg_reply_type(int mstype)
1364{
1365	switch (mstype) {
1366	case DLM_MSG_REQUEST:
1367		return DLM_MSG_REQUEST_REPLY;
1368	case DLM_MSG_CONVERT:
1369		return DLM_MSG_CONVERT_REPLY;
1370	case DLM_MSG_UNLOCK:
1371		return DLM_MSG_UNLOCK_REPLY;
1372	case DLM_MSG_CANCEL:
1373		return DLM_MSG_CANCEL_REPLY;
1374	case DLM_MSG_LOOKUP:
1375		return DLM_MSG_LOOKUP_REPLY;
1376	}
1377	return -1;
1378}
1379
1380static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1381{
1382	int i;
1383
1384	for (i = 0; i < num_nodes; i++) {
1385		if (!warned[i]) {
1386			warned[i] = nodeid;
1387			return 0;
1388		}
1389		if (warned[i] == nodeid)
1390			return 1;
1391	}
1392	return 0;
1393}
1394
1395void dlm_scan_waiters(struct dlm_ls *ls)
1396{
1397	struct dlm_lkb *lkb;
1398	s64 us;
1399	s64 debug_maxus = 0;
1400	u32 debug_scanned = 0;
1401	u32 debug_expired = 0;
1402	int num_nodes = 0;
1403	int *warned = NULL;
1404
1405	if (!dlm_config.ci_waitwarn_us)
1406		return;
1407
1408	mutex_lock(&ls->ls_waiters_mutex);
1409
1410	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1411		if (!lkb->lkb_wait_time)
1412			continue;
1413
1414		debug_scanned++;
1415
1416		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
1417
1418		if (us < dlm_config.ci_waitwarn_us)
1419			continue;
1420
1421		lkb->lkb_wait_time = 0;
1422
1423		debug_expired++;
1424		if (us > debug_maxus)
1425			debug_maxus = us;
1426
1427		if (!num_nodes) {
1428			num_nodes = ls->ls_num_nodes;
1429			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
1430		}
1431		if (!warned)
1432			continue;
1433		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
1434			continue;
1435
1436		log_error(ls, "waitwarn %x %lld %d us check connection to "
1437			  "node %d", lkb->lkb_id, (long long)us,
1438			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
1439	}
1440	mutex_unlock(&ls->ls_waiters_mutex);
1441	kfree(warned);
1442
1443	if (debug_expired)
1444		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
1445			  debug_scanned, debug_expired,
1446			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
1447}
1448
1449/* add/remove lkb from global waiters list of lkb's waiting for
1450   a reply from a remote node */
1451
1452static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1453{
1454	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1455	int error = 0;
1456
1457	mutex_lock(&ls->ls_waiters_mutex);
1458
1459	if (is_overlap_unlock(lkb) ||
1460	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1461		error = -EINVAL;
1462		goto out;
1463	}
1464
1465	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1466		switch (mstype) {
1467		case DLM_MSG_UNLOCK:
1468			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1469			break;
1470		case DLM_MSG_CANCEL:
1471			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1472			break;
1473		default:
1474			error = -EBUSY;
1475			goto out;
1476		}
1477		lkb->lkb_wait_count++;
1478		hold_lkb(lkb);
1479
1480		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1481			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
1482			  lkb->lkb_wait_count, lkb->lkb_flags);
1483		goto out;
1484	}
1485
1486	DLM_ASSERT(!lkb->lkb_wait_count,
1487		   dlm_print_lkb(lkb);
1488		   printk("wait_count %d\n", lkb->lkb_wait_count););
1489
1490	lkb->lkb_wait_count++;
1491	lkb->lkb_wait_type = mstype;
1492	lkb->lkb_wait_time = ktime_get();
1493	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1494	hold_lkb(lkb);
1495	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1496 out:
1497	if (error)
1498		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1499			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
1500			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1501	mutex_unlock(&ls->ls_waiters_mutex);
1502	return error;
1503}
1504
1505/* We clear the RESEND flag because we might be taking an lkb off the waiters
1506   list as part of process_requestqueue (e.g. a lookup that has an optimized
1507   request reply on the requestqueue) between dlm_recover_waiters_pre() which
1508   set RESEND and dlm_recover_waiters_post() */
1509
1510static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1511				struct dlm_message *ms)
1512{
1513	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1514	int overlap_done = 0;
1515
1516	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1517		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1518		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1519		overlap_done = 1;
1520		goto out_del;
1521	}
1522
1523	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1524		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1525		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1526		overlap_done = 1;
1527		goto out_del;
1528	}
1529
1530	/* Cancel state was preemptively cleared by a successful convert,
1531	   see next comment, nothing to do. */
1532
1533	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1534	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1535		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1536			  lkb->lkb_id, lkb->lkb_wait_type);
1537		return -1;
1538	}
1539
 1540	/* Remove for the convert reply, and preemptively remove for the
1541	   cancel reply.  A convert has been granted while there's still
1542	   an outstanding cancel on it (the cancel is moot and the result
1543	   in the cancel reply should be 0).  We preempt the cancel reply
1544	   because the app gets the convert result and then can follow up
1545	   with another op, like convert.  This subsequent op would see the
1546	   lingering state of the cancel and fail with -EBUSY. */
1547
1548	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1549	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1550	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1551		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1552			  lkb->lkb_id);
1553		lkb->lkb_wait_type = 0;
1554		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1555		lkb->lkb_wait_count--;
1556		goto out_del;
1557	}
1558
1559	/* N.B. type of reply may not always correspond to type of original
1560	   msg due to lookup->request optimization, verify others? */
1561
1562	if (lkb->lkb_wait_type) {
1563		lkb->lkb_wait_type = 0;
1564		goto out_del;
1565	}
1566
1567	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1568		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1569		  mstype, lkb->lkb_flags);
1570	return -1;
1571
1572 out_del:
1573	/* the force-unlock/cancel has completed and we haven't recvd a reply
1574	   to the op that was in progress prior to the unlock/cancel; we
1575	   give up on any reply to the earlier op.  FIXME: not sure when/how
1576	   this would happen */
1577
1578	if (overlap_done && lkb->lkb_wait_type) {
1579		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1580			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1581		lkb->lkb_wait_count--;
1582		lkb->lkb_wait_type = 0;
1583	}
1584
1585	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1586
1587	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1588	lkb->lkb_wait_count--;
1589	if (!lkb->lkb_wait_count)
1590		list_del_init(&lkb->lkb_wait_reply);
1591	unhold_lkb(lkb);
1592	return 0;
1593}
1594
1595static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1596{
1597	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1598	int error;
1599
1600	mutex_lock(&ls->ls_waiters_mutex);
1601	error = _remove_from_waiters(lkb, mstype, NULL);
1602	mutex_unlock(&ls->ls_waiters_mutex);
1603	return error;
1604}
1605
1606/* Handles situations where we might be processing a "fake" or "stub" reply in
1607   which we can't try to take waiters_mutex again. */
1608
1609static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1610{
1611	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1612	int error;
1613
1614	if (ms->m_flags != DLM_IFL_STUB_MS)
1615		mutex_lock(&ls->ls_waiters_mutex);
1616	error = _remove_from_waiters(lkb, ms->m_type, ms);
1617	if (ms->m_flags != DLM_IFL_STUB_MS)
1618		mutex_unlock(&ls->ls_waiters_mutex);
1619	return error;
1620}
1621
1622/* If there's an rsb for the same resource being removed, ensure
1623   that the remove message is sent before the new lookup message.
1624   It should be rare to need a delay here, but if not, then it may
1625   be worthwhile to add a proper wait mechanism rather than a delay. */
1626
1627static void wait_pending_remove(struct dlm_rsb *r)
1628{
1629	struct dlm_ls *ls = r->res_ls;
1630 restart:
1631	spin_lock(&ls->ls_remove_spin);
1632	if (ls->ls_remove_len &&
1633	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1634		log_debug(ls, "delay lookup for remove dir %d %s",
1635		  	  r->res_dir_nodeid, r->res_name);
1636		spin_unlock(&ls->ls_remove_spin);
1637		msleep(1);
1638		goto restart;
1639	}
1640	spin_unlock(&ls->ls_remove_spin);
1641}
1642
1643/*
1644 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1645 * read by other threads in wait_pending_remove.  ls_remove_names
1646 * and ls_remove_lens are only used by the scan thread, so they do
1647 * not need protection.
1648 */
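/*
 * The handshake, roughly: shrink_bucket() copies the victim's name into
 * ls_remove_name (under ls_remove_spin) before dropping the rsbtbl lock and
 * calling send_remove(); wait_pending_remove() keeps retrying (with msleep)
 * until the name no longer matches, so a new lookup for the same resource
 * cannot be sent ahead of the pending remove message.
 */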
1649
1650static void shrink_bucket(struct dlm_ls *ls, int b)
1651{
1652	struct rb_node *n, *next;
1653	struct dlm_rsb *r;
1654	char *name;
1655	int our_nodeid = dlm_our_nodeid();
1656	int remote_count = 0;
1657	int need_shrink = 0;
1658	int i, len, rv;
1659
1660	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1661
1662	spin_lock(&ls->ls_rsbtbl[b].lock);
1663
1664	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1665		spin_unlock(&ls->ls_rsbtbl[b].lock);
1666		return;
1667	}
1668
1669	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1670		next = rb_next(n);
1671		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1672
1673		/* If we're the directory record for this rsb, and
1674		   we're not the master of it, then we need to wait
 1675		   for the master node to send us a dir remove
1676		   before removing the dir record. */
1677
1678		if (!dlm_no_directory(ls) &&
1679		    (r->res_master_nodeid != our_nodeid) &&
1680		    (dlm_dir_nodeid(r) == our_nodeid)) {
1681			continue;
1682		}
1683
1684		need_shrink = 1;
1685
1686		if (!time_after_eq(jiffies, r->res_toss_time +
1687				   dlm_config.ci_toss_secs * HZ)) {
1688			continue;
1689		}
1690
1691		if (!dlm_no_directory(ls) &&
1692		    (r->res_master_nodeid == our_nodeid) &&
1693		    (dlm_dir_nodeid(r) != our_nodeid)) {
1694
1695			/* We're the master of this rsb but we're not
1696			   the directory record, so we need to tell the
1697			   dir node to remove the dir record. */
1698
1699			ls->ls_remove_lens[remote_count] = r->res_length;
1700			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1701			       DLM_RESNAME_MAXLEN);
1702			remote_count++;
1703
1704			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1705				break;
1706			continue;
1707		}
1708
1709		if (!kref_put(&r->res_ref, kill_rsb)) {
1710			log_error(ls, "tossed rsb in use %s", r->res_name);
1711			continue;
1712		}
1713
1714		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1715		dlm_free_rsb(r);
1716	}
1717
1718	if (need_shrink)
1719		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1720	else
1721		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1722	spin_unlock(&ls->ls_rsbtbl[b].lock);
1723
1724	/*
1725	 * While searching for rsb's to free, we found some that require
1726	 * remote removal.  We leave them in place and find them again here
1727	 * so there is a very small gap between removing them from the toss
1728	 * list and sending the removal.  Keeping this gap small is
1729	 * important to keep us (the master node) from being out of sync
1730	 * with the remote dir node for very long.
1731	 *
1732	 * From the time the rsb is removed from toss until just after
1733	 * send_remove, the rsb name is saved in ls_remove_name.  A new
1734	 * lookup checks this to ensure that a new lookup message for the
1735	 * same resource name is not sent just before the remove message.
1736	 */
1737
1738	for (i = 0; i < remote_count; i++) {
1739		name = ls->ls_remove_names[i];
1740		len = ls->ls_remove_lens[i];
1741
1742		spin_lock(&ls->ls_rsbtbl[b].lock);
1743		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1744		if (rv) {
1745			spin_unlock(&ls->ls_rsbtbl[b].lock);
1746			log_debug(ls, "remove_name not toss %s", name);
1747			continue;
1748		}
1749
1750		if (r->res_master_nodeid != our_nodeid) {
1751			spin_unlock(&ls->ls_rsbtbl[b].lock);
1752			log_debug(ls, "remove_name master %d dir %d our %d %s",
1753				  r->res_master_nodeid, r->res_dir_nodeid,
1754				  our_nodeid, name);
1755			continue;
1756		}
1757
1758		if (r->res_dir_nodeid == our_nodeid) {
1759			/* should never happen */
1760			spin_unlock(&ls->ls_rsbtbl[b].lock);
1761			log_error(ls, "remove_name dir %d master %d our %d %s",
1762				  r->res_dir_nodeid, r->res_master_nodeid,
1763				  our_nodeid, name);
1764			continue;
1765		}
1766
1767		if (!time_after_eq(jiffies, r->res_toss_time +
1768				   dlm_config.ci_toss_secs * HZ)) {
1769			spin_unlock(&ls->ls_rsbtbl[b].lock);
1770			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1771				  r->res_toss_time, jiffies, name);
1772			continue;
1773		}
1774
1775		if (!kref_put(&r->res_ref, kill_rsb)) {
1776			spin_unlock(&ls->ls_rsbtbl[b].lock);
1777			log_error(ls, "remove_name in use %s", name);
1778			continue;
1779		}
1780
1781		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1782
1783		/* block lookup of same name until we've sent remove */
1784		spin_lock(&ls->ls_remove_spin);
1785		ls->ls_remove_len = len;
1786		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1787		spin_unlock(&ls->ls_remove_spin);
1788		spin_unlock(&ls->ls_rsbtbl[b].lock);
1789
1790		send_remove(r);
1791
1792		/* allow lookup of name again */
1793		spin_lock(&ls->ls_remove_spin);
1794		ls->ls_remove_len = 0;
1795		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1796		spin_unlock(&ls->ls_remove_spin);
1797
1798		dlm_free_rsb(r);
1799	}
1800}
1801
1802void dlm_scan_rsbs(struct dlm_ls *ls)
1803{
1804	int i;
1805
1806	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1807		shrink_bucket(ls, i);
1808		if (dlm_locking_stopped(ls))
1809			break;
1810		cond_resched();
1811	}
1812}
1813
1814static void add_timeout(struct dlm_lkb *lkb)
1815{
1816	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1817
1818	if (is_master_copy(lkb))
1819		return;
1820
1821	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1822	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1823		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1824		goto add_it;
1825	}
1826	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1827		goto add_it;
1828	return;
1829
1830 add_it:
1831	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1832	mutex_lock(&ls->ls_timeout_mutex);
1833	hold_lkb(lkb);
1834	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1835	mutex_unlock(&ls->ls_timeout_mutex);
1836}
1837
1838static void del_timeout(struct dlm_lkb *lkb)
1839{
1840	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1841
1842	mutex_lock(&ls->ls_timeout_mutex);
1843	if (!list_empty(&lkb->lkb_time_list)) {
1844		list_del_init(&lkb->lkb_time_list);
1845		unhold_lkb(lkb);
1846	}
1847	mutex_unlock(&ls->ls_timeout_mutex);
1848}
1849
1850/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1851   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1852   and then lock rsb because of lock ordering in add_timeout.  We may need
1853   to specify some special timeout-related bits in the lkb that are just to
1854   be accessed under the timeout_mutex. */
1855
1856void dlm_scan_timeout(struct dlm_ls *ls)
1857{
1858	struct dlm_rsb *r;
1859	struct dlm_lkb *lkb;
1860	int do_cancel, do_warn;
1861	s64 wait_us;
1862
1863	for (;;) {
1864		if (dlm_locking_stopped(ls))
1865			break;
1866
1867		do_cancel = 0;
1868		do_warn = 0;
1869		mutex_lock(&ls->ls_timeout_mutex);
1870		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1871
1872			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1873					      		lkb->lkb_timestamp));
1874
1875			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1876			    wait_us >= (lkb->lkb_timeout_cs * 10000))
1877				do_cancel = 1;
1878
1879			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1880			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1881				do_warn = 1;
1882
1883			if (!do_cancel && !do_warn)
1884				continue;
1885			hold_lkb(lkb);
1886			break;
1887		}
1888		mutex_unlock(&ls->ls_timeout_mutex);
1889
1890		if (!do_cancel && !do_warn)
1891			break;
1892
1893		r = lkb->lkb_resource;
1894		hold_rsb(r);
1895		lock_rsb(r);
1896
1897		if (do_warn) {
1898			/* clear flag so we only warn once */
1899			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1900			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1901				del_timeout(lkb);
1902			dlm_timeout_warn(lkb);
1903		}
1904
1905		if (do_cancel) {
1906			log_debug(ls, "timeout cancel %x node %d %s",
1907				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1908			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1909			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1910			del_timeout(lkb);
1911			_cancel_lock(r, lkb);
1912		}
1913
1914		unlock_rsb(r);
1915		unhold_rsb(r);
1916		dlm_put_lkb(lkb);
1917	}
1918}
1919
1920/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1921   dlm_recoverd before checking/setting ls_recover_begin. */
1922
1923void dlm_adjust_timeouts(struct dlm_ls *ls)
1924{
1925	struct dlm_lkb *lkb;
1926	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1927
1928	ls->ls_recover_begin = 0;
1929	mutex_lock(&ls->ls_timeout_mutex);
1930	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1931		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1932	mutex_unlock(&ls->ls_timeout_mutex);
1933
1934	if (!dlm_config.ci_waitwarn_us)
1935		return;
1936
1937	mutex_lock(&ls->ls_waiters_mutex);
1938	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1939		if (ktime_to_us(lkb->lkb_wait_time))
1940			lkb->lkb_wait_time = ktime_get();
1941	}
1942	mutex_unlock(&ls->ls_waiters_mutex);
1943}
1944
1945/* lkb is master or local copy */
1946
1947static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1948{
1949	int b, len = r->res_ls->ls_lvblen;
1950
1951	/* b=1 lvb returned to caller
1952	   b=0 lvb written to rsb or invalidated
1953	   b=-1 do nothing */
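	/* for illustration only (dlm_lvb_operations is authoritative): taking
	   or converting up a lock generally returns the lvb to the caller
	   (b=1), while unlocking or converting down from a write mode (PW/EX)
	   generally writes the caller's lvb into the rsb (b=0) */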
1954
1955	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1956
1957	if (b == 1) {
1958		if (!lkb->lkb_lvbptr)
1959			return;
1960
1961		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1962			return;
1963
1964		if (!r->res_lvbptr)
1965			return;
1966
1967		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1968		lkb->lkb_lvbseq = r->res_lvbseq;
1969
1970	} else if (b == 0) {
1971		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1972			rsb_set_flag(r, RSB_VALNOTVALID);
1973			return;
1974		}
1975
1976		if (!lkb->lkb_lvbptr)
1977			return;
1978
1979		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1980			return;
1981
1982		if (!r->res_lvbptr)
1983			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1984
1985		if (!r->res_lvbptr)
1986			return;
1987
1988		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1989		r->res_lvbseq++;
1990		lkb->lkb_lvbseq = r->res_lvbseq;
1991		rsb_clear_flag(r, RSB_VALNOTVALID);
1992	}
1993
1994	if (rsb_flag(r, RSB_VALNOTVALID))
1995		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1996}
1997
1998static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1999{
2000	if (lkb->lkb_grmode < DLM_LOCK_PW)
2001		return;
2002
2003	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2004		rsb_set_flag(r, RSB_VALNOTVALID);
2005		return;
2006	}
2007
2008	if (!lkb->lkb_lvbptr)
2009		return;
2010
2011	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2012		return;
2013
2014	if (!r->res_lvbptr)
2015		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2016
2017	if (!r->res_lvbptr)
2018		return;
2019
2020	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2021	r->res_lvbseq++;
2022	rsb_clear_flag(r, RSB_VALNOTVALID);
2023}
2024
2025/* lkb is process copy (pc) */
2026
2027static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2028			    struct dlm_message *ms)
2029{
2030	int b;
2031
2032	if (!lkb->lkb_lvbptr)
2033		return;
2034
2035	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2036		return;
2037
2038	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2039	if (b == 1) {
2040		int len = receive_extralen(ms);
2041		if (len > r->res_ls->ls_lvblen)
2042			len = r->res_ls->ls_lvblen;
2043		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2044		lkb->lkb_lvbseq = ms->m_lvbseq;
2045	}
2046}
2047
2048/* Manipulate lkb's on rsb's convert/granted/waiting queues
2049   remove_lock -- used for unlock, removes lkb from granted
2050   revert_lock -- used for cancel, moves lkb from convert to granted
2051   grant_lock  -- used for request and convert, adds lkb to granted or
2052                  moves lkb from convert or waiting to granted
2053
2054   Each of these is used for master or local copy lkb's.  There is
2055   also a _pc() variation used to make the corresponding change on
2056   a process copy (pc) lkb. */
2057
2058static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2059{
2060	del_lkb(r, lkb);
2061	lkb->lkb_grmode = DLM_LOCK_IV;
2062	/* this unhold undoes the original ref from create_lkb()
2063	   so this leads to the lkb being freed */
2064	unhold_lkb(lkb);
2065}
2066
2067static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2068{
2069	set_lvb_unlock(r, lkb);
2070	_remove_lock(r, lkb);
2071}
2072
2073static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2074{
2075	_remove_lock(r, lkb);
2076}
2077
2078/* returns: 0 did nothing
2079	    1 moved lock to granted
2080	   -1 removed lock */
2081
2082static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2083{
2084	int rv = 0;
2085
2086	lkb->lkb_rqmode = DLM_LOCK_IV;
2087
2088	switch (lkb->lkb_status) {
2089	case DLM_LKSTS_GRANTED:
2090		break;
2091	case DLM_LKSTS_CONVERT:
2092		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2093		rv = 1;
2094		break;
2095	case DLM_LKSTS_WAITING:
2096		del_lkb(r, lkb);
2097		lkb->lkb_grmode = DLM_LOCK_IV;
2098		/* this unhold undoes the original ref from create_lkb()
2099		   so this leads to the lkb being freed */
2100		unhold_lkb(lkb);
2101		rv = -1;
2102		break;
2103	default:
2104		log_print("invalid status for revert %d", lkb->lkb_status);
2105	}
2106	return rv;
2107}
2108
2109static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2110{
2111	return revert_lock(r, lkb);
2112}
2113
2114static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2115{
2116	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2117		lkb->lkb_grmode = lkb->lkb_rqmode;
2118		if (lkb->lkb_status)
2119			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2120		else
2121			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2122	}
2123
2124	lkb->lkb_rqmode = DLM_LOCK_IV;
2125	lkb->lkb_highbast = 0;
2126}
2127
2128static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2129{
2130	set_lvb_lock(r, lkb);
2131	_grant_lock(r, lkb);
2132}
2133
2134static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2135			  struct dlm_message *ms)
2136{
2137	set_lvb_lock_pc(r, lkb, ms);
2138	_grant_lock(r, lkb);
2139}
2140
2141/* called by grant_pending_locks() which means an async grant message must
2142   be sent to the requesting node in addition to granting the lock if the
2143   lkb belongs to a remote node. */
2144
2145static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2146{
2147	grant_lock(r, lkb);
2148	if (is_master_copy(lkb))
2149		send_grant(r, lkb);
2150	else
2151		queue_cast(r, lkb, 0);
2152}
2153
2154/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2155   change the granted/requested modes.  We're munging things accordingly in
2156   the process copy.
2157   CONVDEADLK: our grmode may have been forced down to NL to resolve a
2158   conversion deadlock
2159   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2160   compatible with other granted locks */
2161
2162static void munge_demoted(struct dlm_lkb *lkb)
2163{
2164	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2165		log_print("munge_demoted %x invalid modes gr %d rq %d",
2166			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2167		return;
2168	}
2169
2170	lkb->lkb_grmode = DLM_LOCK_NL;
2171}
2172
2173static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2174{
2175	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2176	    ms->m_type != DLM_MSG_GRANT) {
2177		log_print("munge_altmode %x invalid reply type %d",
2178			  lkb->lkb_id, ms->m_type);
2179		return;
2180	}
2181
2182	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2183		lkb->lkb_rqmode = DLM_LOCK_PR;
2184	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2185		lkb->lkb_rqmode = DLM_LOCK_CW;
2186	else {
2187		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2188		dlm_print_lkb(lkb);
2189	}
2190}
2191
2192static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2193{
2194	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2195					   lkb_statequeue);
2196	if (lkb->lkb_id == first->lkb_id)
2197		return 1;
2198
2199	return 0;
2200}
2201
2202/* Check if the given lkb conflicts with another lkb on the queue. */
2203
2204static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2205{
2206	struct dlm_lkb *this;
2207
2208	list_for_each_entry(this, head, lkb_statequeue) {
2209		if (this == lkb)
2210			continue;
2211		if (!modes_compat(this, lkb))
2212			return 1;
2213	}
2214	return 0;
2215}
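/* For example, under the standard dlm compatibility matrix two PR locks do
   not conflict with each other, but a granted PR lock conflicts with a
   queued PW or EX request. */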
2216
2217/*
2218 * "A conversion deadlock arises with a pair of lock requests in the converting
2219 * queue for one resource.  The granted mode of each lock blocks the requested
2220 * mode of the other lock."
2221 *
2222 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2223 * convert queue from being granted, then deadlk/demote lkb.
2224 *
2225 * Example:
2226 * Granted Queue: empty
2227 * Convert Queue: NL->EX (first lock)
2228 *                PR->EX (second lock)
2229 *
2230 * The first lock can't be granted because of the granted mode of the second
2231 * lock and the second lock can't be granted because it's not first in the
2232 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2233 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2234 * flag set and return DEMOTED in the lksb flags.
2235 *
2236 * Originally, this function detected conv-deadlk in a more limited scope:
2237 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2238 * - if lkb1 was the first entry in the queue (not just earlier), and was
2239 *   blocked by the granted mode of lkb2, and there was nothing on the
2240 *   granted queue preventing lkb1 from being granted immediately, i.e.
2241 *   lkb2 was the only thing preventing lkb1 from being granted.
2242 *
2243 * That second condition meant we'd only say there was conv-deadlk if
2244 * resolving it (by demotion) would lead to the first lock on the convert
2245 * queue being granted right away.  It allowed conversion deadlocks to exist
2246 * between locks on the convert queue while they couldn't be granted anyway.
2247 *
2248 * Now, we detect and take action on conversion deadlocks immediately when
2249 * they're created, even if they may not be immediately consequential.  If
2250 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2251 * mode that would prevent lkb1's conversion from being granted, we do a
2252 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2253 * I think this means that the lkb_is_ahead condition below should always
2254 * be zero, i.e. there will never be conv-deadlk between two locks that are
2255 * both already on the convert queue.
2256 */
2257
2258static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2259{
2260	struct dlm_lkb *lkb1;
2261	int lkb_is_ahead = 0;
2262
2263	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2264		if (lkb1 == lkb2) {
2265			lkb_is_ahead = 1;
2266			continue;
2267		}
2268
2269		if (!lkb_is_ahead) {
2270			if (!modes_compat(lkb2, lkb1))
2271				return 1;
2272		} else {
2273			if (!modes_compat(lkb2, lkb1) &&
2274			    !modes_compat(lkb1, lkb2))
2275				return 1;
2276		}
2277	}
2278	return 0;
2279}
2280
2281/*
2282 * Return 1 if the lock can be granted, 0 otherwise.
2283 * Also detect and resolve conversion deadlocks.
2284 *
2285 * lkb is the lock to be granted
2286 *
2287 * now is 1 if the function is being called in the context of the
2288 * immediate request, it is 0 if called later, after the lock has been
2289 * queued.
2290 *
2291 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2292 * after recovery.
2293 *
2294 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2295 */
2296
2297static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2298			   int recover)
2299{
2300	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2301
2302	/*
2303	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2304	 * a new request for a NL mode lock being blocked.
2305	 *
2306	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2307	 * request, then it would be granted.  In essence, the use of this flag
 2308	 * tells the Lock Manager to expedite this request by not considering
2309	 * what may be in the CONVERTING or WAITING queues...  As of this
2310	 * writing, the EXPEDITE flag can be used only with new requests for NL
2311	 * mode locks.  This flag is not valid for conversion requests.
2312	 *
2313	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2314	 * conversion or used with a non-NL requested mode.  We also know an
2315	 * EXPEDITE request is always granted immediately, so now must always
2316	 * be 1.  The full condition to grant an expedite request: (now &&
2317	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2318	 * therefore be shortened to just checking the flag.
2319	 */
2320
2321	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2322		return 1;
2323
2324	/*
2325	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2326	 * added to the remaining conditions.
2327	 */
2328
2329	if (queue_conflict(&r->res_grantqueue, lkb))
2330		return 0;
2331
2332	/*
2333	 * 6-3: By default, a conversion request is immediately granted if the
2334	 * requested mode is compatible with the modes of all other granted
2335	 * locks
2336	 */
2337
2338	if (queue_conflict(&r->res_convertqueue, lkb))
2339		return 0;
2340
2341	/*
2342	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2343	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2344	 * The lkb's may have been rebuilt on the queues in a different
2345	 * order than they were in on the previous master.  So, granting
2346	 * queued conversions in order after recovery doesn't make sense
2347	 * since the order hasn't been preserved anyway.  The new order
2348	 * could also have created a new "in place" conversion deadlock.
2349	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2350	 * After recovery, there would be no granted locks, and possibly
2351	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2352	 * recovery, grant conversions without considering order.
2353	 */
2354
2355	if (conv && recover)
2356		return 1;
2357
2358	/*
2359	 * 6-5: But the default algorithm for deciding whether to grant or
2360	 * queue conversion requests does not by itself guarantee that such
2361	 * requests are serviced on a "first come first serve" basis.  This, in
 2362	 * turn, can lead to a phenomenon known as "indefinite postponement".
2363	 *
2364	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2365	 * the system service employed to request a lock conversion.  This flag
2366	 * forces certain conversion requests to be queued, even if they are
2367	 * compatible with the granted modes of other locks on the same
2368	 * resource.  Thus, the use of this flag results in conversion requests
 2369	 * being ordered on a "first come first serve" basis.
2370	 *
2371	 * DCT: This condition is all about new conversions being able to occur
2372	 * "in place" while the lock remains on the granted queue (assuming
2373	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2374	 * doesn't _have_ to go onto the convert queue where it's processed in
2375	 * order.  The "now" variable is necessary to distinguish converts
2376	 * being received and processed for the first time now, because once a
2377	 * convert is moved to the conversion queue the condition below applies
2378	 * requiring fifo granting.
2379	 */
2380
2381	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2382		return 1;
2383
2384	/*
2385	 * Even if the convert is compat with all granted locks,
2386	 * QUECVT forces it behind other locks on the convert queue.
2387	 */
2388
2389	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2390		if (list_empty(&r->res_convertqueue))
2391			return 1;
2392		else
2393			return 0;
2394	}
2395
2396	/*
2397	 * The NOORDER flag is set to avoid the standard vms rules on grant
2398	 * order.
2399	 */
2400
2401	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2402		return 1;
2403
2404	/*
2405	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2406	 * granted until all other conversion requests ahead of it are granted
2407	 * and/or canceled.
2408	 */
2409
2410	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2411		return 1;
2412
2413	/*
2414	 * 6-4: By default, a new request is immediately granted only if all
2415	 * three of the following conditions are satisfied when the request is
2416	 * issued:
2417	 * - The queue of ungranted conversion requests for the resource is
2418	 *   empty.
2419	 * - The queue of ungranted new requests for the resource is empty.
2420	 * - The mode of the new request is compatible with the most
2421	 *   restrictive mode of all granted locks on the resource.
2422	 */
2423
2424	if (now && !conv && list_empty(&r->res_convertqueue) &&
2425	    list_empty(&r->res_waitqueue))
2426		return 1;
2427
2428	/*
2429	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2430	 * it cannot be granted until the queue of ungranted conversion
2431	 * requests is empty, all ungranted new requests ahead of it are
2432	 * granted and/or canceled, and it is compatible with the granted mode
2433	 * of the most restrictive lock granted on the resource.
2434	 */
2435
2436	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2437	    first_in_list(lkb, &r->res_waitqueue))
2438		return 1;
2439
2440	return 0;
2441}
2442
2443static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2444			  int recover, int *err)
2445{
2446	int rv;
2447	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2448	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2449
2450	if (err)
2451		*err = 0;
2452
2453	rv = _can_be_granted(r, lkb, now, recover);
2454	if (rv)
2455		goto out;
2456
2457	/*
2458	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2459	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2460	 * cancels one of the locks.
2461	 */
2462
2463	if (is_convert && can_be_queued(lkb) &&
2464	    conversion_deadlock_detect(r, lkb)) {
2465		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2466			lkb->lkb_grmode = DLM_LOCK_NL;
2467			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2468		} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2469			if (err)
2470				*err = -EDEADLK;
2471			else {
2472				log_print("can_be_granted deadlock %x now %d",
2473					  lkb->lkb_id, now);
2474				dlm_dump_rsb(r);
2475			}
2476		}
2477		goto out;
2478	}
2479
2480	/*
2481	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2482	 * to grant a request in a mode other than the normal rqmode.  It's a
2483	 * simple way to provide a big optimization to applications that can
2484	 * use them.
2485	 */
2486
2487	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2488		alt = DLM_LOCK_PR;
2489	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2490		alt = DLM_LOCK_CW;
2491
2492	if (alt) {
2493		lkb->lkb_rqmode = alt;
2494		rv = _can_be_granted(r, lkb, now, 0);
2495		if (rv)
2496			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2497		else
2498			lkb->lkb_rqmode = rqmode;
2499	}
2500 out:
2501	return rv;
2502}
2503
2504/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
2505   for locks pending on the convert list.  Once verified (watch for these
2506   log_prints), we should be able to just call _can_be_granted() and not
2507   bother with the demote/deadlk cases here (and there's no easy way to deal
2508   with a deadlk here, we'd have to generate something like grant_lock with
2509   the deadlk error.) */
2510
2511/* Returns the highest requested mode of all blocked conversions; sets
2512   cw if there's a blocked conversion to DLM_LOCK_CW. */
2513
2514static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2515				 unsigned int *count)
2516{
2517	struct dlm_lkb *lkb, *s;
2518	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2519	int hi, demoted, quit, grant_restart, demote_restart;
2520	int deadlk;
2521
2522	quit = 0;
2523 restart:
2524	grant_restart = 0;
2525	demote_restart = 0;
2526	hi = DLM_LOCK_IV;
2527
2528	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2529		demoted = is_demoted(lkb);
2530		deadlk = 0;
2531
2532		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2533			grant_lock_pending(r, lkb);
2534			grant_restart = 1;
2535			if (count)
2536				(*count)++;
2537			continue;
2538		}
2539
2540		if (!demoted && is_demoted(lkb)) {
2541			log_print("WARN: pending demoted %x node %d %s",
2542				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2543			demote_restart = 1;
2544			continue;
2545		}
2546
2547		if (deadlk) {
2548			log_print("WARN: pending deadlock %x node %d %s",
2549				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2550			dlm_dump_rsb(r);
2551			continue;
2552		}
2553
2554		hi = max_t(int, lkb->lkb_rqmode, hi);
2555
2556		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2557			*cw = 1;
2558	}
2559
2560	if (grant_restart)
2561		goto restart;
2562	if (demote_restart && !quit) {
2563		quit = 1;
2564		goto restart;
2565	}
2566
2567	return max_t(int, high, hi);
2568}
2569
2570static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2571			      unsigned int *count)
2572{
2573	struct dlm_lkb *lkb, *s;
2574
2575	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2576		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2577			grant_lock_pending(r, lkb);
2578			if (count)
2579				(*count)++;
2580		} else {
2581			high = max_t(int, lkb->lkb_rqmode, high);
2582			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2583				*cw = 1;
2584		}
2585	}
2586
2587	return high;
2588}
2589
2590/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2591   on either the convert or waiting queue.
2592   high is the largest rqmode of all locks blocked on the convert or
2593   waiting queue. */
2594
2595static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2596{
2597	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2598		if (gr->lkb_highbast < DLM_LOCK_EX)
2599			return 1;
2600		return 0;
2601	}
2602
2603	if (gr->lkb_highbast < high &&
2604	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2605		return 1;
2606	return 0;
2607}
2608
2609static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2610{
2611	struct dlm_lkb *lkb, *s;
2612	int high = DLM_LOCK_IV;
2613	int cw = 0;
2614
2615	if (!is_master(r)) {
2616		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2617		dlm_dump_rsb(r);
2618		return;
2619	}
2620
2621	high = grant_pending_convert(r, high, &cw, count);
2622	high = grant_pending_wait(r, high, &cw, count);
2623
2624	if (high == DLM_LOCK_IV)
2625		return;
2626
2627	/*
2628	 * If there are locks left on the wait/convert queue then send blocking
2629	 * ASTs to granted locks based on the largest requested mode (high)
2630	 * found above.
2631	 */
2632
2633	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2634		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2635			if (cw && high == DLM_LOCK_PR &&
2636			    lkb->lkb_grmode == DLM_LOCK_PR)
2637				queue_bast(r, lkb, DLM_LOCK_CW);
2638			else
2639				queue_bast(r, lkb, high);
2640			lkb->lkb_highbast = high;
2641		}
2642	}
2643}
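/* Worked example of the bast decisions above: if an EX request is left
   blocked (high == EX), every granted lock that registered a bast callback,
   conflicts with EX (any mode above NL), and has lkb_highbast below EX gets
   a blocking callback for EX.  The special case handles a granted PR lock
   when both PR and CW requests are blocked: high is then PR, which is
   compatible with the granted PR, but the blocked CW still conflicts with
   it, so the PR holder is basted with DLM_LOCK_CW instead. */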
2644
2645static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2646{
2647	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2648	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2649		if (gr->lkb_highbast < DLM_LOCK_EX)
2650			return 1;
2651		return 0;
2652	}
2653
2654	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2655		return 1;
2656	return 0;
2657}
2658
2659static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2660			    struct dlm_lkb *lkb)
2661{
2662	struct dlm_lkb *gr;
2663
2664	list_for_each_entry(gr, head, lkb_statequeue) {
2665		/* skip self when sending basts to convertqueue */
2666		if (gr == lkb)
2667			continue;
2668		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2669			queue_bast(r, gr, lkb->lkb_rqmode);
2670			gr->lkb_highbast = lkb->lkb_rqmode;
2671		}
2672	}
2673}
2674
2675static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2676{
2677	send_bast_queue(r, &r->res_grantqueue, lkb);
2678}
2679
2680static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2681{
2682	send_bast_queue(r, &r->res_grantqueue, lkb);
2683	send_bast_queue(r, &r->res_convertqueue, lkb);
2684}
2685
2686/* set_master(r, lkb) -- set the master nodeid of a resource
2687
2688   The purpose of this function is to set the nodeid field in the given
2689   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2690   known, it can just be copied to the lkb and the function will return
2691   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2692   before it can be copied to the lkb.
2693
2694   When the rsb nodeid is being looked up remotely, the initial lkb
2695   causing the lookup is kept on the ls_waiters list waiting for the
2696   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2697   on the rsb's res_lookup list until the master is verified.
2698
2699   Return values:
2700   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2701   1: the rsb master is not available and the lkb has been placed on
2702      a wait queue
2703*/
2704
2705static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2706{
2707	int our_nodeid = dlm_our_nodeid();
2708
2709	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2710		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2711		r->res_first_lkid = lkb->lkb_id;
2712		lkb->lkb_nodeid = r->res_nodeid;
2713		return 0;
2714	}
2715
2716	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2717		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2718		return 1;
2719	}
2720
2721	if (r->res_master_nodeid == our_nodeid) {
2722		lkb->lkb_nodeid = 0;
2723		return 0;
2724	}
2725
2726	if (r->res_master_nodeid) {
2727		lkb->lkb_nodeid = r->res_master_nodeid;
2728		return 0;
2729	}
2730
2731	if (dlm_dir_nodeid(r) == our_nodeid) {
2732		/* This is a somewhat unusual case; find_rsb will usually
2733		   have set res_master_nodeid when dir nodeid is local, but
2734		   there are cases where we become the dir node after we've
2735		   passed find_rsb and go through _request_lock again.
2736		   confirm_master() or process_lookup_list() needs to be
2737		   called after this. */
2738		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2739			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2740			  r->res_name);
2741		r->res_master_nodeid = our_nodeid;
2742		r->res_nodeid = 0;
2743		lkb->lkb_nodeid = 0;
2744		return 0;
2745	}
2746
2747	wait_pending_remove(r);
2748
2749	r->res_first_lkid = lkb->lkb_id;
2750	send_lookup(r, lkb);
2751	return 1;
2752}
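/* Caller pattern, as seen in _request_lock() below: a negative return is
   treated as an error, 1 means the lkb has been parked (waiting either for
   the lookup reply or on r->res_lookup) and the request will be resumed
   later, and 0 means lkb_nodeid is now valid so the caller can proceed
   with a local do_xxxx() or a remote send_xxxx(). */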
2753
2754static void process_lookup_list(struct dlm_rsb *r)
2755{
2756	struct dlm_lkb *lkb, *safe;
2757
2758	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2759		list_del_init(&lkb->lkb_rsb_lookup);
2760		_request_lock(r, lkb);
2761		schedule();
2762	}
2763}
2764
2765/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2766
2767static void confirm_master(struct dlm_rsb *r, int error)
2768{
2769	struct dlm_lkb *lkb;
2770
2771	if (!r->res_first_lkid)
2772		return;
2773
2774	switch (error) {
2775	case 0:
2776	case -EINPROGRESS:
2777		r->res_first_lkid = 0;
2778		process_lookup_list(r);
2779		break;
2780
2781	case -EAGAIN:
2782	case -EBADR:
2783	case -ENOTBLK:
2784		/* the remote request failed and won't be retried (it was
2785		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2786		   lkb the first_lkid */
2787
2788		r->res_first_lkid = 0;
2789
2790		if (!list_empty(&r->res_lookup)) {
2791			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2792					 lkb_rsb_lookup);
2793			list_del_init(&lkb->lkb_rsb_lookup);
2794			r->res_first_lkid = lkb->lkb_id;
2795			_request_lock(r, lkb);
2796		}
2797		break;
2798
2799	default:
2800		log_error(r->res_ls, "confirm_master unknown error %d", error);
2801	}
2802}
2803
2804static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2805			 int namelen, unsigned long timeout_cs,
2806			 void (*ast) (void *astparam),
2807			 void *astparam,
2808			 void (*bast) (void *astparam, int mode),
2809			 struct dlm_args *args)
2810{
2811	int rv = -EINVAL;
2812
2813	/* check for invalid arg usage */
2814
2815	if (mode < 0 || mode > DLM_LOCK_EX)
2816		goto out;
2817
2818	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2819		goto out;
2820
2821	if (flags & DLM_LKF_CANCEL)
2822		goto out;
2823
2824	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2825		goto out;
2826
2827	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2828		goto out;
2829
2830	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2831		goto out;
2832
2833	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2834		goto out;
2835
2836	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2837		goto out;
2838
2839	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2840		goto out;
2841
2842	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2843		goto out;
2844
2845	if (!ast || !lksb)
2846		goto out;
2847
2848	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2849		goto out;
2850
2851	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2852		goto out;
2853
2854	/* these args will be copied to the lkb in validate_lock_args;
2855	   it cannot be done now because when converting locks, fields in
2856	   an active lkb cannot be modified before locking the rsb */
2857
2858	args->flags = flags;
2859	args->astfn = ast;
2860	args->astparam = astparam;
2861	args->bastfn = bast;
2862	args->timeout = timeout_cs;
2863	args->mode = mode;
2864	args->lksb = lksb;
2865	rv = 0;
2866 out:
2867	return rv;
2868}
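/* Examples of combinations rejected above: DLM_LKF_EXPEDITE is only valid
   for a new DLM_LOCK_NL request, so pairing it with CONVERT, QUECVT,
   NOQUEUE or a non-NL mode fails with -EINVAL; DLM_LKF_VALBLK requires
   lksb->sb_lvbptr to be set; and a CONVERT must name an existing lock
   through lksb->sb_lkid. */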
2869
2870static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2871{
2872	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2873 		      DLM_LKF_FORCEUNLOCK))
2874		return -EINVAL;
2875
2876	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2877		return -EINVAL;
2878
2879	args->flags = flags;
2880	args->astparam = astarg;
2881	return 0;
2882}
2883
2884static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2885			      struct dlm_args *args)
2886{
2887	int rv = -EINVAL;
2888
2889	if (args->flags & DLM_LKF_CONVERT) {
2890		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2891			goto out;
2892
2893		if (args->flags & DLM_LKF_QUECVT &&
2894		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2895			goto out;
2896
2897		rv = -EBUSY;
2898		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2899			goto out;
2900
2901		if (lkb->lkb_wait_type)
2902			goto out;
2903
2904		if (is_overlap(lkb))
2905			goto out;
2906	}
2907
2908	lkb->lkb_exflags = args->flags;
2909	lkb->lkb_sbflags = 0;
2910	lkb->lkb_astfn = args->astfn;
2911	lkb->lkb_astparam = args->astparam;
2912	lkb->lkb_bastfn = args->bastfn;
2913	lkb->lkb_rqmode = args->mode;
2914	lkb->lkb_lksb = args->lksb;
2915	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2916	lkb->lkb_ownpid = (int) current->pid;
2917	lkb->lkb_timeout_cs = args->timeout;
2918	rv = 0;
2919 out:
2920	if (rv)
2921		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2922			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2923			  lkb->lkb_status, lkb->lkb_wait_type,
2924			  lkb->lkb_resource->res_name);
2925	return rv;
2926}
2927
2928/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2929   for success */
2930
2931/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2932   because there may be a lookup in progress and it's valid to do
2933   cancel/unlockf on it */
2934
2935static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2936{
2937	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2938	int rv = -EINVAL;
2939
2940	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2941		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2942		dlm_print_lkb(lkb);
2943		goto out;
2944	}
2945
2946	/* an lkb may still exist even though the lock is EOL'ed due to a
2947	   cancel, unlock or failed noqueue request; an app can't use these
2948	   locks; return same error as if the lkid had not been found at all */
2949
2950	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2951		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2952		rv = -ENOENT;
2953		goto out;
2954	}
2955
2956	/* an lkb may be waiting for an rsb lookup to complete where the
2957	   lookup was initiated by another lock */
2958
2959	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2960		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2961			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2962			list_del_init(&lkb->lkb_rsb_lookup);
2963			queue_cast(lkb->lkb_resource, lkb,
2964				   args->flags & DLM_LKF_CANCEL ?
2965				   -DLM_ECANCEL : -DLM_EUNLOCK);
2966			unhold_lkb(lkb); /* undoes create_lkb() */
2967		}
2968		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2969		rv = -EBUSY;
2970		goto out;
2971	}
2972
2973	/* cancel not allowed with another cancel/unlock in progress */
2974
2975	if (args->flags & DLM_LKF_CANCEL) {
2976		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2977			goto out;
2978
2979		if (is_overlap(lkb))
2980			goto out;
2981
2982		/* don't let scand try to do a cancel */
2983		del_timeout(lkb);
2984
2985		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2986			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2987			rv = -EBUSY;
2988			goto out;
2989		}
2990
2991		/* there's nothing to cancel */
2992		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2993		    !lkb->lkb_wait_type) {
2994			rv = -EBUSY;
2995			goto out;
2996		}
2997
2998		switch (lkb->lkb_wait_type) {
2999		case DLM_MSG_LOOKUP:
3000		case DLM_MSG_REQUEST:
3001			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3002			rv = -EBUSY;
3003			goto out;
3004		case DLM_MSG_UNLOCK:
3005		case DLM_MSG_CANCEL:
3006			goto out;
3007		}
3008		/* add_to_waiters() will set OVERLAP_CANCEL */
3009		goto out_ok;
3010	}
3011
3012	/* do we need to allow a force-unlock if there's a normal unlock
3013	   already in progress?  in what conditions could the normal unlock
3014	   fail such that we'd want to send a force-unlock to be sure? */
3015
3016	if (args->flags & DLM_LKF_FORCEUNLOCK) {
3017		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3018			goto out;
3019
3020		if (is_overlap_unlock(lkb))
3021			goto out;
3022
3023		/* don't let scand try to do a cancel */
3024		del_timeout(lkb);
3025
3026		if (lkb->lkb_flags & DLM_IFL_RESEND) {
3027			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3028			rv = -EBUSY;
3029			goto out;
3030		}
3031
3032		switch (lkb->lkb_wait_type) {
3033		case DLM_MSG_LOOKUP:
3034		case DLM_MSG_REQUEST:
3035			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3036			rv = -EBUSY;
3037			goto out;
3038		case DLM_MSG_UNLOCK:
3039			goto out;
3040		}
3041		/* add_to_waiters() will set OVERLAP_UNLOCK */
3042		goto out_ok;
3043	}
3044
3045	/* normal unlock not allowed if there's any op in progress */
3046	rv = -EBUSY;
3047	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3048		goto out;
3049
3050 out_ok:
3051	/* an overlapping op shouldn't blow away exflags from other op */
3052	lkb->lkb_exflags |= args->flags;
3053	lkb->lkb_sbflags = 0;
3054	lkb->lkb_astparam = args->astparam;
3055	rv = 0;
3056 out:
3057	if (rv)
3058		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3059			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3060			  args->flags, lkb->lkb_wait_type,
3061			  lkb->lkb_resource->res_name);
3062	return rv;
3063}
3064
3065/*
3066 * Four stage 4 varieties:
3067 * do_request(), do_convert(), do_unlock(), do_cancel()
3068 * These are called on the master node for the given lock and
3069 * from the central locking logic.
3070 */
3071
3072static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3073{
3074	int error = 0;
3075
3076	if (can_be_granted(r, lkb, 1, 0, NULL)) {
3077		grant_lock(r, lkb);
3078		queue_cast(r, lkb, 0);
3079		goto out;
3080	}
3081
3082	if (can_be_queued(lkb)) {
3083		error = -EINPROGRESS;
3084		add_lkb(r, lkb, DLM_LKSTS_WAITING);
3085		add_timeout(lkb);
3086		goto out;
3087	}
3088
3089	error = -EAGAIN;
3090	queue_cast(r, lkb, -EAGAIN);
3091 out:
3092	return error;
3093}
3094
3095static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3096			       int error)
3097{
3098	switch (error) {
3099	case -EAGAIN:
3100		if (force_blocking_asts(lkb))
3101			send_blocking_asts_all(r, lkb);
3102		break;
3103	case -EINPROGRESS:
3104		send_blocking_asts(r, lkb);
3105		break;
3106	}
3107}
3108
3109static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3110{
3111	int error = 0;
3112	int deadlk = 0;
3113
3114	/* changing an existing lock may allow others to be granted */
3115
3116	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3117		grant_lock(r, lkb);
3118		queue_cast(r, lkb, 0);
3119		goto out;
3120	}
3121
3122	/* can_be_granted() detected that this lock would block in a conversion
3123	   deadlock, so we leave it on the granted queue and return EDEADLK in
3124	   the ast for the convert. */
3125
3126	if (deadlk) {
3127		/* it's left on the granted queue */
3128		revert_lock(r, lkb);
3129		queue_cast(r, lkb, -EDEADLK);
3130		error = -EDEADLK;
3131		goto out;
3132	}
3133
3134	/* is_demoted() means the can_be_granted() above set the grmode
3135	   to NL, and left us on the granted queue.  This auto-demotion
3136	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
3137	   now grantable.  We have to try to grant other converting locks
3138	   before we try again to grant this one. */
3139
3140	if (is_demoted(lkb)) {
3141		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3142		if (_can_be_granted(r, lkb, 1, 0)) {
3143			grant_lock(r, lkb);
3144			queue_cast(r, lkb, 0);
3145			goto out;
3146		}
3147		/* else fall through and move to convert queue */
3148	}
3149
3150	if (can_be_queued(lkb)) {
3151		error = -EINPROGRESS;
3152		del_lkb(r, lkb);
3153		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3154		add_timeout(lkb);
3155		goto out;
3156	}
3157
3158	error = -EAGAIN;
3159	queue_cast(r, lkb, -EAGAIN);
3160 out:
3161	return error;
3162}
3163
3164static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3165			       int error)
3166{
3167	switch (error) {
3168	case 0:
3169		grant_pending_locks(r, NULL);
3170		/* grant_pending_locks also sends basts */
3171		break;
3172	case -EAGAIN:
3173		if (force_blocking_asts(lkb))
3174			send_blocking_asts_all(r, lkb);
3175		break;
3176	case -EINPROGRESS:
3177		send_blocking_asts(r, lkb);
3178		break;
3179	}
3180}
3181
3182static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3183{
3184	remove_lock(r, lkb);
3185	queue_cast(r, lkb, -DLM_EUNLOCK);
3186	return -DLM_EUNLOCK;
3187}
3188
3189static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3190			      int error)
3191{
3192	grant_pending_locks(r, NULL);
3193}
3194
3195/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3196
3197static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3198{
3199	int error;
3200
3201	error = revert_lock(r, lkb);
3202	if (error) {
3203		queue_cast(r, lkb, -DLM_ECANCEL);
3204		return -DLM_ECANCEL;
3205	}
3206	return 0;
3207}
3208
3209static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3210			      int error)
3211{
3212	if (error)
3213		grant_pending_locks(r, NULL);
3214}
3215
3216/*
3217 * Four stage 3 varieties:
3218 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3219 */
3220
3221/* add a new lkb to a possibly new rsb, called by requesting process */
3222
3223static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3224{
3225	int error;
3226
3227	/* set_master: sets lkb nodeid from r */
3228
3229	error = set_master(r, lkb);
3230	if (error < 0)
3231		goto out;
3232	if (error) {
3233		error = 0;
3234		goto out;
3235	}
3236
3237	if (is_remote(r)) {
3238		/* receive_request() calls do_request() on remote node */
3239		error = send_request(r, lkb);
3240	} else {
3241		error = do_request(r, lkb);
3242		/* for remote locks the request_reply is sent
3243		   between do_request and do_request_effects */
3244		do_request_effects(r, lkb, error);
3245	}
3246 out:
3247	return error;
3248}
3249
3250/* change some property of an existing lkb, e.g. mode */
3251
3252static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3253{
3254	int error;
3255
3256	if (is_remote(r)) {
3257		/* receive_convert() calls do_convert() on remote node */
3258		error = send_convert(r, lkb);
3259	} else {
3260		error = do_convert(r, lkb);
3261		/* for remote locks the convert_reply is sent
3262		   between do_convert and do_convert_effects */
3263		do_convert_effects(r, lkb, error);
3264	}
3265
3266	return error;
3267}
3268
3269/* remove an existing lkb from the granted queue */
3270
3271static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3272{
3273	int error;
3274
3275	if (is_remote(r)) {
3276		/* receive_unlock() calls do_unlock() on remote node */
3277		error = send_unlock(r, lkb);
3278	} else {
3279		error = do_unlock(r, lkb);
3280		/* for remote locks the unlock_reply is sent
3281		   between do_unlock and do_unlock_effects */
3282		do_unlock_effects(r, lkb, error);
3283	}
3284
3285	return error;
3286}
3287
3288/* remove an existing lkb from the convert or wait queue */
3289
3290static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3291{
3292	int error;
3293
3294	if (is_remote(r)) {
3295		/* receive_cancel() calls do_cancel() on remote node */
3296		error = send_cancel(r, lkb);
3297	} else {
3298		error = do_cancel(r, lkb);
3299		/* for remote locks the cancel_reply is sent
3300		   between do_cancel and do_cancel_effects */
3301		do_cancel_effects(r, lkb, error);
3302	}
3303
3304	return error;
3305}
3306
3307/*
3308 * Four stage 2 varieties:
3309 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3310 */
3311
3312static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3313			int len, struct dlm_args *args)
3314{
3315	struct dlm_rsb *r;
3316	int error;
3317
3318	error = validate_lock_args(ls, lkb, args);
3319	if (error)
3320		return error;
3321
3322	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3323	if (error)
3324		return error;
3325
3326	lock_rsb(r);
3327
3328	attach_lkb(r, lkb);
3329	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3330
3331	error = _request_lock(r, lkb);
3332
3333	unlock_rsb(r);
3334	put_rsb(r);
3335	return error;
3336}
3337
3338static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3339			struct dlm_args *args)
3340{
3341	struct dlm_rsb *r;
3342	int error;
3343
3344	r = lkb->lkb_resource;
3345
3346	hold_rsb(r);
3347	lock_rsb(r);
3348
3349	error = validate_lock_args(ls, lkb, args);
3350	if (error)
3351		goto out;
3352
3353	error = _convert_lock(r, lkb);
3354 out:
3355	unlock_rsb(r);
3356	put_rsb(r);
3357	return error;
3358}
3359
3360static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3361		       struct dlm_args *args)
3362{
3363	struct dlm_rsb *r;
3364	int error;
3365
3366	r = lkb->lkb_resource;
3367
3368	hold_rsb(r);
3369	lock_rsb(r);
3370
3371	error = validate_unlock_args(lkb, args);
3372	if (error)
3373		goto out;
3374
3375	error = _unlock_lock(r, lkb);
3376 out:
3377	unlock_rsb(r);
3378	put_rsb(r);
3379	return error;
3380}
3381
3382static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3383		       struct dlm_args *args)
3384{
3385	struct dlm_rsb *r;
3386	int error;
3387
3388	r = lkb->lkb_resource;
3389
3390	hold_rsb(r);
3391	lock_rsb(r);
3392
3393	error = validate_unlock_args(lkb, args);
3394	if (error)
3395		goto out;
3396
3397	error = _cancel_lock(r, lkb);
3398 out:
3399	unlock_rsb(r);
3400	put_rsb(r);
3401	return error;
3402}
3403
3404/*
3405 * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3406 */
3407
3408int dlm_lock(dlm_lockspace_t *lockspace,
3409	     int mode,
3410	     struct dlm_lksb *lksb,
3411	     uint32_t flags,
3412	     void *name,
3413	     unsigned int namelen,
3414	     uint32_t parent_lkid,
3415	     void (*ast) (void *astarg),
3416	     void *astarg,
3417	     void (*bast) (void *astarg, int mode))
3418{
3419	struct dlm_ls *ls;
3420	struct dlm_lkb *lkb;
3421	struct dlm_args args;
3422	int error, convert = flags & DLM_LKF_CONVERT;
3423
3424	ls = dlm_find_lockspace_local(lockspace);
3425	if (!ls)
3426		return -EINVAL;
3427
3428	dlm_lock_recovery(ls);
3429
3430	if (convert)
3431		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3432	else
3433		error = create_lkb(ls, &lkb);
3434
3435	if (error)
3436		goto out;
3437
3438	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3439			      astarg, bast, &args);
3440	if (error)
3441		goto out_put;
3442
3443	if (convert)
3444		error = convert_lock(ls, lkb, &args);
3445	else
3446		error = request_lock(ls, lkb, name, namelen, &args);
3447
3448	if (error == -EINPROGRESS)
3449		error = 0;
3450 out_put:
3451	if (convert || error)
3452		__put_lkb(ls, lkb);
3453	if (error == -EAGAIN || error == -EDEADLK)
3454		error = 0;
3455 out:
3456	dlm_unlock_recovery(ls);
3457	dlm_put_lockspace(ls);
3458	return error;
3459}
3460
3461int dlm_unlock(dlm_lockspace_t *lockspace,
3462	       uint32_t lkid,
3463	       uint32_t flags,
3464	       struct dlm_lksb *lksb,
3465	       void *astarg)
3466{
3467	struct dlm_ls *ls;
3468	struct dlm_lkb *lkb;
3469	struct dlm_args args;
3470	int error;
3471
3472	ls = dlm_find_lockspace_local(lockspace);
3473	if (!ls)
3474		return -EINVAL;
3475
3476	dlm_lock_recovery(ls);
3477
3478	error = find_lkb(ls, lkid, &lkb);
3479	if (error)
3480		goto out;
3481
3482	error = set_unlock_args(flags, astarg, &args);
3483	if (error)
3484		goto out_put;
3485
3486	if (flags & DLM_LKF_CANCEL)
3487		error = cancel_lock(ls, lkb, &args);
3488	else
3489		error = unlock_lock(ls, lkb, &args);
3490
3491	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3492		error = 0;
3493	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3494		error = 0;
3495 out_put:
3496	dlm_put_lkb(lkb);
3497 out:
3498	dlm_unlock_recovery(ls);
3499	dlm_put_lockspace(ls);
3500	return error;
3501}
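/* Hedged sketch of a typical in-kernel caller of the two entry points
   above: a synchronous wrapper that waits for the completion ast and then
   returns the final status from the lksb.  It assumes a lockspace handle
   obtained elsewhere (e.g. from dlm_new_lockspace()) and
   <linux/completion.h>; the demo_* names are hypothetical and the block is
   not compiled. */
#if 0
struct demo_lock {
	struct dlm_lksb lksb;
	struct completion done;
};

static void demo_ast(void *arg)
{
	struct demo_lock *dl = arg;

	/* dl->lksb.sb_status now holds the result (0, -EAGAIN, ...) */
	complete(&dl->done);
}

static int demo_lock_ex(dlm_lockspace_t *ls, struct demo_lock *dl,
			void *name, unsigned int namelen)
{
	int error;

	init_completion(&dl->done);

	error = dlm_lock(ls, DLM_LOCK_EX, &dl->lksb, 0, name, namelen,
			 0, demo_ast, dl, NULL);
	if (error)
		return error;

	wait_for_completion(&dl->done);
	return dl->lksb.sb_status;
}

/* Releasing it later: dlm_unlock(ls, dl->lksb.sb_lkid, 0, &dl->lksb, dl);
   the ast fires again, this time with sb_status == -DLM_EUNLOCK. */
#endif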
3502
3503/*
3504 * send/receive routines for remote operations and replies
3505 *
3506 * send_args
3507 * send_common
3508 * send_request			receive_request
3509 * send_convert			receive_convert
3510 * send_unlock			receive_unlock
3511 * send_cancel			receive_cancel
3512 * send_grant			receive_grant
3513 * send_bast			receive_bast
3514 * send_lookup			receive_lookup
3515 * send_remove			receive_remove
3516 *
3517 * 				send_common_reply
3518 * receive_request_reply	send_request_reply
3519 * receive_convert_reply	send_convert_reply
3520 * receive_unlock_reply		send_unlock_reply
3521 * receive_cancel_reply		send_cancel_reply
3522 * receive_lookup_reply		send_lookup_reply
3523 */
3524
3525static int _create_message(struct dlm_ls *ls, int mb_len,
3526			   int to_nodeid, int mstype,
3527			   struct dlm_message **ms_ret,
3528			   struct dlm_mhandle **mh_ret)
3529{
3530	struct dlm_message *ms;
3531	struct dlm_mhandle *mh;
3532	char *mb;
3533
3534	/* get_buffer gives us a message handle (mh) that we need to
3535	   pass into lowcomms_commit and a message buffer (mb) that we
3536	   write our data into */
3537
3538	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3539	if (!mh)
3540		return -ENOBUFS;
3541
3542	memset(mb, 0, mb_len);
3543
3544	ms = (struct dlm_message *) mb;
3545
3546	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3547	ms->m_header.h_lockspace = ls->ls_global_id;
3548	ms->m_header.h_nodeid = dlm_our_nodeid();
3549	ms->m_header.h_length = mb_len;
3550	ms->m_header.h_cmd = DLM_MSG;
3551
3552	ms->m_type = mstype;
3553
3554	*mh_ret = mh;
3555	*ms_ret = ms;
3556	return 0;
3557}
3558
3559static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3560			  int to_nodeid, int mstype,
3561			  struct dlm_message **ms_ret,
3562			  struct dlm_mhandle **mh_ret)
3563{
3564	int mb_len = sizeof(struct dlm_message);
3565
3566	switch (mstype) {
3567	case DLM_MSG_REQUEST:
3568	case DLM_MSG_LOOKUP:
3569	case DLM_MSG_REMOVE:
3570		mb_len += r->res_length;
3571		break;
3572	case DLM_MSG_CONVERT:
3573	case DLM_MSG_UNLOCK:
3574	case DLM_MSG_REQUEST_REPLY:
3575	case DLM_MSG_CONVERT_REPLY:
3576	case DLM_MSG_GRANT:
3577		if (lkb && lkb->lkb_lvbptr)
3578			mb_len += r->res_ls->ls_lvblen;
3579		break;
3580	}
3581
3582	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3583			       ms_ret, mh_ret);
3584}
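/* For example, a DLM_MSG_REQUEST for a 10-byte resource name is built with
   mb_len = sizeof(struct dlm_message) + 10; send_args() copies the name
   into m_extra, and the receiving side recovers the 10 bytes as
   h_length - sizeof(struct dlm_message) in receive_extralen(). */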
3585
3586/* further lowcomms enhancements or alternate implementations may make
3587   the return value from this function useful at some point */
3588
3589static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3590{
3591	dlm_message_out(ms);
3592	dlm_lowcomms_commit_buffer(mh);
3593	return 0;
3594}
3595
3596static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3597		      struct dlm_message *ms)
3598{
3599	ms->m_nodeid   = lkb->lkb_nodeid;
3600	ms->m_pid      = lkb->lkb_ownpid;
3601	ms->m_lkid     = lkb->lkb_id;
3602	ms->m_remid    = lkb->lkb_remid;
3603	ms->m_exflags  = lkb->lkb_exflags;
3604	ms->m_sbflags  = lkb->lkb_sbflags;
3605	ms->m_flags    = lkb->lkb_flags;
3606	ms->m_lvbseq   = lkb->lkb_lvbseq;
3607	ms->m_status   = lkb->lkb_status;
3608	ms->m_grmode   = lkb->lkb_grmode;
3609	ms->m_rqmode   = lkb->lkb_rqmode;
3610	ms->m_hash     = r->res_hash;
3611
3612	/* m_result and m_bastmode are set from function args,
3613	   not from lkb fields */
3614
3615	if (lkb->lkb_bastfn)
3616		ms->m_asts |= DLM_CB_BAST;
3617	if (lkb->lkb_astfn)
3618		ms->m_asts |= DLM_CB_CAST;
3619
3620	/* compare with switch in create_message; send_remove() doesn't
3621	   use send_args() */
3622
3623	switch (ms->m_type) {
3624	case DLM_MSG_REQUEST:
3625	case DLM_MSG_LOOKUP:
3626		memcpy(ms->m_extra, r->res_name, r->res_length);
3627		break;
3628	case DLM_MSG_CONVERT:
3629	case DLM_MSG_UNLOCK:
3630	case DLM_MSG_REQUEST_REPLY:
3631	case DLM_MSG_CONVERT_REPLY:
3632	case DLM_MSG_GRANT:
3633		if (!lkb->lkb_lvbptr)
3634			break;
3635		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3636		break;
3637	}
3638}
3639
3640static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3641{
3642	struct dlm_message *ms;
3643	struct dlm_mhandle *mh;
3644	int to_nodeid, error;
3645
3646	to_nodeid = r->res_nodeid;
3647
3648	error = add_to_waiters(lkb, mstype, to_nodeid);
3649	if (error)
3650		return error;
3651
3652	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3653	if (error)
3654		goto fail;
3655
3656	send_args(r, lkb, ms);
3657
3658	error = send_message(mh, ms);
3659	if (error)
3660		goto fail;
3661	return 0;
3662
3663 fail:
3664	remove_from_waiters(lkb, msg_reply_type(mstype));
3665	return error;
3666}
3667
3668static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3669{
3670	return send_common(r, lkb, DLM_MSG_REQUEST);
3671}
3672
3673static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3674{
3675	int error;
3676
3677	error = send_common(r, lkb, DLM_MSG_CONVERT);
3678
3679	/* down conversions go without a reply from the master */
3680	if (!error && down_conversion(lkb)) {
3681		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3682		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3683		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3684		r->res_ls->ls_stub_ms.m_result = 0;
3685		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3686	}
3687
3688	return error;
3689}
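/* A down conversion moves to a less restrictive mode (e.g. EX to NL),
   which the master can always grant immediately, so no reply message is
   expected; the stub message above feeds the normal convert-reply path
   locally instead. */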
3690
3691/* FIXME: if this lkb is the only lock we hold on the rsb, then set
3692   MASTER_UNCERTAIN to force the next request on the rsb to confirm
3693   that the master is still correct. */
3694
3695static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3696{
3697	return send_common(r, lkb, DLM_MSG_UNLOCK);
3698}
3699
3700static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3701{
3702	return send_common(r, lkb, DLM_MSG_CANCEL);
3703}
3704
3705static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3706{
3707	struct dlm_message *ms;
3708	struct dlm_mhandle *mh;
3709	int to_nodeid, error;
3710
3711	to_nodeid = lkb->lkb_nodeid;
3712
3713	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3714	if (error)
3715		goto out;
3716
3717	send_args(r, lkb, ms);
3718
3719	ms->m_result = 0;
3720
3721	error = send_message(mh, ms);
3722 out:
3723	return error;
3724}
3725
3726static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3727{
3728	struct dlm_message *ms;
3729	struct dlm_mhandle *mh;
3730	int to_nodeid, error;
3731
3732	to_nodeid = lkb->lkb_nodeid;
3733
3734	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3735	if (error)
3736		goto out;
3737
3738	send_args(r, lkb, ms);
3739
3740	ms->m_bastmode = mode;
3741
3742	error = send_message(mh, ms);
3743 out:
3744	return error;
3745}
3746
3747static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3748{
3749	struct dlm_message *ms;
3750	struct dlm_mhandle *mh;
3751	int to_nodeid, error;
3752
3753	to_nodeid = dlm_dir_nodeid(r);
3754
3755	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3756	if (error)
3757		return error;
3758
3759	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3760	if (error)
3761		goto fail;
3762
3763	send_args(r, lkb, ms);
3764
3765	error = send_message(mh, ms);
3766	if (error)
3767		goto fail;
3768	return 0;
3769
3770 fail:
3771	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3772	return error;
3773}
3774
3775static int send_remove(struct dlm_rsb *r)
3776{
3777	struct dlm_message *ms;
3778	struct dlm_mhandle *mh;
3779	int to_nodeid, error;
3780
3781	to_nodeid = dlm_dir_nodeid(r);
3782
3783	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3784	if (error)
3785		goto out;
3786
3787	memcpy(ms->m_extra, r->res_name, r->res_length);
3788	ms->m_hash = r->res_hash;
3789
3790	error = send_message(mh, ms);
3791 out:
3792	return error;
3793}
3794
3795static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3796			     int mstype, int rv)
3797{
3798	struct dlm_message *ms;
3799	struct dlm_mhandle *mh;
3800	int to_nodeid, error;
3801
3802	to_nodeid = lkb->lkb_nodeid;
3803
3804	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3805	if (error)
3806		goto out;
3807
3808	send_args(r, lkb, ms);
3809
3810	ms->m_result = rv;
3811
3812	error = send_message(mh, ms);
3813 out:
3814	return error;
3815}
3816
3817static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3818{
3819	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3820}
3821
3822static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3823{
3824	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3825}
3826
3827static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3828{
3829	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3830}
3831
3832static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3833{
3834	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3835}
3836
3837static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3838			     int ret_nodeid, int rv)
3839{
3840	struct dlm_rsb *r = &ls->ls_stub_rsb;
3841	struct dlm_message *ms;
3842	struct dlm_mhandle *mh;
3843	int error, nodeid = ms_in->m_header.h_nodeid;
3844
3845	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3846	if (error)
3847		goto out;
3848
3849	ms->m_lkid = ms_in->m_lkid;
3850	ms->m_result = rv;
3851	ms->m_nodeid = ret_nodeid;
3852
3853	error = send_message(mh, ms);
3854 out:
3855	return error;
3856}
3857
3858/* which args we save from a received message depends heavily on the type
3859   of message, unlike the send side where we can safely send everything about
3860   the lkb for any type of message */
3861
3862static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3863{
3864	lkb->lkb_exflags = ms->m_exflags;
3865	lkb->lkb_sbflags = ms->m_sbflags;
3866	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3867		         (ms->m_flags & 0x0000FFFF);
3868}
3869
3870static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3871{
3872	if (ms->m_flags == DLM_IFL_STUB_MS)
3873		return;
3874
3875	lkb->lkb_sbflags = ms->m_sbflags;
3876	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3877		         (ms->m_flags & 0x0000FFFF);
3878}
3879
3880static int receive_extralen(struct dlm_message *ms)
3881{
3882	return (ms->m_header.h_length - sizeof(struct dlm_message));
3883}
3884
3885static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3886		       struct dlm_message *ms)
3887{
3888	int len;
3889
3890	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3891		if (!lkb->lkb_lvbptr)
3892			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3893		if (!lkb->lkb_lvbptr)
3894			return -ENOMEM;
3895		len = receive_extralen(ms);
3896		if (len > ls->ls_lvblen)
3897			len = ls->ls_lvblen;
3898		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3899	}
3900	return 0;
3901}
3902
3903static void fake_bastfn(void *astparam, int mode)
3904{
3905	log_print("fake_bastfn should not be called");
3906}
3907
3908static void fake_astfn(void *astparam)
3909{
3910	log_print("fake_astfn should not be called");
3911}
3912
3913static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3914				struct dlm_message *ms)
3915{
3916	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3917	lkb->lkb_ownpid = ms->m_pid;
3918	lkb->lkb_remid = ms->m_lkid;
3919	lkb->lkb_grmode = DLM_LOCK_IV;
3920	lkb->lkb_rqmode = ms->m_rqmode;
3921
3922	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3923	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3924
3925	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3926		/* lkb was just created so there won't be an lvb yet */
3927		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3928		if (!lkb->lkb_lvbptr)
3929			return -ENOMEM;
3930	}
3931
3932	return 0;
3933}
3934
3935static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3936				struct dlm_message *ms)
3937{
3938	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3939		return -EBUSY;
3940
3941	if (receive_lvb(ls, lkb, ms))
3942		return -ENOMEM;
3943
3944	lkb->lkb_rqmode = ms->m_rqmode;
3945	lkb->lkb_lvbseq = ms->m_lvbseq;
3946
3947	return 0;
3948}
3949
3950static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3951			       struct dlm_message *ms)
3952{
3953	if (receive_lvb(ls, lkb, ms))
3954		return -ENOMEM;
3955	return 0;
3956}
3957
3958/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3959   uses to send a reply and that the remote end uses to process the reply. */
3960
3961static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3962{
3963	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3964	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3965	lkb->lkb_remid = ms->m_lkid;
3966}
3967
3968/* This is called after the rsb is locked so that we can safely inspect
3969   fields in the lkb. */
3970
3971static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3972{
3973	int from = ms->m_header.h_nodeid;
3974	int error = 0;
3975
3976	switch (ms->m_type) {
3977	case DLM_MSG_CONVERT:
3978	case DLM_MSG_UNLOCK:
3979	case DLM_MSG_CANCEL:
3980		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3981			error = -EINVAL;
3982		break;
3983
3984	case DLM_MSG_CONVERT_REPLY:
3985	case DLM_MSG_UNLOCK_REPLY:
3986	case DLM_MSG_CANCEL_REPLY:
3987	case DLM_MSG_GRANT:
3988	case DLM_MSG_BAST:
3989		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3990			error = -EINVAL;
3991		break;
3992
3993	case DLM_MSG_REQUEST_REPLY:
3994		if (!is_process_copy(lkb))
3995			error = -EINVAL;
3996		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3997			error = -EINVAL;
3998		break;
3999
4000	default:
4001		error = -EINVAL;
4002	}
4003
4004	if (error)
4005		log_error(lkb->lkb_resource->res_ls,
4006			  "ignore invalid message %d from %d %x %x %x %d",
4007			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4008			  lkb->lkb_flags, lkb->lkb_nodeid);
4009	return error;
4010}
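/* For example, a DLM_MSG_GRANT or a *_REPLY is only meaningful for a
   process copy of the lock and must come from the node we recorded as its
   master (a request reply may also arrive while the master is still
   unresolved), while CONVERT/UNLOCK/CANCEL must target a master copy and
   come from the node holding the process copy; anything else is dropped
   with the log_error() above. */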
4011
4012static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4013{
4014	char name[DLM_RESNAME_MAXLEN + 1];
4015	struct dlm_message *ms;
4016	struct dlm_mhandle *mh;
4017	struct dlm_rsb *r;
4018	uint32_t hash, b;
4019	int rv, dir_nodeid;
4020
4021	memset(name, 0, sizeof(name));
4022	memcpy(name, ms_name, len);
4023
4024	hash = jhash(name, len, 0);
4025	b = hash & (ls->ls_rsbtbl_size - 1);
4026
4027	dir_nodeid = dlm_hash2nodeid(ls, hash);
4028
4029	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4030
4031	spin_lock(&ls->ls_rsbtbl[b].lock);
4032	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4033	if (!rv) {
4034		spin_unlock(&ls->ls_rsbtbl[b].lock);
4035		log_error(ls, "repeat_remove on keep %s", name);
4036		return;
4037	}
4038
4039	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4040	if (!rv) {
4041		spin_unlock(&ls->ls_rsbtbl[b].lock);
4042		log_error(ls, "repeat_remove on toss %s", name);
4043		return;
4044	}
4045
4046	/* use ls->remove_name2 to avoid conflict with shrink? */
4047
4048	spin_lock(&ls->ls_remove_spin);
4049	ls->ls_remove_len = len;
4050	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4051	spin_unlock(&ls->ls_remove_spin);
4052	spin_unlock(&ls->ls_rsbtbl[b].lock);
4053
4054	rv = _create_message(ls, sizeof(struct dlm_message) + len,
4055			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4056	if (rv)
4057		return;
4058
4059	memcpy(ms->m_extra, name, len);
4060	ms->m_hash = hash;
4061
4062	send_message(mh, ms);
4063
4064	spin_lock(&ls->ls_remove_spin);
4065	ls->ls_remove_len = 0;
4066	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4067	spin_unlock(&ls->ls_remove_spin);
4068}
4069
4070static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4071{
4072	struct dlm_lkb *lkb;
4073	struct dlm_rsb *r;
4074	int from_nodeid;
4075	int error, namelen = 0;
4076
4077	from_nodeid = ms->m_header.h_nodeid;
4078
4079	error = create_lkb(ls, &lkb);
4080	if (error)
4081		goto fail;
4082
4083	receive_flags(lkb, ms);
4084	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4085	error = receive_request_args(ls, lkb, ms);
4086	if (error) {
4087		__put_lkb(ls, lkb);
4088		goto fail;
4089	}
4090
4091	/* The dir node is the authority on whether we are the master
4092	   for this rsb or not, so if the master sends us a request, we should
4093	   recreate the rsb if we've destroyed it.   This race happens when we
4094	   send a remove message to the dir node at the same time that the dir
4095	   node sends us a request for the rsb. */
4096
4097	namelen = receive_extralen(ms);
4098
4099	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4100			 R_RECEIVE_REQUEST, &r);
4101	if (error) {
4102		__put_lkb(ls, lkb);
4103		goto fail;
4104	}
4105
4106	lock_rsb(r);
4107
4108	if (r->res_master_nodeid != dlm_our_nodeid()) {
4109		error = validate_master_nodeid(ls, r, from_nodeid);
4110		if (error) {
4111			unlock_rsb(r);
4112			put_rsb(r);
4113			__put_lkb(ls, lkb);
4114			goto fail;
4115		}
4116	}
4117
4118	attach_lkb(r, lkb);
4119	error = do_request(r, lkb);
4120	send_request_reply(r, lkb, error);
4121	do_request_effects(r, lkb, error);
4122
4123	unlock_rsb(r);
4124	put_rsb(r);
4125
4126	if (error == -EINPROGRESS)
4127		error = 0;
4128	if (error)
4129		dlm_put_lkb(lkb);
4130	return 0;
4131
4132 fail:
4133	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4134	   and do this receive_request again from process_lookup_list once
4135	   we get the lookup reply.  This would avoid many repeated
4136	   ENOTBLK request failures when the lookup reply designating us
4137	   as master is delayed. */
4138
4139	/* We could repeatedly return -EBADR here if our send_remove() is
4140	   delayed in being sent/arriving/being processed on the dir node.
4141	   Another node would repeatedly look up the master, and the dir
4142	   node would continue returning our nodeid until our send_remove
4143	   took effect.
4144
4145	   We send another remove message in case our previous send_remove
4146	   was lost/ignored/missed somehow. */
4147
4148	if (error != -ENOTBLK) {
4149		log_limit(ls, "receive_request %x from %d %d",
4150			  ms->m_lkid, from_nodeid, error);
4151	}
4152
4153	if (namelen && error == -EBADR) {
4154		send_repeat_remove(ls, ms->m_extra, namelen);
4155		msleep(1000);
4156	}
4157
4158	setup_stub_lkb(ls, ms);
4159	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4160	return error;
4161}
4162
4163static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4164{
4165	struct dlm_lkb *lkb;
4166	struct dlm_rsb *r;
4167	int error, reply = 1;
4168
4169	error = find_lkb(ls, ms->m_remid, &lkb);
4170	if (error)
4171		goto fail;
4172
4173	if (lkb->lkb_remid != ms->m_lkid) {
4174		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4175			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4176			  (unsigned long long)lkb->lkb_recover_seq,
4177			  ms->m_header.h_nodeid, ms->m_lkid);
4178		error = -ENOENT;
4179		goto fail;
4180	}
4181
4182	r = lkb->lkb_resource;
4183
4184	hold_rsb(r);
4185	lock_rsb(r);
4186
4187	error = validate_message(lkb, ms);
4188	if (error)
4189		goto out;
4190
4191	receive_flags(lkb, ms);
4192
4193	error = receive_convert_args(ls, lkb, ms);
4194	if (error) {
4195		send_convert_reply(r, lkb, error);
4196		goto out;
4197	}
4198
4199	reply = !down_conversion(lkb);
4200
4201	error = do_convert(r, lkb);
4202	if (reply)
4203		send_convert_reply(r, lkb, error);
4204	do_convert_effects(r, lkb, error);
4205 out:
4206	unlock_rsb(r);
4207	put_rsb(r);
4208	dlm_put_lkb(lkb);
4209	return 0;
4210
4211 fail:
4212	setup_stub_lkb(ls, ms);
4213	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4214	return error;
4215}
4216
4217static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4218{
4219	struct dlm_lkb *lkb;
4220	struct dlm_rsb *r;
4221	int error;
4222
4223	error = find_lkb(ls, ms->m_remid, &lkb);
4224	if (error)
4225		goto fail;
4226
4227	if (lkb->lkb_remid != ms->m_lkid) {
4228		log_error(ls, "receive_unlock %x remid %x remote %d %x",
4229			  lkb->lkb_id, lkb->lkb_remid,
4230			  ms->m_header.h_nodeid, ms->m_lkid);
4231		error = -ENOENT;
4232		goto fail;
4233	}
4234
4235	r = lkb->lkb_resource;
4236
4237	hold_rsb(r);
4238	lock_rsb(r);
4239
4240	error = validate_message(lkb, ms);
4241	if (error)
4242		goto out;
4243
4244	receive_flags(lkb, ms);
4245
4246	error = receive_unlock_args(ls, lkb, ms);
4247	if (error) {
4248		send_unlock_reply(r, lkb, error);
4249		goto out;
4250	}
4251
4252	error = do_unlock(r, lkb);
4253	send_unlock_reply(r, lkb, error);
4254	do_unlock_effects(r, lkb, error);
4255 out:
4256	unlock_rsb(r);
4257	put_rsb(r);
4258	dlm_put_lkb(lkb);
4259	return 0;
4260
4261 fail:
4262	setup_stub_lkb(ls, ms);
4263	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4264	return error;
4265}
4266
4267static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4268{
4269	struct dlm_lkb *lkb;
4270	struct dlm_rsb *r;
4271	int error;
4272
4273	error = find_lkb(ls, ms->m_remid, &lkb);
4274	if (error)
4275		goto fail;
4276
4277	receive_flags(lkb, ms);
4278
4279	r = lkb->lkb_resource;
4280
4281	hold_rsb(r);
4282	lock_rsb(r);
4283
4284	error = validate_message(lkb, ms);
4285	if (error)
4286		goto out;
4287
4288	error = do_cancel(r, lkb);
4289	send_cancel_reply(r, lkb, error);
4290	do_cancel_effects(r, lkb, error);
4291 out:
4292	unlock_rsb(r);
4293	put_rsb(r);
4294	dlm_put_lkb(lkb);
4295	return 0;
4296
4297 fail:
4298	setup_stub_lkb(ls, ms);
4299	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4300	return error;
4301}
4302
4303static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4304{
4305	struct dlm_lkb *lkb;
4306	struct dlm_rsb *r;
4307	int error;
4308
4309	error = find_lkb(ls, ms->m_remid, &lkb);
4310	if (error)
4311		return error;
4312
4313	r = lkb->lkb_resource;
4314
4315	hold_rsb(r);
4316	lock_rsb(r);
4317
4318	error = validate_message(lkb, ms);
4319	if (error)
4320		goto out;
4321
4322	receive_flags_reply(lkb, ms);
4323	if (is_altmode(lkb))
4324		munge_altmode(lkb, ms);
4325	grant_lock_pc(r, lkb, ms);
4326	queue_cast(r, lkb, 0);
4327 out:
4328	unlock_rsb(r);
4329	put_rsb(r);
4330	dlm_put_lkb(lkb);
4331	return 0;
4332}
4333
4334static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4335{
4336	struct dlm_lkb *lkb;
4337	struct dlm_rsb *r;
4338	int error;
4339
4340	error = find_lkb(ls, ms->m_remid, &lkb);
4341	if (error)
4342		return error;
4343
4344	r = lkb->lkb_resource;
4345
4346	hold_rsb(r);
4347	lock_rsb(r);
4348
4349	error = validate_message(lkb, ms);
4350	if (error)
4351		goto out;
4352
4353	queue_bast(r, lkb, ms->m_bastmode);
4354	lkb->lkb_highbast = ms->m_bastmode;
4355 out:
4356	unlock_rsb(r);
4357	put_rsb(r);
4358	dlm_put_lkb(lkb);
4359	return 0;
4360}
4361
4362static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4363{
4364	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4365
4366	from_nodeid = ms->m_header.h_nodeid;
4367	our_nodeid = dlm_our_nodeid();
4368
4369	len = receive_extralen(ms);
4370
4371	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4372				  &ret_nodeid, NULL);
4373
4374	/* Optimization: we're master so treat lookup as a request */
4375	if (!error && ret_nodeid == our_nodeid) {
4376		receive_request(ls, ms);
4377		return;
4378	}
4379	send_lookup_reply(ls, ms, ret_nodeid, error);
4380}
4381
4382static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4383{
4384	char name[DLM_RESNAME_MAXLEN+1];
4385	struct dlm_rsb *r;
4386	uint32_t hash, b;
4387	int rv, len, dir_nodeid, from_nodeid;
4388
4389	from_nodeid = ms->m_header.h_nodeid;
4390
4391	len = receive_extralen(ms);
4392
4393	if (len > DLM_RESNAME_MAXLEN) {
4394		log_error(ls, "receive_remove from %d bad len %d",
4395			  from_nodeid, len);
4396		return;
4397	}
4398
4399	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4400	if (dir_nodeid != dlm_our_nodeid()) {
4401		log_error(ls, "receive_remove from %d bad nodeid %d",
4402			  from_nodeid, dir_nodeid);
4403		return;
4404	}
4405
4406	/* Look for name on rsbtbl.toss, if it's there, kill it.
4407	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4408	   message.  This is an expected race between the dir node sending a
4409	   request to the master node at the same time as the master node sends
4410	   a remove to the dir node.  The resolution to that race is for the
4411	   dir node to ignore the remove message, and the master node to
4412	   recreate the master rsb when it gets a request from the dir node for
4413	   an rsb it doesn't have. */
4414
4415	memset(name, 0, sizeof(name));
4416	memcpy(name, ms->m_extra, len);
4417
4418	hash = jhash(name, len, 0);
4419	b = hash & (ls->ls_rsbtbl_size - 1);
4420
4421	spin_lock(&ls->ls_rsbtbl[b].lock);
4422
4423	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4424	if (rv) {
4425		/* verify the rsb is on keep list per comment above */
4426		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4427		if (rv) {
4428			/* should not happen */
4429			log_error(ls, "receive_remove from %d not found %s",
4430				  from_nodeid, name);
4431			spin_unlock(&ls->ls_rsbtbl[b].lock);
4432			return;
4433		}
4434		if (r->res_master_nodeid != from_nodeid) {
4435			/* should not happen */
4436			log_error(ls, "receive_remove keep from %d master %d",
4437				  from_nodeid, r->res_master_nodeid);
4438			dlm_print_rsb(r);
4439			spin_unlock(&ls->ls_rsbtbl[b].lock);
4440			return;
4441		}
4442
4443		log_debug(ls, "receive_remove from %d master %d first %x %s",
4444			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4445			  name);
4446		spin_unlock(&ls->ls_rsbtbl[b].lock);
4447		return;
4448	}
4449
4450	if (r->res_master_nodeid != from_nodeid) {
4451		log_error(ls, "receive_remove toss from %d master %d",
4452			  from_nodeid, r->res_master_nodeid);
4453		dlm_print_rsb(r);
4454		spin_unlock(&ls->ls_rsbtbl[b].lock);
4455		return;
4456	}
4457
4458	if (kref_put(&r->res_ref, kill_rsb)) {
4459		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4460		spin_unlock(&ls->ls_rsbtbl[b].lock);
4461		dlm_free_rsb(r);
4462	} else {
4463		log_error(ls, "receive_remove from %d rsb ref error",
4464			  from_nodeid);
4465		dlm_print_rsb(r);
4466		spin_unlock(&ls->ls_rsbtbl[b].lock);
4467	}
4468}
4469
4470static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4471{
4472	do_purge(ls, ms->m_nodeid, ms->m_pid);
4473}
4474
4475static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4476{
4477	struct dlm_lkb *lkb;
4478	struct dlm_rsb *r;
4479	int error, mstype, result;
4480	int from_nodeid = ms->m_header.h_nodeid;
4481
4482	error = find_lkb(ls, ms->m_remid, &lkb);
4483	if (error)
4484		return error;
4485
4486	r = lkb->lkb_resource;
4487	hold_rsb(r);
4488	lock_rsb(r);
4489
4490	error = validate_message(lkb, ms);
4491	if (error)
4492		goto out;
4493
4494	mstype = lkb->lkb_wait_type;
4495	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4496	if (error) {
4497		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4498			  lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4499		dlm_dump_rsb(r);
4500		goto out;
4501	}
4502
4503	/* Optimization: the dir node was also the master, so it took our
4504	   lookup as a request and sent request reply instead of lookup reply */
4505	if (mstype == DLM_MSG_LOOKUP) {
4506		r->res_master_nodeid = from_nodeid;
4507		r->res_nodeid = from_nodeid;
4508		lkb->lkb_nodeid = from_nodeid;
4509	}
4510
4511	/* this is the value returned from do_request() on the master */
4512	result = ms->m_result;
4513
4514	switch (result) {
4515	case -EAGAIN:
4516		/* request would block (be queued) on remote master */
4517		queue_cast(r, lkb, -EAGAIN);
4518		confirm_master(r, -EAGAIN);
4519		unhold_lkb(lkb); /* undoes create_lkb() */
4520		break;
4521
4522	case -EINPROGRESS:
4523	case 0:
4524		/* request was queued or granted on remote master */
4525		receive_flags_reply(lkb, ms);
4526		lkb->lkb_remid = ms->m_lkid;
4527		if (is_altmode(lkb))
4528			munge_altmode(lkb, ms);
4529		if (result) {
4530			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4531			add_timeout(lkb);
4532		} else {
4533			grant_lock_pc(r, lkb, ms);
4534			queue_cast(r, lkb, 0);
4535		}
4536		confirm_master(r, result);
4537		break;
4538
4539	case -EBADR:
4540	case -ENOTBLK:
4541		/* find_rsb failed to find rsb or rsb wasn't master */
4542		log_limit(ls, "receive_request_reply %x from %d %d "
4543			  "master %d dir %d first %x %s", lkb->lkb_id,
4544			  from_nodeid, result, r->res_master_nodeid,
4545			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4546
4547		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4548		    r->res_master_nodeid != dlm_our_nodeid()) {
4549			/* cause _request_lock->set_master->send_lookup */
4550			r->res_master_nodeid = 0;
4551			r->res_nodeid = -1;
4552			lkb->lkb_nodeid = -1;
4553		}
4554
4555		if (is_overlap(lkb)) {
4556			/* we'll ignore error in cancel/unlock reply */
4557			queue_cast_overlap(r, lkb);
4558			confirm_master(r, result);
4559			unhold_lkb(lkb); /* undoes create_lkb() */
4560		} else {
4561			_request_lock(r, lkb);
4562
4563			if (r->res_master_nodeid == dlm_our_nodeid())
4564				confirm_master(r, 0);
4565		}
4566		break;
4567
4568	default:
4569		log_error(ls, "receive_request_reply %x error %d",
4570			  lkb->lkb_id, result);
4571	}
4572
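	/* An overlapping unlock or cancel may have been requested while we
	   were waiting for this reply.  Now that the request is known to
	   exist on the master, forward the overlap op to it: unlock if the
	   request was queued or granted, cancel only if it is still queued. */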
4573	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4574		log_debug(ls, "receive_request_reply %x result %d unlock",
4575			  lkb->lkb_id, result);
4576		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4577		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4578		send_unlock(r, lkb);
4579	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4580		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4581		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4582		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4583		send_cancel(r, lkb);
4584	} else {
4585		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4586		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4587	}
4588 out:
4589	unlock_rsb(r);
4590	put_rsb(r);
4591	dlm_put_lkb(lkb);
4592	return 0;
4593}
4594
4595static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4596				    struct dlm_message *ms)
4597{
4598	/* this is the value returned from do_convert() on the master */
4599	switch (ms->m_result) {
4600	case -EAGAIN:
4601		/* convert would block (be queued) on remote master */
4602		queue_cast(r, lkb, -EAGAIN);
4603		break;
4604
4605	case -EDEADLK:
4606		receive_flags_reply(lkb, ms);
4607		revert_lock_pc(r, lkb);
4608		queue_cast(r, lkb, -EDEADLK);
4609		break;
4610
4611	case -EINPROGRESS:
4612		/* convert was queued on remote master */
4613		receive_flags_reply(lkb, ms);
4614		if (is_demoted(lkb))
4615			munge_demoted(lkb);
4616		del_lkb(r, lkb);
4617		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4618		add_timeout(lkb);
4619		break;
4620
4621	case 0:
4622		/* convert was granted on remote master */
4623		receive_flags_reply(lkb, ms);
4624		if (is_demoted(lkb))
4625			munge_demoted(lkb);
4626		grant_lock_pc(r, lkb, ms);
4627		queue_cast(r, lkb, 0);
4628		break;
4629
4630	default:
4631		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4632			  lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4633			  ms->m_result);
4634		dlm_print_rsb(r);
4635		dlm_print_lkb(lkb);
4636	}
4637}
4638
4639static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4640{
4641	struct dlm_rsb *r = lkb->lkb_resource;
4642	int error;
4643
4644	hold_rsb(r);
4645	lock_rsb(r);
4646
4647	error = validate_message(lkb, ms);
4648	if (error)
4649		goto out;
4650
4651	/* stub reply can happen with waiters_mutex held */
4652	error = remove_from_waiters_ms(lkb, ms);
4653	if (error)
4654		goto out;
4655
4656	__receive_convert_reply(r, lkb, ms);
4657 out:
4658	unlock_rsb(r);
4659	put_rsb(r);
4660}
4661
4662static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4663{
4664	struct dlm_lkb *lkb;
4665	int error;
4666
4667	error = find_lkb(ls, ms->m_remid, &lkb);
4668	if (error)
4669		return error;
4670
4671	_receive_convert_reply(lkb, ms);
4672	dlm_put_lkb(lkb);
4673	return 0;
4674}
4675
4676static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4677{
4678	struct dlm_rsb *r = lkb->lkb_resource;
4679	int error;
4680
4681	hold_rsb(r);
4682	lock_rsb(r);
4683
4684	error = validate_message(lkb, ms);
4685	if (error)
4686		goto out;
4687
4688	/* stub reply can happen with waiters_mutex held */
4689	error = remove_from_waiters_ms(lkb, ms);
4690	if (error)
4691		goto out;
4692
4693	/* this is the value returned from do_unlock() on the master */
4694
4695	switch (ms->m_result) {
4696	case -DLM_EUNLOCK:
4697		receive_flags_reply(lkb, ms);
4698		remove_lock_pc(r, lkb);
4699		queue_cast(r, lkb, -DLM_EUNLOCK);
4700		break;
4701	case -ENOENT:
4702		break;
4703	default:
4704		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4705			  lkb->lkb_id, ms->m_result);
4706	}
4707 out:
4708	unlock_rsb(r);
4709	put_rsb(r);
4710}
4711
4712static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4713{
4714	struct dlm_lkb *lkb;
4715	int error;
4716
4717	error = find_lkb(ls, ms->m_remid, &lkb);
4718	if (error)
4719		return error;
4720
4721	_receive_unlock_reply(lkb, ms);
4722	dlm_put_lkb(lkb);
4723	return 0;
4724}
4725
4726static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4727{
4728	struct dlm_rsb *r = lkb->lkb_resource;
4729	int error;
4730
4731	hold_rsb(r);
4732	lock_rsb(r);
4733
4734	error = validate_message(lkb, ms);
4735	if (error)
4736		goto out;
4737
4738	/* stub reply can happen with waiters_mutex held */
4739	error = remove_from_waiters_ms(lkb, ms);
4740	if (error)
4741		goto out;
4742
4743	/* this is the value returned from do_cancel() on the master */
4744
4745	switch (ms->m_result) {
4746	case -DLM_ECANCEL:
4747		receive_flags_reply(lkb, ms);
4748		revert_lock_pc(r, lkb);
4749		queue_cast(r, lkb, -DLM_ECANCEL);
4750		break;
4751	case 0:
4752		break;
4753	default:
4754		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4755			  lkb->lkb_id, ms->m_result);
4756	}
4757 out:
4758	unlock_rsb(r);
4759	put_rsb(r);
4760}
4761
4762static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4763{
4764	struct dlm_lkb *lkb;
4765	int error;
4766
4767	error = find_lkb(ls, ms->m_remid, &lkb);
4768	if (error)
4769		return error;
4770
4771	_receive_cancel_reply(lkb, ms);
4772	dlm_put_lkb(lkb);
4773	return 0;
4774}
4775
4776static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4777{
4778	struct dlm_lkb *lkb;
4779	struct dlm_rsb *r;
4780	int error, ret_nodeid;
4781	int do_lookup_list = 0;
4782
4783	error = find_lkb(ls, ms->m_lkid, &lkb);
4784	if (error) {
4785		log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4786		return;
4787	}
4788
4789	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4790	   FIXME: will a non-zero error ever be returned? */
4791
4792	r = lkb->lkb_resource;
4793	hold_rsb(r);
4794	lock_rsb(r);
4795
4796	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4797	if (error)
4798		goto out;
4799
4800	ret_nodeid = ms->m_nodeid;
4801
4802	/* We sometimes receive a request from the dir node for this
4803	   rsb before we've received the dir node's lookup_reply for it.
4804	   The request from the dir node implies we're the master, so we set
4805	   ourselves as master in receive_request_reply, and verify here that
4806	   we are indeed the master. */
4807
4808	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4809		/* This should never happen */
4810		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4811			  "master %d dir %d our %d first %x %s",
4812			  lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4813			  r->res_master_nodeid, r->res_dir_nodeid,
4814			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4815	}
4816
4817	if (ret_nodeid == dlm_our_nodeid()) {
4818		r->res_master_nodeid = ret_nodeid;
4819		r->res_nodeid = 0;
4820		do_lookup_list = 1;
4821		r->res_first_lkid = 0;
4822	} else if (ret_nodeid == -1) {
4823		/* the remote node doesn't believe it's the dir node */
4824		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4825			  lkb->lkb_id, ms->m_header.h_nodeid);
4826		r->res_master_nodeid = 0;
4827		r->res_nodeid = -1;
4828		lkb->lkb_nodeid = -1;
4829	} else {
4830		/* set_master() will set lkb_nodeid from r */
4831		r->res_master_nodeid = ret_nodeid;
4832		r->res_nodeid = ret_nodeid;
4833	}
4834
4835	if (is_overlap(lkb)) {
4836		log_debug(ls, "receive_lookup_reply %x unlock %x",
4837			  lkb->lkb_id, lkb->lkb_flags);
4838		queue_cast_overlap(r, lkb);
4839		unhold_lkb(lkb); /* undoes create_lkb() */
4840		goto out_list;
4841	}
4842
4843	_request_lock(r, lkb);
4844
4845 out_list:
4846	if (do_lookup_list)
4847		process_lookup_list(r);
4848 out:
4849	unlock_rsb(r);
4850	put_rsb(r);
4851	dlm_put_lkb(lkb);
4852}
4853
4854static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4855			     uint32_t saved_seq)
4856{
4857	int error = 0, noent = 0;
4858
4859	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4860		log_limit(ls, "receive %d from non-member %d %x %x %d",
4861			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4862			  ms->m_remid, ms->m_result);
4863		return;
4864	}
4865
4866	switch (ms->m_type) {
4867
4868	/* messages sent to a master node */
4869
4870	case DLM_MSG_REQUEST:
4871		error = receive_request(ls, ms);
4872		break;
4873
4874	case DLM_MSG_CONVERT:
4875		error = receive_convert(ls, ms);
4876		break;
4877
4878	case DLM_MSG_UNLOCK:
4879		error = receive_unlock(ls, ms);
4880		break;
4881
4882	case DLM_MSG_CANCEL:
4883		noent = 1;
4884		error = receive_cancel(ls, ms);
4885		break;
4886
4887	/* messages sent from a master node (replies to above) */
4888
4889	case DLM_MSG_REQUEST_REPLY:
4890		error = receive_request_reply(ls, ms);
4891		break;
4892
4893	case DLM_MSG_CONVERT_REPLY:
4894		error = receive_convert_reply(ls, ms);
4895		break;
4896
4897	case DLM_MSG_UNLOCK_REPLY:
4898		error = receive_unlock_reply(ls, ms);
4899		break;
4900
4901	case DLM_MSG_CANCEL_REPLY:
4902		error = receive_cancel_reply(ls, ms);
4903		break;
4904
4905	/* messages sent from a master node (only two types of async msg) */
4906
4907	case DLM_MSG_GRANT:
4908		noent = 1;
4909		error = receive_grant(ls, ms);
4910		break;
4911
4912	case DLM_MSG_BAST:
4913		noent = 1;
4914		error = receive_bast(ls, ms);
4915		break;
4916
4917	/* messages sent to a dir node */
4918
4919	case DLM_MSG_LOOKUP:
4920		receive_lookup(ls, ms);
4921		break;
4922
4923	case DLM_MSG_REMOVE:
4924		receive_remove(ls, ms);
4925		break;
4926
4927	/* messages sent from a dir node (remove has no reply) */
4928
4929	case DLM_MSG_LOOKUP_REPLY:
4930		receive_lookup_reply(ls, ms);
4931		break;
4932
4933	/* other messages */
4934
4935	case DLM_MSG_PURGE:
4936		receive_purge(ls, ms);
4937		break;
4938
4939	default:
4940		log_error(ls, "unknown message type %d", ms->m_type);
4941	}
4942
4943	/*
4944	 * When checking for ENOENT, we're checking the result of
4945	 * find_lkb(m_remid):
4946	 *
4947	 * The lock id referenced in the message wasn't found.  This may
4948	 * happen in normal usage for the async messages and cancel, so
4949	 * only use log_debug for them.
4950	 *
4951	 * Some errors are expected and normal.
4952	 */
4953
4954	if (error == -ENOENT && noent) {
4955		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4956			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4957			  ms->m_lkid, saved_seq);
4958	} else if (error == -ENOENT) {
4959		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4960			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4961			  ms->m_lkid, saved_seq);
4962
4963		if (ms->m_type == DLM_MSG_CONVERT)
4964			dlm_dump_rsb_hash(ls, ms->m_hash);
4965	}
4966
4967	if (error == -EINVAL) {
4968		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4969			  "saved_seq %u",
4970			  ms->m_type, ms->m_header.h_nodeid,
4971			  ms->m_lkid, ms->m_remid, saved_seq);
4972	}
4973}
4974
4975/* If the lockspace is in recovery mode (locking stopped), then normal
4976   messages are saved on the requestqueue for processing after recovery is
4977   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4978   messages off the requestqueue before we process new ones. This occurs right
4979   after recovery completes when we transition from saving all messages on
4980   requestqueue, to processing all the saved messages, to processing new
4981   messages as they arrive. */
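/* Illustrative flow of the above, using the functions below:

     locking stopped:   dlm_receive_message() -> dlm_add_requestqueue()
     recovery done:     dlm_recoverd -> dlm_receive_message_saved()
                                     -> _receive_message(ls, ms, saved_seq)
     normal operation:  dlm_receive_message() -> dlm_wait_requestqueue()
                                              -> _receive_message(ls, ms, 0) */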
4982
4983static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4984				int nodeid)
4985{
4986	if (dlm_locking_stopped(ls)) {
4987		/* If we were a member of this lockspace, left, and rejoined,
4988		   other nodes may still be sending us messages from the
4989		   lockspace generation before we left. */
4990		if (!ls->ls_generation) {
4991			log_limit(ls, "receive %d from %d ignore old gen",
4992				  ms->m_type, nodeid);
4993			return;
4994		}
4995
4996		dlm_add_requestqueue(ls, nodeid, ms);
4997	} else {
4998		dlm_wait_requestqueue(ls);
4999		_receive_message(ls, ms, 0);
5000	}
5001}
5002
5003/* This is called by dlm_recoverd to process messages that were saved on
5004   the requestqueue. */
5005
5006void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5007			       uint32_t saved_seq)
5008{
5009	_receive_message(ls, ms, saved_seq);
5010}
5011
5012/* This is called by the midcomms layer when something is received for
5013   the lockspace.  It could be either a MSG (normal message sent as part of
5014   standard locking activity) or an RCOM (recovery message sent as part of
5015   lockspace recovery). */
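/* Dispatch sketch for dlm_receive_buffer() below:

     DLM_MSG  -> dlm_message_in() -> dlm_receive_message()   (locking traffic)
     DLM_RCOM -> dlm_rcom_in()    -> dlm_receive_rcom()      (recovery traffic)

   An unknown lockspace is logged, and a DLM_RCOM_STATUS for it is answered
   with dlm_send_ls_not_ready(). */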
5016
5017void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5018{
5019	struct dlm_header *hd = &p->header;
5020	struct dlm_ls *ls;
5021	int type = 0;
5022
5023	switch (hd->h_cmd) {
5024	case DLM_MSG:
5025		dlm_message_in(&p->message);
5026		type = p->message.m_type;
5027		break;
5028	case DLM_RCOM:
5029		dlm_rcom_in(&p->rcom);
5030		type = p->rcom.rc_type;
5031		break;
5032	default:
5033		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5034		return;
5035	}
5036
5037	if (hd->h_nodeid != nodeid) {
5038		log_print("invalid h_nodeid %d from %d lockspace %x",
5039			  hd->h_nodeid, nodeid, hd->h_lockspace);
5040		return;
5041	}
5042
5043	ls = dlm_find_lockspace_global(hd->h_lockspace);
5044	if (!ls) {
5045		if (dlm_config.ci_log_debug) {
5046			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5047				"%u from %d cmd %d type %d\n",
5048				hd->h_lockspace, nodeid, hd->h_cmd, type);
5049		}
5050
5051		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5052			dlm_send_ls_not_ready(nodeid, &p->rcom);
5053		return;
5054	}
5055
5056	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5057	   be inactive (in this ls) before transitioning to recovery mode */
5058
5059	down_read(&ls->ls_recv_active);
5060	if (hd->h_cmd == DLM_MSG)
5061		dlm_receive_message(ls, &p->message, nodeid);
5062	else
5063		dlm_receive_rcom(ls, &p->rcom, nodeid);
5064	up_read(&ls->ls_recv_active);
5065
5066	dlm_put_lockspace(ls);
5067}
5068
5069static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5070				   struct dlm_message *ms_stub)
5071{
5072	if (middle_conversion(lkb)) {
5073		hold_lkb(lkb);
5074		memset(ms_stub, 0, sizeof(struct dlm_message));
5075		ms_stub->m_flags = DLM_IFL_STUB_MS;
5076		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5077		ms_stub->m_result = -EINPROGRESS;
5078		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5079		_receive_convert_reply(lkb, ms_stub);
5080
5081		/* Same special case as in receive_rcom_lock_args() */
5082		lkb->lkb_grmode = DLM_LOCK_IV;
5083		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5084		unhold_lkb(lkb);
5085
5086	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5087		lkb->lkb_flags |= DLM_IFL_RESEND;
5088	}
5089
5090	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5091	   conversions are async; there's no reply from the remote master */
5092}
5093
5094/* A waiting lkb needs recovery if the master node has failed, or
5095   the master node is changing (only when no directory is used) */
5096
5097static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5098				 int dir_nodeid)
5099{
5100	if (dlm_no_directory(ls))
5101		return 1;
5102
5103	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5104		return 1;
5105
5106	return 0;
5107}
5108
5109/* Recovery for locks that are waiting for replies from nodes that are now
5110   gone.  We can just complete unlocks and cancels by faking a reply from the
5111   dead node.  Requests and up-conversions we flag to be resent after
5112   recovery.  Down-conversions can just be completed with a fake reply like
5113   unlocks.  Conversions between PR and CW need special attention. */
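/* Rough summary of dlm_recover_waiters_pre() below, per wait_type (the
   switch statement is the authoritative version):

     DLM_MSG_LOOKUP   -> flag RESEND; the lookup is redone after recovery
     DLM_MSG_REQUEST  -> flag RESEND; the request is resent after recovery
     DLM_MSG_CONVERT  -> recover_convert_waiter(): PR<->CW conversions get a
                         fake -EINPROGRESS reply, other conversions are
                         flagged RESEND
     DLM_MSG_UNLOCK   -> fake -DLM_EUNLOCK reply (-ENOENT for an overlapped
                         unlock of a never-granted lock)
     DLM_MSG_CANCEL   -> fake -DLM_ECANCEL reply (0 for an overlapped cancel
                         of a never-granted lock) */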
5114
5115void dlm_recover_waiters_pre(struct dlm_ls *ls)
5116{
5117	struct dlm_lkb *lkb, *safe;
5118	struct dlm_message *ms_stub;
5119	int wait_type, stub_unlock_result, stub_cancel_result;
5120	int dir_nodeid;
5121
5122	ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
5123	if (!ms_stub) {
5124		log_error(ls, "dlm_recover_waiters_pre no mem");
5125		return;
5126	}
5127
5128	mutex_lock(&ls->ls_waiters_mutex);
5129
5130	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5131
5132		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5133
5134		/* exclude debug messages about unlocks because there can be so
5135		   many and they aren't very interesting */
5136
5137		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5138			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5139				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5140				  lkb->lkb_id,
5141				  lkb->lkb_remid,
5142				  lkb->lkb_wait_type,
5143				  lkb->lkb_resource->res_nodeid,
5144				  lkb->lkb_nodeid,
5145				  lkb->lkb_wait_nodeid,
5146				  dir_nodeid);
5147		}
5148
5149		/* all outstanding lookups, regardless of destination, will be
5150		   resent after recovery is done */
5151
5152		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5153			lkb->lkb_flags |= DLM_IFL_RESEND;
5154			continue;
5155		}
5156
5157		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5158			continue;
5159
5160		wait_type = lkb->lkb_wait_type;
5161		stub_unlock_result = -DLM_EUNLOCK;
5162		stub_cancel_result = -DLM_ECANCEL;
5163
5164		/* Main reply may have been received leaving a zero wait_type,
5165		   but a reply for the overlapping op may not have been
5166		   received.  In that case we need to fake the appropriate
5167		   reply for the overlap op. */
5168
5169		if (!wait_type) {
5170			if (is_overlap_cancel(lkb)) {
5171				wait_type = DLM_MSG_CANCEL;
5172				if (lkb->lkb_grmode == DLM_LOCK_IV)
5173					stub_cancel_result = 0;
5174			}
5175			if (is_overlap_unlock(lkb)) {
5176				wait_type = DLM_MSG_UNLOCK;
5177				if (lkb->lkb_grmode == DLM_LOCK_IV)
5178					stub_unlock_result = -ENOENT;
5179			}
5180
5181			log_debug(ls, "rwpre overlap %x %x %d %d %d",
5182				  lkb->lkb_id, lkb->lkb_flags, wait_type,
5183				  stub_cancel_result, stub_unlock_result);
5184		}
5185
5186		switch (wait_type) {
5187
5188		case DLM_MSG_REQUEST:
5189			lkb->lkb_flags |= DLM_IFL_RESEND;
5190			break;
5191
5192		case DLM_MSG_CONVERT:
5193			recover_convert_waiter(ls, lkb, ms_stub);
5194			break;
5195
5196		case DLM_MSG_UNLOCK:
5197			hold_lkb(lkb);
5198			memset(ms_stub, 0, sizeof(struct dlm_message));
5199			ms_stub->m_flags = DLM_IFL_STUB_MS;
5200			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5201			ms_stub->m_result = stub_unlock_result;
5202			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5203			_receive_unlock_reply(lkb, ms_stub);
5204			dlm_put_lkb(lkb);
5205			break;
5206
5207		case DLM_MSG_CANCEL:
5208			hold_lkb(lkb);
5209			memset(ms_stub, 0, sizeof(struct dlm_message));
5210			ms_stub->m_flags = DLM_IFL_STUB_MS;
5211			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5212			ms_stub->m_result = stub_cancel_result;
5213			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5214			_receive_cancel_reply(lkb, ms_stub);
5215			dlm_put_lkb(lkb);
5216			break;
5217
5218		default:
5219			log_error(ls, "invalid lkb wait_type %d %d",
5220				  lkb->lkb_wait_type, wait_type);
5221		}
5222		schedule();
5223	}
5224	mutex_unlock(&ls->ls_waiters_mutex);
5225	kfree(ms_stub);
5226}
5227
5228static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5229{
5230	struct dlm_lkb *lkb;
5231	int found = 0;
5232
5233	mutex_lock(&ls->ls_waiters_mutex);
5234	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5235		if (lkb->lkb_flags & DLM_IFL_RESEND) {
5236			hold_lkb(lkb);
5237			found = 1;
5238			break;
5239		}
5240	}
5241	mutex_unlock(&ls->ls_waiters_mutex);
5242
5243	if (!found)
5244		lkb = NULL;
5245	return lkb;
5246}
5247
5248/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5249   master or dir-node for r.  Processing the lkb may result in it being placed
5250   back on waiters. */
5251
5252/* We do this after normal locking has been enabled and any saved messages
5253   (in requestqueue) have been processed.  We should be confident that at
5254   this point we won't get or process a reply to any of these waiting
5255   operations.  But, new ops may be coming in on the rsbs/locks here from
5256   userspace or remotely. */
5257
5258/* there may have been an overlap unlock/cancel prior to recovery or after
5259	   recovery.  if before, the lkb may still have a positive wait_count; if after, the
5260   overlap flag would just have been set and nothing new sent.  we can be
5261	   confident here that any replies to either the initial op or overlap ops
5262   prior to recovery have been received. */
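/* In short: for each RESEND lkb, dlm_recover_waiters_post() below either
   redoes the original op (_request_lock or _convert_lock), or, if an
   overlapping unlock/cancel was pending, completes that instead by queueing
   -DLM_EUNLOCK / -DLM_ECANCEL (or force-unlocking a convert). */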
5263
5264int dlm_recover_waiters_post(struct dlm_ls *ls)
5265{
5266	struct dlm_lkb *lkb;
5267	struct dlm_rsb *r;
5268	int error = 0, mstype, err, oc, ou;
5269
5270	while (1) {
5271		if (dlm_locking_stopped(ls)) {
5272			log_debug(ls, "recover_waiters_post aborted");
5273			error = -EINTR;
5274			break;
5275		}
5276
5277		lkb = find_resend_waiter(ls);
5278		if (!lkb)
5279			break;
5280
5281		r = lkb->lkb_resource;
5282		hold_rsb(r);
5283		lock_rsb(r);
5284
5285		mstype = lkb->lkb_wait_type;
5286		oc = is_overlap_cancel(lkb);
5287		ou = is_overlap_unlock(lkb);
5288		err = 0;
5289
5290		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5291			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5292			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5293			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5294			  dlm_dir_nodeid(r), oc, ou);
5295
5296		/* At this point we assume that we won't get a reply to any
5297		   previous op or overlap op on this lock.  First, do a big
5298		   remove_from_waiters() for all previous ops. */
5299
5300		lkb->lkb_flags &= ~DLM_IFL_RESEND;
5301		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5302		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5303		lkb->lkb_wait_type = 0;
5304		lkb->lkb_wait_count = 0;
5305		mutex_lock(&ls->ls_waiters_mutex);
5306		list_del_init(&lkb->lkb_wait_reply);
5307		mutex_unlock(&ls->ls_waiters_mutex);
5308		unhold_lkb(lkb); /* for waiters list */
5309
5310		if (oc || ou) {
5311			/* do an unlock or cancel instead of resending */
5312			switch (mstype) {
5313			case DLM_MSG_LOOKUP:
5314			case DLM_MSG_REQUEST:
5315				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5316							-DLM_ECANCEL);
5317				unhold_lkb(lkb); /* undoes create_lkb() */
5318				break;
5319			case DLM_MSG_CONVERT:
5320				if (oc) {
5321					queue_cast(r, lkb, -DLM_ECANCEL);
5322				} else {
5323					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5324					_unlock_lock(r, lkb);
5325				}
5326				break;
5327			default:
5328				err = 1;
5329			}
5330		} else {
5331			switch (mstype) {
5332			case DLM_MSG_LOOKUP:
5333			case DLM_MSG_REQUEST:
5334				_request_lock(r, lkb);
5335				if (is_master(r))
5336					confirm_master(r, 0);
5337				break;
5338			case DLM_MSG_CONVERT:
5339				_convert_lock(r, lkb);
5340				break;
5341			default:
5342				err = 1;
5343			}
5344		}
5345
5346		if (err) {
5347			log_error(ls, "waiter %x msg %d r_nodeid %d "
5348				  "dir_nodeid %d overlap %d %d",
5349				  lkb->lkb_id, mstype, r->res_nodeid,
5350				  dlm_dir_nodeid(r), oc, ou);
5351		}
5352		unlock_rsb(r);
5353		put_rsb(r);
5354		dlm_put_lkb(lkb);
5355	}
5356
5357	return error;
5358}
5359
5360static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5361			      struct list_head *list)
5362{
5363	struct dlm_lkb *lkb, *safe;
5364
5365	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5366		if (!is_master_copy(lkb))
5367			continue;
5368
5369		/* don't purge lkbs we've added in recover_master_copy for
5370		   the current recovery seq */
5371
5372		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5373			continue;
5374
5375		del_lkb(r, lkb);
5376
5377		/* this put should free the lkb */
5378		if (!dlm_put_lkb(lkb))
5379			log_error(ls, "purged mstcpy lkb not released");
5380	}
5381}
5382
5383void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5384{
5385	struct dlm_ls *ls = r->res_ls;
5386
5387	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5388	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5389	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5390}
5391
5392static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5393			    struct list_head *list,
5394			    int nodeid_gone, unsigned int *count)
5395{
5396	struct dlm_lkb *lkb, *safe;
5397
5398	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5399		if (!is_master_copy(lkb))
5400			continue;
5401
5402		if ((lkb->lkb_nodeid == nodeid_gone) ||
5403		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5404
5405			/* tell recover_lvb to invalidate the lvb
5406			   because a node holding EX/PW failed */
5407			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5408			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5409				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5410			}
5411
5412			del_lkb(r, lkb);
5413
5414			/* this put should free the lkb */
5415			if (!dlm_put_lkb(lkb))
5416				log_error(ls, "purged dead lkb not released");
5417
5418			rsb_set_flag(r, RSB_RECOVER_GRANT);
5419
5420			(*count)++;
5421		}
5422	}
5423}
5424
5425/* Get rid of locks held by nodes that are gone. */
5426
5427void dlm_recover_purge(struct dlm_ls *ls)
5428{
5429	struct dlm_rsb *r;
5430	struct dlm_member *memb;
5431	int nodes_count = 0;
5432	int nodeid_gone = 0;
5433	unsigned int lkb_count = 0;
5434
5435	/* cache one removed nodeid to optimize the common
5436	   case of a single node removed */
5437
5438	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5439		nodes_count++;
5440		nodeid_gone = memb->nodeid;
5441	}
5442
5443	if (!nodes_count)
5444		return;
5445
5446	down_write(&ls->ls_root_sem);
5447	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5448		hold_rsb(r);
5449		lock_rsb(r);
5450		if (is_master(r)) {
5451			purge_dead_list(ls, r, &r->res_grantqueue,
5452					nodeid_gone, &lkb_count);
5453			purge_dead_list(ls, r, &r->res_convertqueue,
5454					nodeid_gone, &lkb_count);
5455			purge_dead_list(ls, r, &r->res_waitqueue,
5456					nodeid_gone, &lkb_count);
5457		}
5458		unlock_rsb(r);
5459		unhold_rsb(r);
5460		cond_resched();
5461	}
5462	up_write(&ls->ls_root_sem);
5463
5464	if (lkb_count)
5465		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5466			  lkb_count, nodes_count);
5467}
5468
5469static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5470{
5471	struct rb_node *n;
5472	struct dlm_rsb *r;
5473
5474	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5475	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5476		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5477
5478		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5479			continue;
5480		if (!is_master(r)) {
5481			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5482			continue;
5483		}
5484		hold_rsb(r);
5485		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5486		return r;
5487	}
5488	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5489	return NULL;
5490}
5491
5492/*
5493 * Attempt to grant locks on resources that we are the master of.
5494 * Locks may have become grantable during recovery because locks
5495 * from departed nodes have been purged (or not rebuilt), allowing
5496 * previously blocked locks to now be granted.  The subset of rsb's
5497 * we are interested in are those with lkb's on either the convert or
5498 * waiting queues.
5499 *
5500 * Simplest would be to go through each master rsb and check for non-empty
5501 * convert or waiting queues, and attempt to grant on those rsbs.
5502 * Checking the queues requires lock_rsb, though, for which we'd need
5503 * to release the rsbtbl lock.  This would make iterating through all
5504 * rsb's very inefficient.  So, we rely on earlier recovery routines
5505 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5506 * locks for.
5507 */
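/* find_grant_rsb() above returns, one at a time, a held rsb from the given
   hash bucket that has RECOVER_GRANT set and is mastered here (taking only
   the rsbtbl spinlock); dlm_recover_grant() below then does the lock_rsb()
   and grant_pending_locks() work outside that spinlock. */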
5508
5509void dlm_recover_grant(struct dlm_ls *ls)
5510{
5511	struct dlm_rsb *r;
5512	int bucket = 0;
5513	unsigned int count = 0;
5514	unsigned int rsb_count = 0;
5515	unsigned int lkb_count = 0;
5516
5517	while (1) {
5518		r = find_grant_rsb(ls, bucket);
5519		if (!r) {
5520			if (bucket == ls->ls_rsbtbl_size - 1)
5521				break;
5522			bucket++;
5523			continue;
5524		}
5525		rsb_count++;
5526		count = 0;
5527		lock_rsb(r);
5528		/* the RECOVER_GRANT flag is checked in the grant path */
5529		grant_pending_locks(r, &count);
5530		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5531		lkb_count += count;
5532		confirm_master(r, 0);
5533		unlock_rsb(r);
5534		put_rsb(r);
5535		cond_resched();
5536	}
5537
5538	if (lkb_count)
5539		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5540			  lkb_count, rsb_count);
5541}
5542
5543static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5544					 uint32_t remid)
5545{
5546	struct dlm_lkb *lkb;
5547
5548	list_for_each_entry(lkb, head, lkb_statequeue) {
5549		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5550			return lkb;
5551	}
5552	return NULL;
5553}
5554
5555static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5556				    uint32_t remid)
5557{
5558	struct dlm_lkb *lkb;
5559
5560	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5561	if (lkb)
5562		return lkb;
5563	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5564	if (lkb)
5565		return lkb;
5566	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5567	if (lkb)
5568		return lkb;
5569	return NULL;
5570}
5571
5572/* needs at least dlm_rcom + rcom_lock */
5573static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5574				  struct dlm_rsb *r, struct dlm_rcom *rc)
5575{
5576	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5577
5578	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5579	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5580	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5581	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5582	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5583	lkb->lkb_flags |= DLM_IFL_MSTCPY;
5584	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5585	lkb->lkb_rqmode = rl->rl_rqmode;
5586	lkb->lkb_grmode = rl->rl_grmode;
5587	/* don't set lkb_status because add_lkb wants to set it itself */
5588
5589	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5590	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5591
5592	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5593		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5594			 sizeof(struct rcom_lock);
5595		if (lvblen > ls->ls_lvblen)
5596			return -EINVAL;
5597		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5598		if (!lkb->lkb_lvbptr)
5599			return -ENOMEM;
5600		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5601	}
5602
5603	/* Conversions between PR and CW (middle modes) need special handling.
5604	   The real granted mode of these converting locks cannot be determined
5605	   until all locks have been rebuilt on the rsb (recover_conversion) */
5606
5607	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5608	    middle_conversion(lkb)) {
5609		rl->rl_status = DLM_LKSTS_CONVERT;
5610		lkb->lkb_grmode = DLM_LOCK_IV;
5611		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5612	}
5613
5614	return 0;
5615}
5616
5617/* This lkb may have been recovered in a previous aborted recovery so we need
5618   to check if the rsb already has an lkb with the given remote nodeid/lkid.
5619   If so we just send back a standard reply.  If not, we create a new lkb with
5620   the given values and send back our lkid.  We send back our lkid by sending
5621   back the rcom_lock struct we got but with the remid field filled in. */
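/* A rough sketch of the exchange, assuming the usual recovery flow (the
   sending side, dlm_send_rcom_lock(), lives outside this file):

     lock holder:  dlm_send_rcom_lock(r, lkb)   rcom_lock with rl_lkid
     new master:   dlm_recover_master_copy()    find or create MSTCPY lkb,
                                                fill in rl_remid
     lock holder:  dlm_recover_process_copy()   save rl_remid as lkb_remid */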
5622
5623/* needs at least dlm_rcom + rcom_lock */
5624int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5625{
5626	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5627	struct dlm_rsb *r;
5628	struct dlm_lkb *lkb;
5629	uint32_t remid = 0;
5630	int from_nodeid = rc->rc_header.h_nodeid;
5631	int error;
5632
5633	if (rl->rl_parent_lkid) {
5634		error = -EOPNOTSUPP;
5635		goto out;
5636	}
5637
5638	remid = le32_to_cpu(rl->rl_lkid);
5639
5640	/* In general we expect the rsb returned to be R_MASTER, but we don't
5641	   have to require it.  Recovery of masters on one node can overlap
5642	   recovery of locks on another node, so one node can send us MSTCPY
5643	   locks before we've made ourselves master of this rsb.  We can still
5644	   add new MSTCPY locks that we receive here without any harm; when
5645	   we make ourselves master, dlm_recover_masters() won't touch the
5646	   MSTCPY locks we've received early. */
5647
5648	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5649			 from_nodeid, R_RECEIVE_RECOVER, &r);
5650	if (error)
5651		goto out;
5652
5653	lock_rsb(r);
5654
5655	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5656		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5657			  from_nodeid, remid);
5658		error = -EBADR;
5659		goto out_unlock;
5660	}
5661
5662	lkb = search_remid(r, from_nodeid, remid);
5663	if (lkb) {
5664		error = -EEXIST;
5665		goto out_remid;
5666	}
5667
5668	error = create_lkb(ls, &lkb);
5669	if (error)
5670		goto out_unlock;
5671
5672	error = receive_rcom_lock_args(ls, lkb, r, rc);
5673	if (error) {
5674		__put_lkb(ls, lkb);
5675		goto out_unlock;
5676	}
5677
5678	attach_lkb(r, lkb);
5679	add_lkb(r, lkb, rl->rl_status);
5680	error = 0;
5681	ls->ls_recover_locks_in++;
5682
5683	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5684		rsb_set_flag(r, RSB_RECOVER_GRANT);
5685
5686 out_remid:
5687	/* this is the new value returned to the lock holder for
5688	   saving in its process-copy lkb */
5689	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5690
5691	lkb->lkb_recover_seq = ls->ls_recover_seq;
5692
5693 out_unlock:
5694	unlock_rsb(r);
5695	put_rsb(r);
5696 out:
5697	if (error && error != -EEXIST)
5698		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5699			  from_nodeid, remid, error);
5700	rl->rl_result = cpu_to_le32(error);
5701	return error;
5702}
5703
5704/* needs at least dlm_rcom + rcom_lock */
5705int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5706{
5707	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5708	struct dlm_rsb *r;
5709	struct dlm_lkb *lkb;
5710	uint32_t lkid, remid;
5711	int error, result;
5712
5713	lkid = le32_to_cpu(rl->rl_lkid);
5714	remid = le32_to_cpu(rl->rl_remid);
5715	result = le32_to_cpu(rl->rl_result);
5716
5717	error = find_lkb(ls, lkid, &lkb);
5718	if (error) {
5719		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5720			  lkid, rc->rc_header.h_nodeid, remid, result);
5721		return error;
5722	}
5723
5724	r = lkb->lkb_resource;
5725	hold_rsb(r);
5726	lock_rsb(r);
5727
5728	if (!is_process_copy(lkb)) {
5729		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5730			  lkid, rc->rc_header.h_nodeid, remid, result);
5731		dlm_dump_rsb(r);
5732		unlock_rsb(r);
5733		put_rsb(r);
5734		dlm_put_lkb(lkb);
5735		return -EINVAL;
5736	}
5737
5738	switch (result) {
5739	case -EBADR:
5740		/* There's a chance the new master received our lock before
5741		   dlm_recover_master_reply(); this wouldn't happen if we did
5742		   a barrier between recover_masters and recover_locks. */
5743
5744		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5745			  lkid, rc->rc_header.h_nodeid, remid, result);
5746	
5747		dlm_send_rcom_lock(r, lkb);
5748		goto out;
5749	case -EEXIST:
5750	case 0:
5751		lkb->lkb_remid = remid;
5752		break;
5753	default:
5754		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5755			  lkid, rc->rc_header.h_nodeid, remid, result);
5756	}
5757
5758	/* an ack for dlm_recover_locks() which waits for replies from
5759	   all the locks it sends to new masters */
5760	dlm_recovered_lock(r);
5761 out:
5762	unlock_rsb(r);
5763	put_rsb(r);
5764	dlm_put_lkb(lkb);
5765
5766	return 0;
5767}
5768
5769int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5770		     int mode, uint32_t flags, void *name, unsigned int namelen,
5771		     unsigned long timeout_cs)
5772{
5773	struct dlm_lkb *lkb;
5774	struct dlm_args args;
5775	int error;
5776
5777	dlm_lock_recovery(ls);
5778
5779	error = create_lkb(ls, &lkb);
5780	if (error) {
5781		kfree(ua);
5782		goto out;
5783	}
5784
5785	if (flags & DLM_LKF_VALBLK) {
5786		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5787		if (!ua->lksb.sb_lvbptr) {
5788			kfree(ua);
5789			__put_lkb(ls, lkb);
5790			error = -ENOMEM;
5791			goto out;
5792		}
5793	}
5794
5795	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5796	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
5797	   lock and that lkb_astparam is the dlm_user_args structure. */
5798
5799	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5800			      fake_astfn, ua, fake_bastfn, &args);
5801	lkb->lkb_flags |= DLM_IFL_USER;
5802
5803	if (error) {
5804		__put_lkb(ls, lkb);
5805		goto out;
5806	}
5807
5808	error = request_lock(ls, lkb, name, namelen, &args);
5809
5810	switch (error) {
5811	case 0:
5812		break;
5813	case -EINPROGRESS:
5814		error = 0;
5815		break;
5816	case -EAGAIN:
5817		error = 0;
5818		/* fall through */
5819	default:
5820		__put_lkb(ls, lkb);
5821		goto out;
5822	}
5823
5824	/* add this new lkb to the per-process list of locks */
5825	spin_lock(&ua->proc->locks_spin);
5826	hold_lkb(lkb);
5827	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5828	spin_unlock(&ua->proc->locks_spin);
5829 out:
5830	dlm_unlock_recovery(ls);
5831	return error;
5832}
5833
5834int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5835		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5836		     unsigned long timeout_cs)
5837{
5838	struct dlm_lkb *lkb;
5839	struct dlm_args args;
5840	struct dlm_user_args *ua;
5841	int error;
5842
5843	dlm_lock_recovery(ls);
5844
5845	error = find_lkb(ls, lkid, &lkb);
5846	if (error)
5847		goto out;
5848
5849	/* user can change the params on its lock when it converts it, or
5850	   add an lvb that didn't exist before */
5851
5852	ua = lkb->lkb_ua;
5853
5854	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5855		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5856		if (!ua->lksb.sb_lvbptr) {
5857			error = -ENOMEM;
5858			goto out_put;
5859		}
5860	}
5861	if (lvb_in && ua->lksb.sb_lvbptr)
5862		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5863
5864	ua->xid = ua_tmp->xid;
5865	ua->castparam = ua_tmp->castparam;
5866	ua->castaddr = ua_tmp->castaddr;
5867	ua->bastparam = ua_tmp->bastparam;
5868	ua->bastaddr = ua_tmp->bastaddr;
5869	ua->user_lksb = ua_tmp->user_lksb;
5870
5871	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5872			      fake_astfn, ua, fake_bastfn, &args);
5873	if (error)
5874		goto out_put;
5875
5876	error = convert_lock(ls, lkb, &args);
5877
5878	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5879		error = 0;
5880 out_put:
5881	dlm_put_lkb(lkb);
5882 out:
5883	dlm_unlock_recovery(ls);
5884	kfree(ua_tmp);
5885	return error;
5886}
5887
5888/*
5889 * The caller asks for an orphan lock on a given resource with a given mode.
5890 * If a matching lock exists, it's moved to the owner's list of locks and
5891 * the lkid is returned.
5892 */
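/* Return values below: 0 with *lkid set when a matching orphan is adopted;
   -EAGAIN when orphans exist for the resource but only with other granted
   modes; -ENOENT when no orphan matches the name at all. */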
5893
5894int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5895		     int mode, uint32_t flags, void *name, unsigned int namelen,
5896		     unsigned long timeout_cs, uint32_t *lkid)
5897{
5898	struct dlm_lkb *lkb;
5899	struct dlm_user_args *ua;
5900	int found_other_mode = 0;
5901	int found = 0;
5902	int rv = 0;
5903
5904	mutex_lock(&ls->ls_orphans_mutex);
5905	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5906		if (lkb->lkb_resource->res_length != namelen)
5907			continue;
5908		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5909			continue;
5910		if (lkb->lkb_grmode != mode) {
5911			found_other_mode = 1;
5912			continue;
5913		}
5914
5915		found = 1;
5916		list_del_init(&lkb->lkb_ownqueue);
5917		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5918		*lkid = lkb->lkb_id;
5919		break;
5920	}
5921	mutex_unlock(&ls->ls_orphans_mutex);
5922
5923	if (!found && found_other_mode) {
5924		rv = -EAGAIN;
5925		goto out;
5926	}
5927
5928	if (!found) {
5929		rv = -ENOENT;
5930		goto out;
5931	}
5932
5933	lkb->lkb_exflags = flags;
5934	lkb->lkb_ownpid = (int) current->pid;
5935
5936	ua = lkb->lkb_ua;
5937
5938	ua->proc = ua_tmp->proc;
5939	ua->xid = ua_tmp->xid;
5940	ua->castparam = ua_tmp->castparam;
5941	ua->castaddr = ua_tmp->castaddr;
5942	ua->bastparam = ua_tmp->bastparam;
5943	ua->bastaddr = ua_tmp->bastaddr;
5944	ua->user_lksb = ua_tmp->user_lksb;
5945
5946	/*
5947	 * The lkb reference from the ls_orphans list was not
5948	 * removed above, and is now considered the reference
5949	 * for the proc locks list.
5950	 */
5951
5952	spin_lock(&ua->proc->locks_spin);
5953	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5954	spin_unlock(&ua->proc->locks_spin);
5955 out:
5956	kfree(ua_tmp);
5957	return rv;
5958}
5959
5960int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5961		    uint32_t flags, uint32_t lkid, char *lvb_in)
5962{
5963	struct dlm_lkb *lkb;
5964	struct dlm_args args;
5965	struct dlm_user_args *ua;
5966	int error;
5967
5968	dlm_lock_recovery(ls);
5969
5970	error = find_lkb(ls, lkid, &lkb);
5971	if (error)
5972		goto out;
5973
5974	ua = lkb->lkb_ua;
5975
5976	if (lvb_in && ua->lksb.sb_lvbptr)
5977		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5978	if (ua_tmp->castparam)
5979		ua->castparam = ua_tmp->castparam;
5980	ua->user_lksb = ua_tmp->user_lksb;
5981
5982	error = set_unlock_args(flags, ua, &args);
5983	if (error)
5984		goto out_put;
5985
5986	error = unlock_lock(ls, lkb, &args);
5987
5988	if (error == -DLM_EUNLOCK)
5989		error = 0;
5990	/* from validate_unlock_args() */
5991	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
5992		error = 0;
5993	if (error)
5994		goto out_put;
5995
5996	spin_lock(&ua->proc->locks_spin);
5997	/* dlm_user_add_cb() may have already taken lkb off the proc list */
5998	if (!list_empty(&lkb->lkb_ownqueue))
5999		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6000	spin_unlock(&ua->proc->locks_spin);
6001 out_put:
6002	dlm_put_lkb(lkb);
6003 out:
6004	dlm_unlock_recovery(ls);
6005	kfree(ua_tmp);
6006	return error;
6007}
6008
6009int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6010		    uint32_t flags, uint32_t lkid)
6011{
6012	struct dlm_lkb *lkb;
6013	struct dlm_args args;
6014	struct dlm_user_args *ua;
6015	int error;
6016
6017	dlm_lock_recovery(ls);
6018
6019	error = find_lkb(ls, lkid, &lkb);
6020	if (error)
6021		goto out;
6022
6023	ua = lkb->lkb_ua;
6024	if (ua_tmp->castparam)
6025		ua->castparam = ua_tmp->castparam;
6026	ua->user_lksb = ua_tmp->user_lksb;
6027
6028	error = set_unlock_args(flags, ua, &args);
6029	if (error)
6030		goto out_put;
6031
6032	error = cancel_lock(ls, lkb, &args);
6033
6034	if (error == -DLM_ECANCEL)
6035		error = 0;
6036	/* from validate_unlock_args() */
6037	if (error == -EBUSY)
6038		error = 0;
6039 out_put:
6040	dlm_put_lkb(lkb);
6041 out:
6042	dlm_unlock_recovery(ls);
6043	kfree(ua_tmp);
6044	return error;
6045}
6046
6047int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6048{
6049	struct dlm_lkb *lkb;
6050	struct dlm_args args;
6051	struct dlm_user_args *ua;
6052	struct dlm_rsb *r;
6053	int error;
6054
6055	dlm_lock_recovery(ls);
6056
6057	error = find_lkb(ls, lkid, &lkb);
6058	if (error)
6059		goto out;
6060
6061	ua = lkb->lkb_ua;
6062
6063	error = set_unlock_args(flags, ua, &args);
6064	if (error)
6065		goto out_put;
6066
6067	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6068
6069	r = lkb->lkb_resource;
6070	hold_rsb(r);
6071	lock_rsb(r);
6072
6073	error = validate_unlock_args(lkb, &args);
6074	if (error)
6075		goto out_r;
6076	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6077
6078	error = _cancel_lock(r, lkb);
6079 out_r:
6080	unlock_rsb(r);
6081	put_rsb(r);
6082
6083	if (error == -DLM_ECANCEL)
6084		error = 0;
6085	/* from validate_unlock_args() */
6086	if (error == -EBUSY)
6087		error = 0;
6088 out_put:
6089	dlm_put_lkb(lkb);
6090 out:
6091	dlm_unlock_recovery(ls);
6092	return error;
6093}
6094
6095/* lkb's that are removed from the waiters list by revert are just left on the
6096   orphans list with the granted orphan locks, to be freed by purge */
6097
6098static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6099{
6100	struct dlm_args args;
6101	int error;
6102
6103	hold_lkb(lkb); /* reference for the ls_orphans list */
6104	mutex_lock(&ls->ls_orphans_mutex);
6105	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6106	mutex_unlock(&ls->ls_orphans_mutex);
6107
6108	set_unlock_args(0, lkb->lkb_ua, &args);
6109
6110	error = cancel_lock(ls, lkb, &args);
6111	if (error == -DLM_ECANCEL)
6112		error = 0;
6113	return error;
6114}
6115
6116/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6117   granted.  Regardless of what rsb queue the lock is on, it's removed and
6118   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
6119   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
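/* unlock_proc_lock() is the common teardown path: dlm_clear_proc_locks(),
   purge_proc_locks() and do_purge() all use it to drop locks left behind by
   a closed device or a purged pid. */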
6120
6121static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6122{
6123	struct dlm_args args;
6124	int error;
6125
6126	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6127			lkb->lkb_ua, &args);
6128
6129	error = unlock_lock(ls, lkb, &args);
6130	if (error == -DLM_EUNLOCK)
6131		error = 0;
6132	return error;
6133}
6134
6135/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6136   (which does lock_rsb) due to deadlock with receiving a message that does
6137   lock_rsb followed by dlm_user_add_cb() */
6138
6139static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6140				     struct dlm_user_proc *proc)
6141{
6142	struct dlm_lkb *lkb = NULL;
6143
6144	mutex_lock(&ls->ls_clear_proc_locks);
6145	if (list_empty(&proc->locks))
6146		goto out;
6147
6148	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6149	list_del_init(&lkb->lkb_ownqueue);
6150
6151	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6152		lkb->lkb_flags |= DLM_IFL_ORPHAN;
6153	else
6154		lkb->lkb_flags |= DLM_IFL_DEAD;
6155 out:
6156	mutex_unlock(&ls->ls_clear_proc_locks);
6157	return lkb;
6158}
6159
6160/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6161   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6162   which we clear here. */
6163
6164/* proc CLOSING flag is set so no more device_reads should look at proc->asts
6165   list, and no more device_writes should add lkb's to proc->locks list; so we
6166   shouldn't need to take asts_spin or locks_spin here.  this assumes that
6167   device reads/writes/closes are serialized -- FIXME: we may need to serialize
6168	   them ourselves. */
6169
6170void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6171{
6172	struct dlm_lkb *lkb, *safe;
6173
6174	dlm_lock_recovery(ls);
6175
6176	while (1) {
6177		lkb = del_proc_lock(ls, proc);
6178		if (!lkb)
6179			break;
6180		del_timeout(lkb);
6181		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6182			orphan_proc_lock(ls, lkb);
6183		else
6184			unlock_proc_lock(ls, lkb);
6185
6186		/* this removes the reference for the proc->locks list
6187		   added by dlm_user_request, it may result in the lkb
6188		   being freed */
6189
6190		dlm_put_lkb(lkb);
6191	}
6192
6193	mutex_lock(&ls->ls_clear_proc_locks);
6194
6195	/* in-progress unlocks */
6196	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6197		list_del_init(&lkb->lkb_ownqueue);
6198		lkb->lkb_flags |= DLM_IFL_DEAD;
6199		dlm_put_lkb(lkb);
6200	}
6201
6202	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6203		memset(&lkb->lkb_callbacks, 0,
6204		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6205		list_del_init(&lkb->lkb_cb_list);
6206		dlm_put_lkb(lkb);
6207	}
6208
6209	mutex_unlock(&ls->ls_clear_proc_locks);
6210	dlm_unlock_recovery(ls);
6211}
6212
6213static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6214{
6215	struct dlm_lkb *lkb, *safe;
6216
6217	while (1) {
6218		lkb = NULL;
6219		spin_lock(&proc->locks_spin);
6220		if (!list_empty(&proc->locks)) {
6221			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6222					 lkb_ownqueue);
6223			list_del_init(&lkb->lkb_ownqueue);
6224		}
6225		spin_unlock(&proc->locks_spin);
6226
6227		if (!lkb)
6228			break;
6229
6230		lkb->lkb_flags |= DLM_IFL_DEAD;
6231		unlock_proc_lock(ls, lkb);
6232		dlm_put_lkb(lkb); /* ref from proc->locks list */
6233	}
6234
6235	spin_lock(&proc->locks_spin);
6236	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6237		list_del_init(&lkb->lkb_ownqueue);
6238		lkb->lkb_flags |= DLM_IFL_DEAD;
6239		dlm_put_lkb(lkb);
6240	}
6241	spin_unlock(&proc->locks_spin);
6242
6243	spin_lock(&proc->asts_spin);
6244	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6245		memset(&lkb->lkb_callbacks, 0,
6246		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6247		list_del_init(&lkb->lkb_cb_list);
6248		dlm_put_lkb(lkb);
6249	}
6250	spin_unlock(&proc->asts_spin);
6251}
6252
6253/* pid of 0 means purge all orphans */
6254
6255static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6256{
6257	struct dlm_lkb *lkb, *safe;
6258
6259	mutex_lock(&ls->ls_orphans_mutex);
6260	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6261		if (pid && lkb->lkb_ownpid != pid)
6262			continue;
6263		unlock_proc_lock(ls, lkb);
6264		list_del_init(&lkb->lkb_ownqueue);
6265		dlm_put_lkb(lkb);
6266	}
6267	mutex_unlock(&ls->ls_orphans_mutex);
6268}
6269
6270static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6271{
6272	struct dlm_message *ms;
6273	struct dlm_mhandle *mh;
6274	int error;
6275
6276	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6277				DLM_MSG_PURGE, &ms, &mh);
6278	if (error)
6279		return error;
6280	ms->m_nodeid = nodeid;
6281	ms->m_pid = pid;
6282
6283	return send_message(mh, ms);
6284}
6285
6286int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6287		   int nodeid, int pid)
6288{
6289	int error = 0;
6290
6291	if (nodeid && (nodeid != dlm_our_nodeid())) {
6292		error = send_purge(ls, nodeid, pid);
6293	} else {
6294		dlm_lock_recovery(ls);
6295		if (pid == current->pid)
6296			purge_proc_locks(ls, proc);
6297		else
6298			do_purge(ls, nodeid, pid);
6299		dlm_unlock_recovery(ls);
6300	}
6301	return error;
6302}
6303