/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

       dlm_lock          = request_lock
       dlm_lock+CONVERT  = convert_lock
       dlm_unlock        = unlock_lock
       dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
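
/* Illustrative sketch of a caller (the lockspace handle "ls" and the
   callbacks my_ast/my_bast with argument my_arg are hypothetical names):

       error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5,
                        0, my_ast, my_arg, my_bast);

   Stage 1: dlm_lock() checks the args; without DLM_LKF_CONVERT this is
            a new request, so it becomes request_lock().
   Stage 2: request_lock() finds and locks the rsb for "myres".
   Stage 3: _request_lock() calls do_request() if this node is the
            master of the rsb, otherwise send_request().
   Stage 4: do_request() grants or queues the lock and queues the
            completion ast.

   The matching dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, my_arg) flows
   through unlock_lock(), _unlock_lock() and do_unlock() the same way. */
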
#include <linux/types.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD */
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
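
/* Example of reading the table (illustrative): converting a lock from
   grmode PR to rqmode EX indexes [PR+1][EX+1] = 1, so the resource's LVB
   is copied out to the caller (see set_lvb_lock below); unlocking an EX
   lock indexes [EX+1][UN+1] = -1, but set_lvb_unlock writes the caller's
   LVB back to the resource for any grmode of PW or EX. */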

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
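
/* Example (illustrative): two protected-read locks are compatible,
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1, while protected read
   and exclusive are not, dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0. */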

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
	       "     status %d rqmode %d grmode %d wait_type %d\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
	       r->res_nodeid, r->res_flags, r->res_first_lkid,
	       r->res_recover_locks_count, r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	if (lkb->lkb_flags & DLM_IFL_MSTCPY)
		DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */
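
/* (Illustrative caller pattern, see find_rsb() below: call pre_rsb_struct()
   before taking the bucket spinlock, then if get_rsb_struct() returns
   -EAGAIN, drop the spinlock and retry from pre_rsb_struct().) */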

static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_hashchain);
	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}

static int search_rsb_list(struct list_head *head, char *name, int len,
			   unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error = 0;

	list_for_each_entry(r, head, res_hashchain) {
		if (len == r->res_length && !memcmp(name, r->res_name, len))
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	if (r->res_nodeid && (flags & R_MASTER))
		error = -ENOTBLK;
	*r_ret = r;
	return error;
}

static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		       unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error;

	error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
	if (!error) {
		kref_get(&r->res_ref);
		goto out;
	}
	error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
	if (error)
		goto out;

	list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);

	if (dlm_no_directory(ls))
		goto out;

	if (r->res_nodeid == -1) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else if (r->res_nodeid > 0) {
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else {
		DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
		DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
	}
 out:
	*r_ret = r;
	return error;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 */

static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, bucket;
	int error;

	if (namelen > DLM_RESNAME_MAXLEN) {
		error = -EINVAL;
		goto out;
	}

	if (dlm_no_directory(ls))
		flags |= R_CREATE;

	hash = jhash(name, namelen, 0);
	bucket = hash & (ls->ls_rsbtbl_size - 1);

 retry:
	if (flags & R_CREATE) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[bucket].lock);

	error = _search_rsb(ls, name, namelen, bucket, flags, &r);
	if (!error)
		goto out_unlock;

	if (error == -EBADR && !(flags & R_CREATE))
		goto out_unlock;

	/* the rsb was found but wasn't a master copy */
	if (error == -ENOTBLK)
		goto out_unlock;

	error = get_rsb_struct(ls, name, namelen, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = bucket;
	r->res_nodeid = -1;
	kref_init(&r->res_ref);

	/* With no directory, the master can be set immediately */
	if (dlm_no_directory(ls)) {
		int nodeid = dlm_dir_nodeid(r);
		if (nodeid == dlm_our_nodeid())
			nodeid = 0;
		r->res_nodeid = nodeid;
	}
	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 out:
	*r_ret = r;
	return error;
}

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv, id;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

 retry:
	rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
	if (!rv)
		lkb->lkb_id = id;
	spin_unlock(&ls->ls_lkbidr_spin);

	if (rv == -EAGAIN)
		goto retry;

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}

/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}

void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	ktime_t zero = ktime_set(0, 0);
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_equal(lkb->lkb_wait_time, zero))
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = zero;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (debug_expired)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_time = ktime_get();
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x reply %d flags %x no wait_type",
		  lkb->lkb_id, mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type, ms);
	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

static void dir_remove(struct dlm_rsb *r)
{
	int to_nodeid;

	if (dlm_no_directory(r->res_ls))
		return;

	to_nodeid = dlm_dir_nodeid(r);
	if (to_nodeid != dlm_our_nodeid())
		send_remove(r);
	else
		dlm_dir_remove_entry(r->res_ls, to_nodeid,
				     r->res_name, r->res_length);
}

/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
   found since they are in order of newest to oldest? */

static int shrink_bucket(struct dlm_ls *ls, int b)
{
	struct dlm_rsb *r;
	int count = 0, found;

	for (;;) {
		found = 0;
		spin_lock(&ls->ls_rsbtbl[b].lock);
		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
					    res_hashchain) {
			if (!time_after_eq(jiffies, r->res_toss_time +
					   dlm_config.ci_toss_secs * HZ))
				continue;
			found = 1;
			break;
		}

		if (!found) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			break;
		}

		if (kref_put(&r->res_ref, kill_rsb)) {
			list_del(&r->res_hashchain);
			spin_unlock(&ls->ls_rsbtbl[b].lock);

			if (is_master(r))
				dir_remove(r);
			dlm_free_rsb(r);
			count++;
		} else {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "tossed rsb in use %s", r->res_name);
		}
	}

	return count;
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}

static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}

/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							lkb->lkb_timestamp));

			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (lkb->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}

/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_to_us(lkb->lkb_wait_time))
			lkb->lkb_wait_time = ktime_get();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}

/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}

static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}

/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > DLM_RESNAME_MAXLEN)
			len = DLM_RESNAME_MAXLEN;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}

/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */
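
/* For example (illustrative): a successful unlock on the master ends in
   remove_lock(), and the unlock reply on the lock holder's node ends in
   remove_lock_pc(); a cancel of a queued conversion ends in revert_lock()
   on the master and revert_lock_pc() on the process copy. */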

static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}

/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}

static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
	lkb->lkb_highbast = 0;
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}

/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb)
{
	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}

static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;

	return 0;
}

/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}

/*
 * "A conversion deadlock arises with a pair of lock requests in the converting
 * queue for one resource.  The granted mode of each lock blocks the requested
 * mode of the other lock."
 *
 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
 * convert queue from being granted, then deadlk/demote lkb.
 *
 * Example:
 * Granted Queue: empty
 * Convert Queue: NL->EX (first lock)
 *                PR->EX (second lock)
 *
 * The first lock can't be granted because of the granted mode of the second
 * lock and the second lock can't be granted because it's not first in the
 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
 * flag set and return DEMOTED in the lksb flags.
 *
 * Originally, this function detected conv-deadlk in a more limited scope:
 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
 * - if lkb1 was the first entry in the queue (not just earlier), and was
 *   blocked by the granted mode of lkb2, and there was nothing on the
 *   granted queue preventing lkb1 from being granted immediately, i.e.
 *   lkb2 was the only thing preventing lkb1 from being granted.
 *
 * That second condition meant we'd only say there was conv-deadlk if
 * resolving it (by demotion) would lead to the first lock on the convert
 * queue being granted right away.  It allowed conversion deadlocks to exist
 * between locks on the convert queue while they couldn't be granted anyway.
 *
 * Now, we detect and take action on conversion deadlocks immediately when
 * they're created, even if they may not be immediately consequential.  If
 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
 * mode that would prevent lkb1's conversion from being granted, we do a
 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
 * I think this means that the lkb_is_ahead condition below should always
 * be zero, i.e. there will never be conv-deadlk between two locks that are
 * both already on the convert queue.
 */

static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
	struct dlm_lkb *lkb1;
	int lkb_is_ahead = 0;

	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
		if (lkb1 == lkb2) {
			lkb_is_ahead = 1;
			continue;
		}

		if (!lkb_is_ahead) {
			if (!modes_compat(lkb2, lkb1))
				return 1;
		} else {
			if (!modes_compat(lkb2, lkb1) &&
			    !modes_compat(lkb1, lkb2))
				return 1;
		}
	}
	return 0;
}

/*
 * Return 1 if the lock can be granted, 0 otherwise.
 * Also detect and resolve conversion deadlocks.
 *
 * lkb is the lock to be granted
 *
 * now is 1 if the function is being called in the context of the
 * immediate request, it is 0 if called later, after the lock has been
 * queued.
 *
 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
 */

static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);

	/*
	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
	 * a new request for a NL mode lock being blocked.
	 *
	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
	 * request, then it would be granted.  In essence, the use of this flag
	 * tells the Lock Manager to expedite this request by not considering
	 * what may be in the CONVERTING or WAITING queues...  As of this
	 * writing, the EXPEDITE flag can be used only with new requests for NL
	 * mode locks.  This flag is not valid for conversion requests.
	 *
	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
	 * conversion or used with a non-NL requested mode.  We also know an
	 * EXPEDITE request is always granted immediately, so now must always
	 * be 1.  The full condition to grant an expedite request: (now &&
	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
	 * therefore be shortened to just checking the flag.
	 */

	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
		return 1;

	/*
	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
	 * added to the remaining conditions.
	 */

	if (queue_conflict(&r->res_grantqueue, lkb))
		goto out;

	/*
	 * 6-3: By default, a conversion request is immediately granted if the
	 * requested mode is compatible with the modes of all other granted
	 * locks
	 */

	if (queue_conflict(&r->res_convertqueue, lkb))
		goto out;

	/*
	 * 6-5: But the default algorithm for deciding whether to grant or
	 * queue conversion requests does not by itself guarantee that such
	 * requests are serviced on a "first come first serve" basis.  This, in
	 * turn, can lead to a phenomenon known as "indefinite postponement".
	 *
	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
	 * the system service employed to request a lock conversion.  This flag
	 * forces certain conversion requests to be queued, even if they are
	 * compatible with the granted modes of other locks on the same
	 * resource.  Thus, the use of this flag results in conversion requests
	 * being ordered on a "first come first serve" basis.
	 *
	 * DCT: This condition is all about new conversions being able to occur
	 * "in place" while the lock remains on the granted queue (assuming
	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
	 * doesn't _have_ to go onto the convert queue where it's processed in
	 * order.  The "now" variable is necessary to distinguish converts
	 * being received and processed for the first time now, because once a
	 * convert is moved to the conversion queue the condition below applies
	 * requiring fifo granting.
	 */

	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
		return 1;

	/*
	 * The NOORDER flag is set to avoid the standard vms rules on grant
	 * order.
	 */

	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
		return 1;

	/*
	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
	 * granted until all other conversion requests ahead of it are granted
	 * and/or canceled.
	 */

	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
		return 1;

	/*
	 * 6-4: By default, a new request is immediately granted only if all
	 * three of the following conditions are satisfied when the request is
	 * issued:
	 * - The queue of ungranted conversion requests for the resource is
	 *   empty.
	 * - The queue of ungranted new requests for the resource is empty.
	 * - The mode of the new request is compatible with the most
	 *   restrictive mode of all granted locks on the resource.
	 */

	if (now && !conv && list_empty(&r->res_convertqueue) &&
	    list_empty(&r->res_waitqueue))
		return 1;

	/*
	 * 6-4: Once a lock request is in the queue of ungranted new requests,
	 * it cannot be granted until the queue of ungranted conversion
	 * requests is empty, all ungranted new requests ahead of it are
	 * granted and/or canceled, and it is compatible with the granted mode
	 * of the most restrictive lock granted on the resource.
	 */

	if (!now && !conv && list_empty(&r->res_convertqueue) &&
	    first_in_list(lkb, &r->res_waitqueue))
		return 1;
 out:
	return 0;
}
1734
1735static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
1736			  int *err)
1737{
1738	int rv;
1739	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1740	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
1741
1742	if (err)
1743		*err = 0;
1744
1745	rv = _can_be_granted(r, lkb, now);
1746	if (rv)
1747		goto out;
1748
1749	/*
1750	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
1751	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
1752	 * cancels one of the locks.
1753	 */
1754
1755	if (is_convert && can_be_queued(lkb) &&
1756	    conversion_deadlock_detect(r, lkb)) {
1757		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
1758			lkb->lkb_grmode = DLM_LOCK_NL;
1759			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1760		} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1761			if (err)
1762				*err = -EDEADLK;
1763			else {
1764				log_print("can_be_granted deadlock %x now %d",
1765					  lkb->lkb_id, now);
1766				dlm_dump_rsb(r);
1767			}
1768		}
1769		goto out;
1770	}
1771
1772	/*
1773	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
1774	 * to grant a request in a mode other than the normal rqmode.  It's a
1775	 * simple way to provide a big optimization to applications that can
1776	 * use them.
1777	 */
1778
1779	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
1780		alt = DLM_LOCK_PR;
1781	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
1782		alt = DLM_LOCK_CW;
1783
1784	if (alt) {
1785		lkb->lkb_rqmode = alt;
1786		rv = _can_be_granted(r, lkb, now);
1787		if (rv)
1788			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1789		else
1790			lkb->lkb_rqmode = rqmode;
1791	}
1792 out:
1793	return rv;
1794}
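
/* Illustration of the alternate-mode flags from a caller's side
   (hypothetical usage, not code in this file): requesting DLM_LOCK_CW
   with DLM_LKF_ALTPR means that if CW would block but PR would not,
   the lock is granted in PR and DLM_SBF_ALTMODE is returned in the
   lksb flags, telling the caller which mode it actually holds. */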
1795
1796/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
1797   for locks pending on the convert list.  Once verified (watch for these
1798   log_prints), we should be able to just call _can_be_granted() and not
1799   bother with the demote/deadlk cases here (and there's no easy way to deal
1800   with a deadlk here, we'd have to generate something like grant_lock with
1801   the deadlk error.) */
1802
1803/* Returns the highest requested mode of all blocked conversions; sets
1804   cw if there's a blocked conversion to DLM_LOCK_CW. */
1805
1806static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
1807{
1808	struct dlm_lkb *lkb, *s;
1809	int hi, demoted, quit, grant_restart, demote_restart;
1810	int deadlk;
1811
1812	quit = 0;
1813 restart:
1814	grant_restart = 0;
1815	demote_restart = 0;
1816	hi = DLM_LOCK_IV;
1817
1818	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1819		demoted = is_demoted(lkb);
1820		deadlk = 0;
1821
1822		if (can_be_granted(r, lkb, 0, &deadlk)) {
1823			grant_lock_pending(r, lkb);
1824			grant_restart = 1;
1825			continue;
1826		}
1827
1828		if (!demoted && is_demoted(lkb)) {
1829			log_print("WARN: pending demoted %x node %d %s",
1830				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1831			demote_restart = 1;
1832			continue;
1833		}
1834
1835		if (deadlk) {
1836			log_print("WARN: pending deadlock %x node %d %s",
1837				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1838			dlm_dump_rsb(r);
1839			continue;
1840		}
1841
1842		hi = max_t(int, lkb->lkb_rqmode, hi);
1843
1844		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
1845			*cw = 1;
1846	}
1847
1848	if (grant_restart)
1849		goto restart;
1850	if (demote_restart && !quit) {
1851		quit = 1;
1852		goto restart;
1853	}
1854
1855	return max_t(int, high, hi);
1856}
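
/* Illustration: if can_be_granted() demotes a convert to NL (the
   CONVDEADLK case) instead of granting it, demote_restart forces one
   extra pass over the queue so that converts made grantable by the
   demotion are granted before we return. */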
1857
1858static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
1859{
1860	struct dlm_lkb *lkb, *s;
1861
1862	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1863		if (can_be_granted(r, lkb, 0, NULL))
1864			grant_lock_pending(r, lkb);
1865		else {
1866			high = max_t(int, lkb->lkb_rqmode, high);
1867			if (lkb->lkb_rqmode == DLM_LOCK_CW)
1868				*cw = 1;
1869		}
1870	}
1871
1872	return high;
1873}
1874
1875/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
1876   on either the convert or waiting queue.
1877   high is the largest rqmode of all locks blocked on the convert or
1878   waiting queue. */
1879
1880static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
1881{
1882	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
1883		if (gr->lkb_highbast < DLM_LOCK_EX)
1884			return 1;
1885		return 0;
1886	}
1887
1888	if (gr->lkb_highbast < high &&
1889	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
1890		return 1;
1891	return 0;
1892}
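
/* Example (illustrative): a lock granted in PR, with lkb_highbast
   below EX, blocks a pending EX request; __dlm_compat_matrix[PR+1][EX+1]
   is 0 (the +1 offsets account for DLM_LOCK_IV being -1), so this
   returns 1 and a blocking ast for EX is sent exactly once. */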
1893
1894static void grant_pending_locks(struct dlm_rsb *r)
1895{
1896	struct dlm_lkb *lkb, *s;
1897	int high = DLM_LOCK_IV;
1898	int cw = 0;
1899
1900	DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
1901
1902	high = grant_pending_convert(r, high, &cw);
1903	high = grant_pending_wait(r, high, &cw);
1904
1905	if (high == DLM_LOCK_IV)
1906		return;
1907
1908	/*
1909	 * If there are locks left on the wait/convert queue then send blocking
1910	 * ASTs to granted locks based on the largest requested mode (high)
1911	 * found above.
1912	 */
1913
1914	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1915		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
1916			if (cw && high == DLM_LOCK_PR &&
1917			    lkb->lkb_grmode == DLM_LOCK_PR)
1918				queue_bast(r, lkb, DLM_LOCK_CW);
1919			else
1920				queue_bast(r, lkb, high);
1921			lkb->lkb_highbast = high;
1922		}
1923	}
1924}
1925
1926static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
1927{
1928	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
1929	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
1930		if (gr->lkb_highbast < DLM_LOCK_EX)
1931			return 1;
1932		return 0;
1933	}
1934
1935	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
1936		return 1;
1937	return 0;
1938}
1939
1940static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1941			    struct dlm_lkb *lkb)
1942{
1943	struct dlm_lkb *gr;
1944
1945	list_for_each_entry(gr, head, lkb_statequeue) {
1946		/* skip self when sending basts to convertqueue */
1947		if (gr == lkb)
1948			continue;
1949		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
1950			queue_bast(r, gr, lkb->lkb_rqmode);
1951			gr->lkb_highbast = lkb->lkb_rqmode;
1952		}
1953	}
1954}
1955
1956static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1957{
1958	send_bast_queue(r, &r->res_grantqueue, lkb);
1959}
1960
1961static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1962{
1963	send_bast_queue(r, &r->res_grantqueue, lkb);
1964	send_bast_queue(r, &r->res_convertqueue, lkb);
1965}
1966
1967/* set_master(r, lkb) -- set the master nodeid of a resource
1968
1969   The purpose of this function is to set the nodeid field in the given
1970   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
1971   known, it can just be copied to the lkb and the function will return
1972   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
1973   before it can be copied to the lkb.
1974
1975   When the rsb nodeid is being looked up remotely, the initial lkb
1976   causing the lookup is kept on the ls_waiters list waiting for the
1977   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
1978   on the rsb's res_lookup list until the master is verified.
1979
1980   Return values:
1981   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1982   1: the rsb master is not available and the lkb has been placed on
1983      a wait queue
1984*/
1985
1986static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1987{
1988	struct dlm_ls *ls = r->res_ls;
1989	int i, error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1990
1991	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1992		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1993		r->res_first_lkid = lkb->lkb_id;
1994		lkb->lkb_nodeid = r->res_nodeid;
1995		return 0;
1996	}
1997
1998	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1999		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2000		return 1;
2001	}
2002
2003	if (r->res_nodeid == 0) {
2004		lkb->lkb_nodeid = 0;
2005		return 0;
2006	}
2007
2008	if (r->res_nodeid > 0) {
2009		lkb->lkb_nodeid = r->res_nodeid;
2010		return 0;
2011	}
2012
2013	DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
2014
2015	dir_nodeid = dlm_dir_nodeid(r);
2016
2017	if (dir_nodeid != our_nodeid) {
2018		r->res_first_lkid = lkb->lkb_id;
2019		send_lookup(r, lkb);
2020		return 1;
2021	}
2022
2023	for (i = 0; i < 2; i++) {
2024		/* It's possible for dlm_scand to remove an old rsb for
2025		   this same resource from the toss list, for us to create
2026		   a new one, look up the master locally, and find that it
2027		   already exists just before dlm_scand does the
2028		   dir_remove() on the previous rsb. */
2029
2030		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
2031				       r->res_length, &ret_nodeid);
2032		if (!error)
2033			break;
2034		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
2035		schedule();
2036	}
2037	if (error && error != -EEXIST)
2038		return error;
2039
2040	if (ret_nodeid == our_nodeid) {
2041		r->res_first_lkid = 0;
2042		r->res_nodeid = 0;
2043		lkb->lkb_nodeid = 0;
2044	} else {
2045		r->res_first_lkid = lkb->lkb_id;
2046		r->res_nodeid = ret_nodeid;
2047		lkb->lkb_nodeid = ret_nodeid;
2048	}
2049	return 0;
2050}
2051
2052static void process_lookup_list(struct dlm_rsb *r)
2053{
2054	struct dlm_lkb *lkb, *safe;
2055
2056	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2057		list_del_init(&lkb->lkb_rsb_lookup);
2058		_request_lock(r, lkb);
2059		schedule();
2060	}
2061}
2062
2063/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2064
2065static void confirm_master(struct dlm_rsb *r, int error)
2066{
2067	struct dlm_lkb *lkb;
2068
2069	if (!r->res_first_lkid)
2070		return;
2071
2072	switch (error) {
2073	case 0:
2074	case -EINPROGRESS:
2075		r->res_first_lkid = 0;
2076		process_lookup_list(r);
2077		break;
2078
2079	case -EAGAIN:
2080	case -EBADR:
2081	case -ENOTBLK:
2082		/* the remote request failed and won't be retried (it was
2083		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2084		   lkb the first_lkid */
2085
2086		r->res_first_lkid = 0;
2087
2088		if (!list_empty(&r->res_lookup)) {
2089			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2090					 lkb_rsb_lookup);
2091			list_del_init(&lkb->lkb_rsb_lookup);
2092			r->res_first_lkid = lkb->lkb_id;
2093			_request_lock(r, lkb);
2094		}
2095		break;
2096
2097	default:
2098		log_error(r->res_ls, "confirm_master unknown error %d", error);
2099	}
2100}
2101
2102static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2103			 int namelen, unsigned long timeout_cs,
2104			 void (*ast) (void *astparam),
2105			 void *astparam,
2106			 void (*bast) (void *astparam, int mode),
2107			 struct dlm_args *args)
2108{
2109	int rv = -EINVAL;
2110
2111	/* check for invalid arg usage */
2112
2113	if (mode < 0 || mode > DLM_LOCK_EX)
2114		goto out;
2115
2116	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2117		goto out;
2118
2119	if (flags & DLM_LKF_CANCEL)
2120		goto out;
2121
2122	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2123		goto out;
2124
2125	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2126		goto out;
2127
2128	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2129		goto out;
2130
2131	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2132		goto out;
2133
2134	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2135		goto out;
2136
2137	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2138		goto out;
2139
2140	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2141		goto out;
2142
2143	if (!ast || !lksb)
2144		goto out;
2145
2146	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2147		goto out;
2148
2149	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2150		goto out;
2151
2152	/* these args will be copied to the lkb in validate_lock_args,
2153	   it cannot be done now because when converting locks, fields in
2154	   an active lkb cannot be modified before locking the rsb */
2155
2156	args->flags = flags;
2157	args->astfn = ast;
2158	args->astparam = astparam;
2159	args->bastfn = bast;
2160	args->timeout = timeout_cs;
2161	args->mode = mode;
2162	args->lksb = lksb;
2163	rv = 0;
2164 out:
2165	return rv;
2166}
2167
2168static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2169{
2170	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2171		      DLM_LKF_FORCEUNLOCK))
2172		return -EINVAL;
2173
2174	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2175		return -EINVAL;
2176
2177	args->flags = flags;
2178	args->astparam = astarg;
2179	return 0;
2180}
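
/* e.g. DLM_LKF_CANCEL alone and DLM_LKF_FORCEUNLOCK alone are both
   accepted here, but combining the two is rejected with -EINVAL. */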
2181
2182static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2183			      struct dlm_args *args)
2184{
2185	int rv = -EINVAL;
2186
2187	if (args->flags & DLM_LKF_CONVERT) {
2188		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2189			goto out;
2190
2191		if (args->flags & DLM_LKF_QUECVT &&
2192		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2193			goto out;
2194
2195		rv = -EBUSY;
2196		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2197			goto out;
2198
2199		if (lkb->lkb_wait_type)
2200			goto out;
2201
2202		if (is_overlap(lkb))
2203			goto out;
2204	}
2205
2206	lkb->lkb_exflags = args->flags;
2207	lkb->lkb_sbflags = 0;
2208	lkb->lkb_astfn = args->astfn;
2209	lkb->lkb_astparam = args->astparam;
2210	lkb->lkb_bastfn = args->bastfn;
2211	lkb->lkb_rqmode = args->mode;
2212	lkb->lkb_lksb = args->lksb;
2213	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2214	lkb->lkb_ownpid = (int) current->pid;
2215	lkb->lkb_timeout_cs = args->timeout;
2216	rv = 0;
2217 out:
2218	if (rv)
2219		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2220			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2221			  lkb->lkb_status, lkb->lkb_wait_type,
2222			  lkb->lkb_resource->res_name);
2223	return rv;
2224}
2225
2226/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2227   for success */
2228
2229/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2230   because there may be a lookup in progress and it's valid to do
2231	   cancel/force-unlock on it */
2232
2233static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2234{
2235	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2236	int rv = -EINVAL;
2237
2238	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2239		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2240		dlm_print_lkb(lkb);
2241		goto out;
2242	}
2243
2244	/* an lkb may still exist even though the lock is EOL'ed due to a
2245	   cancel, unlock or failed noqueue request; an app can't use these
2246	   locks; return same error as if the lkid had not been found at all */
2247
2248	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2249		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2250		rv = -ENOENT;
2251		goto out;
2252	}
2253
2254	/* an lkb may be waiting for an rsb lookup to complete where the
2255	   lookup was initiated by another lock */
2256
2257	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2258		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2259			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2260			list_del_init(&lkb->lkb_rsb_lookup);
2261			queue_cast(lkb->lkb_resource, lkb,
2262				   args->flags & DLM_LKF_CANCEL ?
2263				   -DLM_ECANCEL : -DLM_EUNLOCK);
2264			unhold_lkb(lkb); /* undoes create_lkb() */
2265		}
2266		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2267		rv = -EBUSY;
2268		goto out;
2269	}
2270
2271	/* cancel not allowed with another cancel/unlock in progress */
2272
2273	if (args->flags & DLM_LKF_CANCEL) {
2274		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2275			goto out;
2276
2277		if (is_overlap(lkb))
2278			goto out;
2279
2280		/* don't let scand try to do a cancel */
2281		del_timeout(lkb);
2282
2283		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2284			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2285			rv = -EBUSY;
2286			goto out;
2287		}
2288
2289		/* there's nothing to cancel */
2290		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2291		    !lkb->lkb_wait_type) {
2292			rv = -EBUSY;
2293			goto out;
2294		}
2295
2296		switch (lkb->lkb_wait_type) {
2297		case DLM_MSG_LOOKUP:
2298		case DLM_MSG_REQUEST:
2299			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2300			rv = -EBUSY;
2301			goto out;
2302		case DLM_MSG_UNLOCK:
2303		case DLM_MSG_CANCEL:
2304			goto out;
2305		}
2306		/* add_to_waiters() will set OVERLAP_CANCEL */
2307		goto out_ok;
2308	}
2309
2310	/* do we need to allow a force-unlock if there's a normal unlock
2311	   already in progress?  in what conditions could the normal unlock
2312	   fail such that we'd want to send a force-unlock to be sure? */
2313
2314	if (args->flags & DLM_LKF_FORCEUNLOCK) {
2315		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2316			goto out;
2317
2318		if (is_overlap_unlock(lkb))
2319			goto out;
2320
2321		/* don't let scand try to do a cancel */
2322		del_timeout(lkb);
2323
2324		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2325			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2326			rv = -EBUSY;
2327			goto out;
2328		}
2329
2330		switch (lkb->lkb_wait_type) {
2331		case DLM_MSG_LOOKUP:
2332		case DLM_MSG_REQUEST:
2333			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2334			rv = -EBUSY;
2335			goto out;
2336		case DLM_MSG_UNLOCK:
2337			goto out;
2338		}
2339		/* add_to_waiters() will set OVERLAP_UNLOCK */
2340		goto out_ok;
2341	}
2342
2343	/* normal unlock not allowed if there's any op in progress */
2344	rv = -EBUSY;
2345	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2346		goto out;
2347
2348 out_ok:
2349	/* an overlapping op shouldn't blow away exflags from other op */
2350	lkb->lkb_exflags |= args->flags;
2351	lkb->lkb_sbflags = 0;
2352	lkb->lkb_astparam = args->astparam;
2353	rv = 0;
2354 out:
2355	if (rv)
2356		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2357			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2358			  args->flags, lkb->lkb_wait_type,
2359			  lkb->lkb_resource->res_name);
2360	return rv;
2361}
2362
2363/*
2364 * Four stage 4 varieties:
2365 * do_request(), do_convert(), do_unlock(), do_cancel()
2366 * These are called on the master node for the given lock and
2367 * from the central locking logic.
2368 */
2369
2370static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2371{
2372	int error = 0;
2373
2374	if (can_be_granted(r, lkb, 1, NULL)) {
2375		grant_lock(r, lkb);
2376		queue_cast(r, lkb, 0);
2377		goto out;
2378	}
2379
2380	if (can_be_queued(lkb)) {
2381		error = -EINPROGRESS;
2382		add_lkb(r, lkb, DLM_LKSTS_WAITING);
2383		add_timeout(lkb);
2384		goto out;
2385	}
2386
2387	error = -EAGAIN;
2388	queue_cast(r, lkb, -EAGAIN);
2389 out:
2390	return error;
2391}
2392
2393static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2394			       int error)
2395{
2396	switch (error) {
2397	case -EAGAIN:
2398		if (force_blocking_asts(lkb))
2399			send_blocking_asts_all(r, lkb);
2400		break;
2401	case -EINPROGRESS:
2402		send_blocking_asts(r, lkb);
2403		break;
2404	}
2405}
2406
2407static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2408{
2409	int error = 0;
2410	int deadlk = 0;
2411
2412	/* changing an existing lock may allow others to be granted */
2413
2414	if (can_be_granted(r, lkb, 1, &deadlk)) {
2415		grant_lock(r, lkb);
2416		queue_cast(r, lkb, 0);
2417		goto out;
2418	}
2419
2420	/* can_be_granted() detected that this lock would block in a conversion
2421	   deadlock, so we leave it on the granted queue and return EDEADLK in
2422	   the ast for the convert. */
2423
2424	if (deadlk) {
2425		/* it's left on the granted queue */
2426		revert_lock(r, lkb);
2427		queue_cast(r, lkb, -EDEADLK);
2428		error = -EDEADLK;
2429		goto out;
2430	}
2431
2432	/* is_demoted() means the can_be_granted() above set the grmode
2433	   to NL, and left us on the granted queue.  This auto-demotion
2434	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
2435	   now grantable.  We have to try to grant other converting locks
2436	   before we try again to grant this one. */
2437
2438	if (is_demoted(lkb)) {
2439		grant_pending_convert(r, DLM_LOCK_IV, NULL);
2440		if (_can_be_granted(r, lkb, 1)) {
2441			grant_lock(r, lkb);
2442			queue_cast(r, lkb, 0);
2443			goto out;
2444		}
2445		/* else fall through and move to convert queue */
2446	}
2447
2448	if (can_be_queued(lkb)) {
2449		error = -EINPROGRESS;
2450		del_lkb(r, lkb);
2451		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2452		add_timeout(lkb);
2453		goto out;
2454	}
2455
2456	error = -EAGAIN;
2457	queue_cast(r, lkb, -EAGAIN);
2458 out:
2459	return error;
2460}
2461
2462static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2463			       int error)
2464{
2465	switch (error) {
2466	case 0:
2467		grant_pending_locks(r);
2468		/* grant_pending_locks also sends basts */
2469		break;
2470	case -EAGAIN:
2471		if (force_blocking_asts(lkb))
2472			send_blocking_asts_all(r, lkb);
2473		break;
2474	case -EINPROGRESS:
2475		send_blocking_asts(r, lkb);
2476		break;
2477	}
2478}
2479
2480static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2481{
2482	remove_lock(r, lkb);
2483	queue_cast(r, lkb, -DLM_EUNLOCK);
2484	return -DLM_EUNLOCK;
2485}
2486
2487static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2488			      int error)
2489{
2490	grant_pending_locks(r);
2491}
2492
2493/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2494 
2495static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2496{
2497	int error;
2498
2499	error = revert_lock(r, lkb);
2500	if (error) {
2501		queue_cast(r, lkb, -DLM_ECANCEL);
2502		return -DLM_ECANCEL;
2503	}
2504	return 0;
2505}
2506
2507static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2508			      int error)
2509{
2510	if (error)
2511		grant_pending_locks(r);
2512}
2513
2514/*
2515 * Four stage 3 varieties:
2516 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
2517 */
2518
2519/* add a new lkb to a possibly new rsb, called by requesting process */
2520
2521static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2522{
2523	int error;
2524
2525	/* set_master: sets lkb nodeid from r */
2526
2527	error = set_master(r, lkb);
2528	if (error < 0)
2529		goto out;
2530	if (error) {
2531		error = 0;
2532		goto out;
2533	}
2534
2535	if (is_remote(r)) {
2536		/* receive_request() calls do_request() on remote node */
2537		error = send_request(r, lkb);
2538	} else {
2539		error = do_request(r, lkb);
2540		/* for remote locks the request_reply is sent
2541		   between do_request and do_request_effects */
2542		do_request_effects(r, lkb, error);
2543	}
2544 out:
2545	return error;
2546}
2547
2548/* change some property of an existing lkb, e.g. mode */
2549
2550static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2551{
2552	int error;
2553
2554	if (is_remote(r)) {
2555		/* receive_convert() calls do_convert() on remote node */
2556		error = send_convert(r, lkb);
2557	} else {
2558		error = do_convert(r, lkb);
2559		/* for remote locks the convert_reply is sent
2560		   between do_convert and do_convert_effects */
2561		do_convert_effects(r, lkb, error);
2562	}
2563
2564	return error;
2565}
2566
2567/* remove an existing lkb from the granted queue */
2568
2569static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2570{
2571	int error;
2572
2573	if (is_remote(r)) {
2574		/* receive_unlock() calls do_unlock() on remote node */
2575		error = send_unlock(r, lkb);
2576	} else {
2577		error = do_unlock(r, lkb);
2578		/* for remote locks the unlock_reply is sent
2579		   between do_unlock and do_unlock_effects */
2580		do_unlock_effects(r, lkb, error);
2581	}
2582
2583	return error;
2584}
2585
2586/* remove an existing lkb from the convert or wait queue */
2587
2588static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2589{
2590	int error;
2591
2592	if (is_remote(r)) {
2593		/* receive_cancel() calls do_cancel() on remote node */
2594		error = send_cancel(r, lkb);
2595	} else {
2596		error = do_cancel(r, lkb);
2597		/* for remote locks the cancel_reply is sent
2598		   between do_cancel and do_cancel_effects */
2599		do_cancel_effects(r, lkb, error);
2600	}
2601
2602	return error;
2603}
2604
2605/*
2606 * Four stage 2 varieties:
2607 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
2608 */
2609
2610static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
2611			int len, struct dlm_args *args)
2612{
2613	struct dlm_rsb *r;
2614	int error;
2615
2616	error = validate_lock_args(ls, lkb, args);
2617	if (error)
2618		goto out;
2619
2620	error = find_rsb(ls, name, len, R_CREATE, &r);
2621	if (error)
2622		goto out;
2623
2624	lock_rsb(r);
2625
2626	attach_lkb(r, lkb);
2627	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
2628
2629	error = _request_lock(r, lkb);
2630
2631	unlock_rsb(r);
2632	put_rsb(r);
2633
2634 out:
2635	return error;
2636}
2637
2638static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2639			struct dlm_args *args)
2640{
2641	struct dlm_rsb *r;
2642	int error;
2643
2644	r = lkb->lkb_resource;
2645
2646	hold_rsb(r);
2647	lock_rsb(r);
2648
2649	error = validate_lock_args(ls, lkb, args);
2650	if (error)
2651		goto out;
2652
2653	error = _convert_lock(r, lkb);
2654 out:
2655	unlock_rsb(r);
2656	put_rsb(r);
2657	return error;
2658}
2659
2660static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2661		       struct dlm_args *args)
2662{
2663	struct dlm_rsb *r;
2664	int error;
2665
2666	r = lkb->lkb_resource;
2667
2668	hold_rsb(r);
2669	lock_rsb(r);
2670
2671	error = validate_unlock_args(lkb, args);
2672	if (error)
2673		goto out;
2674
2675	error = _unlock_lock(r, lkb);
2676 out:
2677	unlock_rsb(r);
2678	put_rsb(r);
2679	return error;
2680}
2681
2682static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2683		       struct dlm_args *args)
2684{
2685	struct dlm_rsb *r;
2686	int error;
2687
2688	r = lkb->lkb_resource;
2689
2690	hold_rsb(r);
2691	lock_rsb(r);
2692
2693	error = validate_unlock_args(lkb, args);
2694	if (error)
2695		goto out;
2696
2697	error = _cancel_lock(r, lkb);
2698 out:
2699	unlock_rsb(r);
2700	put_rsb(r);
2701	return error;
2702}
2703
2704/*
2705 * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
2706 */
2707
2708int dlm_lock(dlm_lockspace_t *lockspace,
2709	     int mode,
2710	     struct dlm_lksb *lksb,
2711	     uint32_t flags,
2712	     void *name,
2713	     unsigned int namelen,
2714	     uint32_t parent_lkid,
2715	     void (*ast) (void *astarg),
2716	     void *astarg,
2717	     void (*bast) (void *astarg, int mode))
2718{
2719	struct dlm_ls *ls;
2720	struct dlm_lkb *lkb;
2721	struct dlm_args args;
2722	int error, convert = flags & DLM_LKF_CONVERT;
2723
2724	ls = dlm_find_lockspace_local(lockspace);
2725	if (!ls)
2726		return -EINVAL;
2727
2728	dlm_lock_recovery(ls);
2729
2730	if (convert)
2731		error = find_lkb(ls, lksb->sb_lkid, &lkb);
2732	else
2733		error = create_lkb(ls, &lkb);
2734
2735	if (error)
2736		goto out;
2737
2738	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
2739			      astarg, bast, &args);
2740	if (error)
2741		goto out_put;
2742
2743	if (convert)
2744		error = convert_lock(ls, lkb, &args);
2745	else
2746		error = request_lock(ls, lkb, name, namelen, &args);
2747
2748	if (error == -EINPROGRESS)
2749		error = 0;
2750 out_put:
2751	if (convert || error)
2752		__put_lkb(ls, lkb);
2753	if (error == -EAGAIN || error == -EDEADLK)
2754		error = 0;
2755 out:
2756	dlm_unlock_recovery(ls);
2757	dlm_put_lockspace(ls);
2758	return error;
2759}
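
/* A minimal usage sketch for dlm_lock() (hypothetical, not part of
   this file): a kernel caller takes an EX lock synchronously by
   waiting for the ast.  Assumes <linux/completion.h>; the example_*
   names are illustrative.  The final result of the request arrives
   in lksb.sb_status when the ast fires. */

struct example_lock {
	struct dlm_lksb lksb;
	struct completion done;
};

static void example_ast(void *astarg)
{
	struct example_lock *el = astarg;

	/* request, convert or unlock completed; result is in sb_status */
	complete(&el->done);
}

static int example_lock_ex(dlm_lockspace_t *ls, const char *name,
			   struct example_lock *el)
{
	int error;

	init_completion(&el->done);

	error = dlm_lock(ls, DLM_LOCK_EX, &el->lksb, 0, (void *)name,
			 strlen(name), 0, example_ast, el, NULL);
	if (error)
		return error;

	wait_for_completion(&el->done);
	return el->lksb.sb_status;
}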
2760
2761int dlm_unlock(dlm_lockspace_t *lockspace,
2762	       uint32_t lkid,
2763	       uint32_t flags,
2764	       struct dlm_lksb *lksb,
2765	       void *astarg)
2766{
2767	struct dlm_ls *ls;
2768	struct dlm_lkb *lkb;
2769	struct dlm_args args;
2770	int error;
2771
2772	ls = dlm_find_lockspace_local(lockspace);
2773	if (!ls)
2774		return -EINVAL;
2775
2776	dlm_lock_recovery(ls);
2777
2778	error = find_lkb(ls, lkid, &lkb);
2779	if (error)
2780		goto out;
2781
2782	error = set_unlock_args(flags, astarg, &args);
2783	if (error)
2784		goto out_put;
2785
2786	if (flags & DLM_LKF_CANCEL)
2787		error = cancel_lock(ls, lkb, &args);
2788	else
2789		error = unlock_lock(ls, lkb, &args);
2790
2791	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
2792		error = 0;
2793	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
2794		error = 0;
2795 out_put:
2796	dlm_put_lkb(lkb);
2797 out:
2798	dlm_unlock_recovery(ls);
2799	dlm_put_lockspace(ls);
2800	return error;
2801}
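
/* Companion sketch for dlm_unlock() (hypothetical): releasing the lock
   taken in the example above.  The ast registered at lock time fires
   again for the unlock, with sb_status set to -DLM_EUNLOCK, matching
   the handling above. */

static int example_unlock(dlm_lockspace_t *ls, struct example_lock *el)
{
	int error;

	init_completion(&el->done);	/* reuse the completion */

	error = dlm_unlock(ls, el->lksb.sb_lkid, 0, &el->lksb, el);
	if (error)
		return error;

	wait_for_completion(&el->done);
	return 0;
}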
2802
2803/*
2804 * send/receive routines for remote operations and replies
2805 *
2806 * send_args
2807 * send_common
2808 * send_request			receive_request
2809 * send_convert			receive_convert
2810 * send_unlock			receive_unlock
2811 * send_cancel			receive_cancel
2812 * send_grant			receive_grant
2813 * send_bast			receive_bast
2814 * send_lookup			receive_lookup
2815 * send_remove			receive_remove
2816 *
2817 * 				send_common_reply
2818 * receive_request_reply	send_request_reply
2819 * receive_convert_reply	send_convert_reply
2820 * receive_unlock_reply		send_unlock_reply
2821 * receive_cancel_reply		send_cancel_reply
2822 * receive_lookup_reply		send_lookup_reply
2823 */
2824
2825static int _create_message(struct dlm_ls *ls, int mb_len,
2826			   int to_nodeid, int mstype,
2827			   struct dlm_message **ms_ret,
2828			   struct dlm_mhandle **mh_ret)
2829{
2830	struct dlm_message *ms;
2831	struct dlm_mhandle *mh;
2832	char *mb;
2833
2834	/* get_buffer gives us a message handle (mh) that we need to
2835	   pass into lowcomms_commit and a message buffer (mb) that we
2836	   write our data into */
2837
2838	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
2839	if (!mh)
2840		return -ENOBUFS;
2841
2842	memset(mb, 0, mb_len);
2843
2844	ms = (struct dlm_message *) mb;
2845
2846	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2847	ms->m_header.h_lockspace = ls->ls_global_id;
2848	ms->m_header.h_nodeid = dlm_our_nodeid();
2849	ms->m_header.h_length = mb_len;
2850	ms->m_header.h_cmd = DLM_MSG;
2851
2852	ms->m_type = mstype;
2853
2854	*mh_ret = mh;
2855	*ms_ret = ms;
2856	return 0;
2857}
2858
2859static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2860			  int to_nodeid, int mstype,
2861			  struct dlm_message **ms_ret,
2862			  struct dlm_mhandle **mh_ret)
2863{
2864	int mb_len = sizeof(struct dlm_message);
2865
2866	switch (mstype) {
2867	case DLM_MSG_REQUEST:
2868	case DLM_MSG_LOOKUP:
2869	case DLM_MSG_REMOVE:
2870		mb_len += r->res_length;
2871		break;
2872	case DLM_MSG_CONVERT:
2873	case DLM_MSG_UNLOCK:
2874	case DLM_MSG_REQUEST_REPLY:
2875	case DLM_MSG_CONVERT_REPLY:
2876	case DLM_MSG_GRANT:
2877		if (lkb && lkb->lkb_lvbptr)
2878			mb_len += r->res_ls->ls_lvblen;
2879		break;
2880	}
2881
2882	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
2883			       ms_ret, mh_ret);
2884}
2885
2886/* further lowcomms enhancements or alternate implementations may make
2887   the return value from this function useful at some point */
2888
2889static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2890{
2891	dlm_message_out(ms);
2892	dlm_lowcomms_commit_buffer(mh);
2893	return 0;
2894}
2895
2896static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2897		      struct dlm_message *ms)
2898{
2899	ms->m_nodeid   = lkb->lkb_nodeid;
2900	ms->m_pid      = lkb->lkb_ownpid;
2901	ms->m_lkid     = lkb->lkb_id;
2902	ms->m_remid    = lkb->lkb_remid;
2903	ms->m_exflags  = lkb->lkb_exflags;
2904	ms->m_sbflags  = lkb->lkb_sbflags;
2905	ms->m_flags    = lkb->lkb_flags;
2906	ms->m_lvbseq   = lkb->lkb_lvbseq;
2907	ms->m_status   = lkb->lkb_status;
2908	ms->m_grmode   = lkb->lkb_grmode;
2909	ms->m_rqmode   = lkb->lkb_rqmode;
2910	ms->m_hash     = r->res_hash;
2911
2912	/* m_result and m_bastmode are set from function args,
2913	   not from lkb fields */
2914
2915	if (lkb->lkb_bastfn)
2916		ms->m_asts |= DLM_CB_BAST;
2917	if (lkb->lkb_astfn)
2918		ms->m_asts |= DLM_CB_CAST;
2919
2920	/* compare with switch in create_message; send_remove() doesn't
2921	   use send_args() */
2922
2923	switch (ms->m_type) {
2924	case DLM_MSG_REQUEST:
2925	case DLM_MSG_LOOKUP:
2926		memcpy(ms->m_extra, r->res_name, r->res_length);
2927		break;
2928	case DLM_MSG_CONVERT:
2929	case DLM_MSG_UNLOCK:
2930	case DLM_MSG_REQUEST_REPLY:
2931	case DLM_MSG_CONVERT_REPLY:
2932	case DLM_MSG_GRANT:
2933		if (!lkb->lkb_lvbptr)
2934			break;
2935		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2936		break;
2937	}
2938}
2939
2940static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2941{
2942	struct dlm_message *ms;
2943	struct dlm_mhandle *mh;
2944	int to_nodeid, error;
2945
2946	to_nodeid = r->res_nodeid;
2947
2948	error = add_to_waiters(lkb, mstype, to_nodeid);
2949	if (error)
2950		return error;
2951
2952	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2953	if (error)
2954		goto fail;
2955
2956	send_args(r, lkb, ms);
2957
2958	error = send_message(mh, ms);
2959	if (error)
2960		goto fail;
2961	return 0;
2962
2963 fail:
2964	remove_from_waiters(lkb, msg_reply_type(mstype));
2965	return error;
2966}
2967
2968static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2969{
2970	return send_common(r, lkb, DLM_MSG_REQUEST);
2971}
2972
2973static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2974{
2975	int error;
2976
2977	error = send_common(r, lkb, DLM_MSG_CONVERT);
2978
2979	/* down conversions go without a reply from the master */
2980	if (!error && down_conversion(lkb)) {
2981		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2982		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
2983		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2984		r->res_ls->ls_stub_ms.m_result = 0;
2985		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2986	}
2987
2988	return error;
2989}
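
/* e.g. a PR -> NL convert is a down conversion: it can never block,
   so rather than waiting for the master's reply, a stub convert_reply
   is processed locally above. */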
2990
2991/* FIXME: if this lkb is the only lock we hold on the rsb, then set
2992   MASTER_UNCERTAIN to force the next request on the rsb to confirm
2993   that the master is still correct. */
2994
2995static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2996{
2997	return send_common(r, lkb, DLM_MSG_UNLOCK);
2998}
2999
3000static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3001{
3002	return send_common(r, lkb, DLM_MSG_CANCEL);
3003}
3004
3005static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3006{
3007	struct dlm_message *ms;
3008	struct dlm_mhandle *mh;
3009	int to_nodeid, error;
3010
3011	to_nodeid = lkb->lkb_nodeid;
3012
3013	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3014	if (error)
3015		goto out;
3016
3017	send_args(r, lkb, ms);
3018
3019	ms->m_result = 0;
3020
3021	error = send_message(mh, ms);
3022 out:
3023	return error;
3024}
3025
3026static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3027{
3028	struct dlm_message *ms;
3029	struct dlm_mhandle *mh;
3030	int to_nodeid, error;
3031
3032	to_nodeid = lkb->lkb_nodeid;
3033
3034	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3035	if (error)
3036		goto out;
3037
3038	send_args(r, lkb, ms);
3039
3040	ms->m_bastmode = mode;
3041
3042	error = send_message(mh, ms);
3043 out:
3044	return error;
3045}
3046
3047static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3048{
3049	struct dlm_message *ms;
3050	struct dlm_mhandle *mh;
3051	int to_nodeid, error;
3052
3053	to_nodeid = dlm_dir_nodeid(r);
3054
3055	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3056	if (error)
3057		return error;
3058
3059	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3060	if (error)
3061		goto fail;
3062
3063	send_args(r, lkb, ms);
3064
3065	error = send_message(mh, ms);
3066	if (error)
3067		goto fail;
3068	return 0;
3069
3070 fail:
3071	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3072	return error;
3073}
3074
3075static int send_remove(struct dlm_rsb *r)
3076{
3077	struct dlm_message *ms;
3078	struct dlm_mhandle *mh;
3079	int to_nodeid, error;
3080
3081	to_nodeid = dlm_dir_nodeid(r);
3082
3083	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3084	if (error)
3085		goto out;
3086
3087	memcpy(ms->m_extra, r->res_name, r->res_length);
3088	ms->m_hash = r->res_hash;
3089
3090	error = send_message(mh, ms);
3091 out:
3092	return error;
3093}
3094
3095static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3096			     int mstype, int rv)
3097{
3098	struct dlm_message *ms;
3099	struct dlm_mhandle *mh;
3100	int to_nodeid, error;
3101
3102	to_nodeid = lkb->lkb_nodeid;
3103
3104	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3105	if (error)
3106		goto out;
3107
3108	send_args(r, lkb, ms);
3109
3110	ms->m_result = rv;
3111
3112	error = send_message(mh, ms);
3113 out:
3114	return error;
3115}
3116
3117static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3118{
3119	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3120}
3121
3122static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3123{
3124	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3125}
3126
3127static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3128{
3129	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3130}
3131
3132static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3133{
3134	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3135}
3136
3137static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3138			     int ret_nodeid, int rv)
3139{
3140	struct dlm_rsb *r = &ls->ls_stub_rsb;
3141	struct dlm_message *ms;
3142	struct dlm_mhandle *mh;
3143	int error, nodeid = ms_in->m_header.h_nodeid;
3144
3145	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3146	if (error)
3147		goto out;
3148
3149	ms->m_lkid = ms_in->m_lkid;
3150	ms->m_result = rv;
3151	ms->m_nodeid = ret_nodeid;
3152
3153	error = send_message(mh, ms);
3154 out:
3155	return error;
3156}
3157
3158/* which args we save from a received message depends heavily on the type
3159   of message, unlike the send side where we can safely send everything about
3160   the lkb for any type of message */
3161
3162static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3163{
3164	lkb->lkb_exflags = ms->m_exflags;
3165	lkb->lkb_sbflags = ms->m_sbflags;
3166	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3167		         (ms->m_flags & 0x0000FFFF);
3168}
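
/* Note: only the low 16 bits of m_flags are taken from a received
   message; internal flags such as DLM_IFL_MSTCPY live in the upper
   16 bits and are preserved on the local lkb. */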
3169
3170static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3171{
3172	if (ms->m_flags == DLM_IFL_STUB_MS)
3173		return;
3174
3175	lkb->lkb_sbflags = ms->m_sbflags;
3176	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3177		         (ms->m_flags & 0x0000FFFF);
3178}
3179
3180static int receive_extralen(struct dlm_message *ms)
3181{
3182	return (ms->m_header.h_length - sizeof(struct dlm_message));
3183}
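
/* e.g. a DLM_MSG_REQUEST carrying a 16-byte resource name has
   h_length = sizeof(struct dlm_message) + 16, so this returns 16
   (see the length setup in create_message). */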
3184
3185static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3186		       struct dlm_message *ms)
3187{
3188	int len;
3189
3190	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3191		if (!lkb->lkb_lvbptr)
3192			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3193		if (!lkb->lkb_lvbptr)
3194			return -ENOMEM;
3195		len = receive_extralen(ms);
3196		if (len > DLM_RESNAME_MAXLEN)
3197			len = DLM_RESNAME_MAXLEN;
3198		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3199	}
3200	return 0;
3201}
3202
3203static void fake_bastfn(void *astparam, int mode)
3204{
3205	log_print("fake_bastfn should not be called");
3206}
3207
3208static void fake_astfn(void *astparam)
3209{
3210	log_print("fake_astfn should not be called");
3211}
3212
3213static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3214				struct dlm_message *ms)
3215{
3216	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3217	lkb->lkb_ownpid = ms->m_pid;
3218	lkb->lkb_remid = ms->m_lkid;
3219	lkb->lkb_grmode = DLM_LOCK_IV;
3220	lkb->lkb_rqmode = ms->m_rqmode;
3221
3222	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3223	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3224
3225	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3226		/* lkb was just created so there won't be an lvb yet */
3227		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3228		if (!lkb->lkb_lvbptr)
3229			return -ENOMEM;
3230	}
3231
3232	return 0;
3233}
3234
3235static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3236				struct dlm_message *ms)
3237{
3238	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3239		return -EBUSY;
3240
3241	if (receive_lvb(ls, lkb, ms))
3242		return -ENOMEM;
3243
3244	lkb->lkb_rqmode = ms->m_rqmode;
3245	lkb->lkb_lvbseq = ms->m_lvbseq;
3246
3247	return 0;
3248}
3249
3250static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3251			       struct dlm_message *ms)
3252{
3253	if (receive_lvb(ls, lkb, ms))
3254		return -ENOMEM;
3255	return 0;
3256}
3257
3258/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3259   uses to send a reply and that the remote end uses to process the reply. */
3260
3261static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3262{
3263	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3264	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3265	lkb->lkb_remid = ms->m_lkid;
3266}
3267
3268/* This is called after the rsb is locked so that we can safely inspect
3269   fields in the lkb. */
3270
3271static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3272{
3273	int from = ms->m_header.h_nodeid;
3274	int error = 0;
3275
3276	switch (ms->m_type) {
3277	case DLM_MSG_CONVERT:
3278	case DLM_MSG_UNLOCK:
3279	case DLM_MSG_CANCEL:
3280		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3281			error = -EINVAL;
3282		break;
3283
3284	case DLM_MSG_CONVERT_REPLY:
3285	case DLM_MSG_UNLOCK_REPLY:
3286	case DLM_MSG_CANCEL_REPLY:
3287	case DLM_MSG_GRANT:
3288	case DLM_MSG_BAST:
3289		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3290			error = -EINVAL;
3291		break;
3292
3293	case DLM_MSG_REQUEST_REPLY:
3294		if (!is_process_copy(lkb))
3295			error = -EINVAL;
3296		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3297			error = -EINVAL;
3298		break;
3299
3300	default:
3301		error = -EINVAL;
3302	}
3303
3304	if (error)
3305		log_error(lkb->lkb_resource->res_ls,
3306			  "ignore invalid message %d from %d %x %x %x %d",
3307			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
3308			  lkb->lkb_flags, lkb->lkb_nodeid);
3309	return error;
3310}
3311
3312static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3313{
3314	struct dlm_lkb *lkb;
3315	struct dlm_rsb *r;
3316	int error, namelen;
3317
3318	error = create_lkb(ls, &lkb);
3319	if (error)
3320		goto fail;
3321
3322	receive_flags(lkb, ms);
3323	lkb->lkb_flags |= DLM_IFL_MSTCPY;
3324	error = receive_request_args(ls, lkb, ms);
3325	if (error) {
3326		__put_lkb(ls, lkb);
3327		goto fail;
3328	}
3329
3330	namelen = receive_extralen(ms);
3331
3332	error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
3333	if (error) {
3334		__put_lkb(ls, lkb);
3335		goto fail;
3336	}
3337
3338	lock_rsb(r);
3339
3340	attach_lkb(r, lkb);
3341	error = do_request(r, lkb);
3342	send_request_reply(r, lkb, error);
3343	do_request_effects(r, lkb, error);
3344
3345	unlock_rsb(r);
3346	put_rsb(r);
3347
3348	if (error == -EINPROGRESS)
3349		error = 0;
3350	if (error)
3351		dlm_put_lkb(lkb);
3352	return;
3353
3354 fail:
3355	setup_stub_lkb(ls, ms);
3356	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3357}
3358
3359static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3360{
3361	struct dlm_lkb *lkb;
3362	struct dlm_rsb *r;
3363	int error, reply = 1;
3364
3365	error = find_lkb(ls, ms->m_remid, &lkb);
3366	if (error)
3367		goto fail;
3368
3369	r = lkb->lkb_resource;
3370
3371	hold_rsb(r);
3372	lock_rsb(r);
3373
3374	error = validate_message(lkb, ms);
3375	if (error)
3376		goto out;
3377
3378	receive_flags(lkb, ms);
3379
3380	error = receive_convert_args(ls, lkb, ms);
3381	if (error) {
3382		send_convert_reply(r, lkb, error);
3383		goto out;
3384	}
3385
3386	reply = !down_conversion(lkb);
3387
3388	error = do_convert(r, lkb);
3389	if (reply)
3390		send_convert_reply(r, lkb, error);
3391	do_convert_effects(r, lkb, error);
3392 out:
3393	unlock_rsb(r);
3394	put_rsb(r);
3395	dlm_put_lkb(lkb);
3396	return;
3397
3398 fail:
3399	setup_stub_lkb(ls, ms);
3400	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3401}
3402
3403static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3404{
3405	struct dlm_lkb *lkb;
3406	struct dlm_rsb *r;
3407	int error;
3408
3409	error = find_lkb(ls, ms->m_remid, &lkb);
3410	if (error)
3411		goto fail;
3412
3413	r = lkb->lkb_resource;
3414
3415	hold_rsb(r);
3416	lock_rsb(r);
3417
3418	error = validate_message(lkb, ms);
3419	if (error)
3420		goto out;
3421
3422	receive_flags(lkb, ms);
3423
3424	error = receive_unlock_args(ls, lkb, ms);
3425	if (error) {
3426		send_unlock_reply(r, lkb, error);
3427		goto out;
3428	}
3429
3430	error = do_unlock(r, lkb);
3431	send_unlock_reply(r, lkb, error);
3432	do_unlock_effects(r, lkb, error);
3433 out:
3434	unlock_rsb(r);
3435	put_rsb(r);
3436	dlm_put_lkb(lkb);
3437	return;
3438
3439 fail:
3440	setup_stub_lkb(ls, ms);
3441	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3442}
3443
3444static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
3445{
3446	struct dlm_lkb *lkb;
3447	struct dlm_rsb *r;
3448	int error;
3449
3450	error = find_lkb(ls, ms->m_remid, &lkb);
3451	if (error)
3452		goto fail;
3453
3454	receive_flags(lkb, ms);
3455
3456	r = lkb->lkb_resource;
3457
3458	hold_rsb(r);
3459	lock_rsb(r);
3460
3461	error = validate_message(lkb, ms);
3462	if (error)
3463		goto out;
3464
3465	error = do_cancel(r, lkb);
3466	send_cancel_reply(r, lkb, error);
3467	do_cancel_effects(r, lkb, error);
3468 out:
3469	unlock_rsb(r);
3470	put_rsb(r);
3471	dlm_put_lkb(lkb);
3472	return;
3473
3474 fail:
3475	setup_stub_lkb(ls, ms);
3476	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3477}
3478
3479static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
3480{
3481	struct dlm_lkb *lkb;
3482	struct dlm_rsb *r;
3483	int error;
3484
3485	error = find_lkb(ls, ms->m_remid, &lkb);
3486	if (error) {
3487		log_debug(ls, "receive_grant from %d no lkb %x",
3488			  ms->m_header.h_nodeid, ms->m_remid);
3489		return;
3490	}
3491
3492	r = lkb->lkb_resource;
3493
3494	hold_rsb(r);
3495	lock_rsb(r);
3496
3497	error = validate_message(lkb, ms);
3498	if (error)
3499		goto out;
3500
3501	receive_flags_reply(lkb, ms);
3502	if (is_altmode(lkb))
3503		munge_altmode(lkb, ms);
3504	grant_lock_pc(r, lkb, ms);
3505	queue_cast(r, lkb, 0);
3506 out:
3507	unlock_rsb(r);
3508	put_rsb(r);
3509	dlm_put_lkb(lkb);
3510}
3511
3512static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
3513{
3514	struct dlm_lkb *lkb;
3515	struct dlm_rsb *r;
3516	int error;
3517
3518	error = find_lkb(ls, ms->m_remid, &lkb);
3519	if (error) {
3520		log_debug(ls, "receive_bast from %d no lkb %x",
3521			  ms->m_header.h_nodeid, ms->m_remid);
3522		return;
3523	}
3524
3525	r = lkb->lkb_resource;
3526
3527	hold_rsb(r);
3528	lock_rsb(r);
3529
3530	error = validate_message(lkb, ms);
3531	if (error)
3532		goto out;
3533
3534	queue_bast(r, lkb, ms->m_bastmode);
3535 out:
3536	unlock_rsb(r);
3537	put_rsb(r);
3538	dlm_put_lkb(lkb);
3539}
3540
3541static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
3542{
3543	int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
3544
3545	from_nodeid = ms->m_header.h_nodeid;
3546	our_nodeid = dlm_our_nodeid();
3547
3548	len = receive_extralen(ms);
3549
3550	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3551	if (dir_nodeid != our_nodeid) {
3552		log_error(ls, "lookup dir_nodeid %d from %d",
3553			  dir_nodeid, from_nodeid);
3554		error = -EINVAL;
3555		ret_nodeid = -1;
3556		goto out;
3557	}
3558
3559	error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
3560
3561	/* Optimization: we're master so treat lookup as a request */
3562	if (!error && ret_nodeid == our_nodeid) {
3563		receive_request(ls, ms);
3564		return;
3565	}
3566 out:
3567	send_lookup_reply(ls, ms, ret_nodeid, error);
3568}
3569
3570static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
3571{
3572	int len, dir_nodeid, from_nodeid;
3573
3574	from_nodeid = ms->m_header.h_nodeid;
3575
3576	len = receive_extralen(ms);
3577
3578	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3579	if (dir_nodeid != dlm_our_nodeid()) {
3580		log_error(ls, "remove dir entry dir_nodeid %d from %d",
3581			  dir_nodeid, from_nodeid);
3582		return;
3583	}
3584
3585	dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
3586}
3587
3588static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
3589{
3590	do_purge(ls, ms->m_nodeid, ms->m_pid);
3591}
3592
3593static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
3594{
3595	struct dlm_lkb *lkb;
3596	struct dlm_rsb *r;
3597	int error, mstype, result;
3598
3599	error = find_lkb(ls, ms->m_remid, &lkb);
3600	if (error) {
3601		log_debug(ls, "receive_request_reply from %d no lkb %x",
3602			  ms->m_header.h_nodeid, ms->m_remid);
3603		return;
3604	}
3605
3606	r = lkb->lkb_resource;
3607	hold_rsb(r);
3608	lock_rsb(r);
3609
3610	error = validate_message(lkb, ms);
3611	if (error)
3612		goto out;
3613
3614	mstype = lkb->lkb_wait_type;
3615	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
3616	if (error)
3617		goto out;
3618
3619	/* Optimization: the dir node was also the master, so it took our
3620	   lookup as a request and sent request reply instead of lookup reply */
3621	if (mstype == DLM_MSG_LOOKUP) {
3622		r->res_nodeid = ms->m_header.h_nodeid;
3623		lkb->lkb_nodeid = r->res_nodeid;
3624	}
3625
3626	/* this is the value returned from do_request() on the master */
3627	result = ms->m_result;
3628
3629	switch (result) {
3630	case -EAGAIN:
3631		/* request would block (be queued) on remote master */
3632		queue_cast(r, lkb, -EAGAIN);
3633		confirm_master(r, -EAGAIN);
3634		unhold_lkb(lkb); /* undoes create_lkb() */
3635		break;
3636
3637	case -EINPROGRESS:
3638	case 0:
3639		/* request was queued or granted on remote master */
3640		receive_flags_reply(lkb, ms);
3641		lkb->lkb_remid = ms->m_lkid;
3642		if (is_altmode(lkb))
3643			munge_altmode(lkb, ms);
3644		if (result) {
3645			add_lkb(r, lkb, DLM_LKSTS_WAITING);
3646			add_timeout(lkb);
3647		} else {
3648			grant_lock_pc(r, lkb, ms);
3649			queue_cast(r, lkb, 0);
3650		}
3651		confirm_master(r, result);
3652		break;
3653
3654	case -EBADR:
3655	case -ENOTBLK:
3656		/* find_rsb failed to find rsb or rsb wasn't master */
3657		log_debug(ls, "receive_request_reply %x %x master diff %d %d",
3658			  lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
3659		r->res_nodeid = -1;
3660		lkb->lkb_nodeid = -1;
3661
3662		if (is_overlap(lkb)) {
3663			/* we'll ignore error in cancel/unlock reply */
3664			queue_cast_overlap(r, lkb);
3665			confirm_master(r, result);
3666			unhold_lkb(lkb); /* undoes create_lkb() */
3667		} else
3668			_request_lock(r, lkb);
3669		break;
3670
3671	default:
3672		log_error(ls, "receive_request_reply %x error %d",
3673			  lkb->lkb_id, result);
3674	}
3675
3676	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
3677		log_debug(ls, "receive_request_reply %x result %d unlock",
3678			  lkb->lkb_id, result);
3679		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3680		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3681		send_unlock(r, lkb);
3682	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
3683		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
3684		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3685		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3686		send_cancel(r, lkb);
3687	} else {
3688		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3689		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3690	}
3691 out:
3692	unlock_rsb(r);
3693	put_rsb(r);
3694	dlm_put_lkb(lkb);
3695}
3696
3697static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3698				    struct dlm_message *ms)
3699{
3700	/* this is the value returned from do_convert() on the master */
3701	switch (ms->m_result) {
3702	case -EAGAIN:
3703		/* convert would block (be queued) on remote master */
3704		queue_cast(r, lkb, -EAGAIN);
3705		break;
3706
3707	case -EDEADLK:
3708		receive_flags_reply(lkb, ms);
3709		revert_lock_pc(r, lkb);
3710		queue_cast(r, lkb, -EDEADLK);
3711		break;
3712
3713	case -EINPROGRESS:
3714		/* convert was queued on remote master */
3715		receive_flags_reply(lkb, ms);
3716		if (is_demoted(lkb))
3717			munge_demoted(lkb);
3718		del_lkb(r, lkb);
3719		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3720		add_timeout(lkb);
3721		break;
3722
3723	case 0:
3724		/* convert was granted on remote master */
3725		receive_flags_reply(lkb, ms);
3726		if (is_demoted(lkb))
3727			munge_demoted(lkb);
3728		grant_lock_pc(r, lkb, ms);
3729		queue_cast(r, lkb, 0);
3730		break;
3731
3732	default:
3733		log_error(r->res_ls, "receive_convert_reply %x error %d",
3734			  lkb->lkb_id, ms->m_result);
3735	}
3736}
3737
3738static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3739{
3740	struct dlm_rsb *r = lkb->lkb_resource;
3741	int error;
3742
3743	hold_rsb(r);
3744	lock_rsb(r);
3745
3746	error = validate_message(lkb, ms);
3747	if (error)
3748		goto out;
3749
3750	/* stub reply can happen with waiters_mutex held */
3751	error = remove_from_waiters_ms(lkb, ms);
3752	if (error)
3753		goto out;
3754
3755	__receive_convert_reply(r, lkb, ms);
3756 out:
3757	unlock_rsb(r);
3758	put_rsb(r);
3759}
3760
3761static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
3762{
3763	struct dlm_lkb *lkb;
3764	int error;
3765
3766	error = find_lkb(ls, ms->m_remid, &lkb);
3767	if (error) {
3768		log_debug(ls, "receive_convert_reply from %d no lkb %x",
3769			  ms->m_header.h_nodeid, ms->m_remid);
3770		return;
3771	}
3772
3773	_receive_convert_reply(lkb, ms);
3774	dlm_put_lkb(lkb);
3775}
3776
3777static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3778{
3779	struct dlm_rsb *r = lkb->lkb_resource;
3780	int error;
3781
3782	hold_rsb(r);
3783	lock_rsb(r);
3784
3785	error = validate_message(lkb, ms);
3786	if (error)
3787		goto out;
3788
3789	/* stub reply can happen with waiters_mutex held */
3790	error = remove_from_waiters_ms(lkb, ms);
3791	if (error)
3792		goto out;
3793
3794	/* this is the value returned from do_unlock() on the master */
3795
3796	switch (ms->m_result) {
3797	case -DLM_EUNLOCK:
3798		receive_flags_reply(lkb, ms);
3799		remove_lock_pc(r, lkb);
3800		queue_cast(r, lkb, -DLM_EUNLOCK);
3801		break;
3802	case -ENOENT:
3803		break;
3804	default:
3805		log_error(r->res_ls, "receive_unlock_reply %x error %d",
3806			  lkb->lkb_id, ms->m_result);
3807	}
3808 out:
3809	unlock_rsb(r);
3810	put_rsb(r);
3811}
3812
3813static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
3814{
3815	struct dlm_lkb *lkb;
3816	int error;
3817
3818	error = find_lkb(ls, ms->m_remid, &lkb);
3819	if (error) {
3820		log_debug(ls, "receive_unlock_reply from %d no lkb %x",
3821			  ms->m_header.h_nodeid, ms->m_remid);
3822		return;
3823	}
3824
3825	_receive_unlock_reply(lkb, ms);
3826	dlm_put_lkb(lkb);
3827}
3828
3829static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3830{
3831	struct dlm_rsb *r = lkb->lkb_resource;
3832	int error;
3833
3834	hold_rsb(r);
3835	lock_rsb(r);
3836
3837	error = validate_message(lkb, ms);
3838	if (error)
3839		goto out;
3840
3841	/* stub reply can happen with waiters_mutex held */
3842	error = remove_from_waiters_ms(lkb, ms);
3843	if (error)
3844		goto out;
3845
3846	/* this is the value returned from do_cancel() on the master */
3847
3848	switch (ms->m_result) {
3849	case -DLM_ECANCEL:
3850		receive_flags_reply(lkb, ms);
3851		revert_lock_pc(r, lkb);
3852		queue_cast(r, lkb, -DLM_ECANCEL);
3853		break;
3854	case 0:
3855		break;
3856	default:
3857		log_error(r->res_ls, "receive_cancel_reply %x error %d",
3858			  lkb->lkb_id, ms->m_result);
3859	}
3860 out:
3861	unlock_rsb(r);
3862	put_rsb(r);
3863}
3864
3865static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
3866{
3867	struct dlm_lkb *lkb;
3868	int error;
3869
3870	error = find_lkb(ls, ms->m_remid, &lkb);
3871	if (error) {
3872		log_debug(ls, "receive_cancel_reply from %d no lkb %x",
3873			  ms->m_header.h_nodeid, ms->m_remid);
3874		return;
3875	}
3876
3877	_receive_cancel_reply(lkb, ms);
3878	dlm_put_lkb(lkb);
3879}
3880
3881static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
3882{
3883	struct dlm_lkb *lkb;
3884	struct dlm_rsb *r;
3885	int error, ret_nodeid;
3886
3887	error = find_lkb(ls, ms->m_lkid, &lkb);
3888	if (error) {
3889		log_error(ls, "receive_lookup_reply no lkb");
3890		return;
3891	}
3892
3893	/* ms->m_result is the value returned by dlm_dir_lookup on dir node
3894	   FIXME: will a non-zero error ever be returned? */
3895
3896	r = lkb->lkb_resource;
3897	hold_rsb(r);
3898	lock_rsb(r);
3899
3900	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3901	if (error)
3902		goto out;
3903
3904	ret_nodeid = ms->m_nodeid;
3905	if (ret_nodeid == dlm_our_nodeid()) {
3906		r->res_nodeid = 0;
3907		ret_nodeid = 0;
3908		r->res_first_lkid = 0;
3909	} else {
3910		/* set_master() will copy res_nodeid to lkb_nodeid */
3911		r->res_nodeid = ret_nodeid;
3912	}
3913
3914	if (is_overlap(lkb)) {
3915		log_debug(ls, "receive_lookup_reply %x unlock %x",
3916			  lkb->lkb_id, lkb->lkb_flags);
3917		queue_cast_overlap(r, lkb);
3918		unhold_lkb(lkb); /* undoes create_lkb() */
3919		goto out_list;
3920	}
3921
3922	_request_lock(r, lkb);
3923
3924 out_list:
3925	if (!ret_nodeid)
3926		process_lookup_list(r);
3927 out:
3928	unlock_rsb(r);
3929	put_rsb(r);
3930	dlm_put_lkb(lkb);
3931}
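/* Editor's note (not part of the original file): receive_lookup_reply() has
   two outcomes.  If the dir node answered with our own nodeid, this node
   becomes the master (res_nodeid = 0) and process_lookup_list() drives the
   other lkbs queued on res_lookup; otherwise res_nodeid records the remote
   master, and set_master() will copy it into lkb_nodeid when _request_lock()
   resends the request. */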
3932
3933static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
3934{
3935	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
3936		log_debug(ls, "ignore non-member message %d from %d %x %x %d",
3937			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
3938			  ms->m_remid, ms->m_result);
3939		return;
3940	}
3941
3942	switch (ms->m_type) {
3943
3944	/* messages sent to a master node */
3945
3946	case DLM_MSG_REQUEST:
3947		receive_request(ls, ms);
3948		break;
3949
3950	case DLM_MSG_CONVERT:
3951		receive_convert(ls, ms);
3952		break;
3953
3954	case DLM_MSG_UNLOCK:
3955		receive_unlock(ls, ms);
3956		break;
3957
3958	case DLM_MSG_CANCEL:
3959		receive_cancel(ls, ms);
3960		break;
3961
3962	/* messages sent from a master node (replies to above) */
3963
3964	case DLM_MSG_REQUEST_REPLY:
3965		receive_request_reply(ls, ms);
3966		break;
3967
3968	case DLM_MSG_CONVERT_REPLY:
3969		receive_convert_reply(ls, ms);
3970		break;
3971
3972	case DLM_MSG_UNLOCK_REPLY:
3973		receive_unlock_reply(ls, ms);
3974		break;
3975
3976	case DLM_MSG_CANCEL_REPLY:
3977		receive_cancel_reply(ls, ms);
3978		break;
3979
3980	/* messages sent from a master node (only two types of async msg) */
3981
3982	case DLM_MSG_GRANT:
3983		receive_grant(ls, ms);
3984		break;
3985
3986	case DLM_MSG_BAST:
3987		receive_bast(ls, ms);
3988		break;
3989
3990	/* messages sent to a dir node */
3991
3992	case DLM_MSG_LOOKUP:
3993		receive_lookup(ls, ms);
3994		break;
3995
3996	case DLM_MSG_REMOVE:
3997		receive_remove(ls, ms);
3998		break;
3999
4000	/* messages sent from a dir node (remove has no reply) */
4001
4002	case DLM_MSG_LOOKUP_REPLY:
4003		receive_lookup_reply(ls, ms);
4004		break;
4005
4006	/* other messages */
4007
4008	case DLM_MSG_PURGE:
4009		receive_purge(ls, ms);
4010		break;
4011
4012	default:
4013		log_error(ls, "unknown message type %d", ms->m_type);
4014	}
4015}
4016
4017/* If the lockspace is in recovery mode (locking stopped), then normal
4018   messages are saved on the requestqueue for processing after recovery is
4019   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4020   messages off the requestqueue before we process new ones. This occurs right
4021   after recovery completes when we transition from saving all messages on
4022   requestqueue, to processing all the saved messages, to processing new
4023   messages as they arrive. */
4024
4025static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4026				int nodeid)
4027{
4028	if (dlm_locking_stopped(ls)) {
4029		dlm_add_requestqueue(ls, nodeid, ms);
4030	} else {
4031		dlm_wait_requestqueue(ls);
4032		_receive_message(ls, ms);
4033	}
4034}
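/* Editor's sketch (not part of the original file): the life cycle described
   in the comment above, reduced to its three phases:

     recovery running  ->  dlm_add_requestqueue()       save the message
     recovery finished ->  dlm_receive_message_saved()  dlm_recoverd drains
     steady state      ->  _receive_message()           process directly

   dlm_wait_requestqueue() keeps newly arriving messages ordered behind the
   saved ones while the drain is in progress. */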
4035
4036/* This is called by dlm_recoverd to process messages that were saved on
4037   the requestqueue. */
4038
4039void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
4040{
4041	_receive_message(ls, ms);
4042}
4043
4044/* This is called by the midcomms layer when something is received for
4045   the lockspace.  It could be either a MSG (normal message sent as part of
4046   standard locking activity) or an RCOM (recovery message sent as part of
4047   lockspace recovery). */
4048
4049void dlm_receive_buffer(union dlm_packet *p, int nodeid)
4050{
4051	struct dlm_header *hd = &p->header;
4052	struct dlm_ls *ls;
4053	int type = 0;
4054
4055	switch (hd->h_cmd) {
4056	case DLM_MSG:
4057		dlm_message_in(&p->message);
4058		type = p->message.m_type;
4059		break;
4060	case DLM_RCOM:
4061		dlm_rcom_in(&p->rcom);
4062		type = p->rcom.rc_type;
4063		break;
4064	default:
4065		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
4066		return;
4067	}
4068
4069	if (hd->h_nodeid != nodeid) {
4070		log_print("invalid h_nodeid %d from %d lockspace %x",
4071			  hd->h_nodeid, nodeid, hd->h_lockspace);
4072		return;
4073	}
4074
4075	ls = dlm_find_lockspace_global(hd->h_lockspace);
4076	if (!ls) {
4077		if (dlm_config.ci_log_debug)
4078			log_print("invalid lockspace %x from %d cmd %d type %d",
4079				  hd->h_lockspace, nodeid, hd->h_cmd, type);
4080
4081		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
4082			dlm_send_ls_not_ready(nodeid, &p->rcom);
4083		return;
4084	}
4085
4086	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
4087	   be inactive (in this ls) before transitioning to recovery mode */
4088
4089	down_read(&ls->ls_recv_active);
4090	if (hd->h_cmd == DLM_MSG)
4091		dlm_receive_message(ls, &p->message, nodeid);
4092	else
4093		dlm_receive_rcom(ls, &p->rcom, nodeid);
4094	up_read(&ls->ls_recv_active);
4095
4096	dlm_put_lockspace(ls);
4097}
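/* Editor's note (not part of the original file): ls_recv_active is read-held
   around every delivery into the lockspace above, and dlm_ls_stop() takes it
   for write; the write side therefore blocks until all in-flight deliveries
   have finished, which is what guarantees a quiescent lockspace before the
   transition to recovery mode. */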
4098
4099static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
4100				   struct dlm_message *ms_stub)
4101{
4102	if (middle_conversion(lkb)) {
4103		hold_lkb(lkb);
4104		memset(ms_stub, 0, sizeof(struct dlm_message));
4105		ms_stub->m_flags = DLM_IFL_STUB_MS;
4106		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
4107		ms_stub->m_result = -EINPROGRESS;
4108		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4109		_receive_convert_reply(lkb, ms_stub);
4110
4111		/* Same special case as in receive_rcom_lock_args() */
4112		lkb->lkb_grmode = DLM_LOCK_IV;
4113		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
4114		unhold_lkb(lkb);
4115
4116	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
4117		lkb->lkb_flags |= DLM_IFL_RESEND;
4118	}
4119
4120	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
4121	   conversions are async; there's no reply from the remote master */
4122}
4123
4124/* A waiting lkb needs recovery if the master node has failed, or
4125   the master node is changing (only when no directory is used) */
4126
4127static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
4128{
4129	if (dlm_is_removed(ls, lkb->lkb_nodeid))
4130		return 1;
4131
4132	if (!dlm_no_directory(ls))
4133		return 0;
4134
4135	if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
4136		return 1;
4137
4138	return 0;
4139}
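/* Editor's note (not part of the original file): waiter_needs_recovery()
   reduces to a small decision table:

     master node removed from lockspace          -> 1 (recover)
     a resource directory is in use              -> 0 (master is stable)
     no directory, dir nodeid != master nodeid   -> 1 (master may move)
     otherwise                                   -> 0
*/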
4140
4141/* Recovery for locks that are waiting for replies from nodes that are now
4142   gone.  We can just complete unlocks and cancels by faking a reply from the
4143   dead node.  Requests and up-conversions we flag to be resent after
4144   recovery.  Down-conversions can just be completed with a fake reply like
4145   unlocks.  Conversions between PR and CW need special attention. */
4146
4147void dlm_recover_waiters_pre(struct dlm_ls *ls)
4148{
4149	struct dlm_lkb *lkb, *safe;
4150	struct dlm_message *ms_stub;
4151	int wait_type, stub_unlock_result, stub_cancel_result;
4152
4153	ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
4154	if (!ms_stub) {
4155		log_error(ls, "dlm_recover_waiters_pre no mem");
4156		return;
4157	}
4158
4159	mutex_lock(&ls->ls_waiters_mutex);
4160
4161	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
4162
4163		/* exclude debug messages about unlocks because there can be so
4164		   many and they aren't very interesting */
4165
4166		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
4167			log_debug(ls, "recover_waiter %x nodeid %d "
4168				  "msg %d to %d", lkb->lkb_id, lkb->lkb_nodeid,
4169				  lkb->lkb_wait_type, lkb->lkb_wait_nodeid);
4170		}
4171
4172		/* all outstanding lookups, regardless of destination, will be
4173		   resent after recovery is done */
4174
4175		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
4176			lkb->lkb_flags |= DLM_IFL_RESEND;
4177			continue;
4178		}
4179
4180		if (!waiter_needs_recovery(ls, lkb))
4181			continue;
4182
4183		wait_type = lkb->lkb_wait_type;
4184		stub_unlock_result = -DLM_EUNLOCK;
4185		stub_cancel_result = -DLM_ECANCEL;
4186
4187		/* Main reply may have been received leaving a zero wait_type,
4188		   but a reply for the overlapping op may not have been
4189		   received.  In that case we need to fake the appropriate
4190		   reply for the overlap op. */
4191
4192		if (!wait_type) {
4193			if (is_overlap_cancel(lkb)) {
4194				wait_type = DLM_MSG_CANCEL;
4195				if (lkb->lkb_grmode == DLM_LOCK_IV)
4196					stub_cancel_result = 0;
4197			}
4198			if (is_overlap_unlock(lkb)) {
4199				wait_type = DLM_MSG_UNLOCK;
4200				if (lkb->lkb_grmode == DLM_LOCK_IV)
4201					stub_unlock_result = -ENOENT;
4202			}
4203
4204			log_debug(ls, "rwpre overlap %x %x %d %d %d",
4205				  lkb->lkb_id, lkb->lkb_flags, wait_type,
4206				  stub_cancel_result, stub_unlock_result);
4207		}
4208
4209		switch (wait_type) {
4210
4211		case DLM_MSG_REQUEST:
4212			lkb->lkb_flags |= DLM_IFL_RESEND;
4213			break;
4214
4215		case DLM_MSG_CONVERT:
4216			recover_convert_waiter(ls, lkb, ms_stub);
4217			break;
4218
4219		case DLM_MSG_UNLOCK:
4220			hold_lkb(lkb);
4221			memset(ms_stub, 0, sizeof(struct dlm_message));
4222			ms_stub->m_flags = DLM_IFL_STUB_MS;
4223			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
4224			ms_stub->m_result = stub_unlock_result;
4225			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4226			_receive_unlock_reply(lkb, ms_stub);
4227			dlm_put_lkb(lkb);
4228			break;
4229
4230		case DLM_MSG_CANCEL:
4231			hold_lkb(lkb);
4232			memset(ms_stub, 0, sizeof(struct dlm_message));
4233			ms_stub->m_flags = DLM_IFL_STUB_MS;
4234			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
4235			ms_stub->m_result = stub_cancel_result;
4236			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4237			_receive_cancel_reply(lkb, ms_stub);
4238			dlm_put_lkb(lkb);
4239			break;
4240
4241		default:
4242			log_error(ls, "invalid lkb wait_type %d %d",
4243				  lkb->lkb_wait_type, wait_type);
4244		}
4245		schedule();
4246	}
4247	mutex_unlock(&ls->ls_waiters_mutex);
4248	kfree(ms_stub);
4249}
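/* Editor's sketch (not part of the original file): faking a reply reuses the
   normal reply path rather than adding special completion logic.  For an
   unlock waiter, the stub built above is handled exactly as if the dead
   master had sent:

       m_type   = DLM_MSG_UNLOCK_REPLY
       m_result = -DLM_EUNLOCK          (what do_unlock() would have returned)

   with DLM_IFL_STUB_MS telling _receive_unlock_reply() that the message was
   built locally. */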
4250
4251static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
4252{
4253	struct dlm_lkb *lkb;
4254	int found = 0;
4255
4256	mutex_lock(&ls->ls_waiters_mutex);
4257	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
4258		if (lkb->lkb_flags & DLM_IFL_RESEND) {
4259			hold_lkb(lkb);
4260			found = 1;
4261			break;
4262		}
4263	}
4264	mutex_unlock(&ls->ls_waiters_mutex);
4265
4266	if (!found)
4267		lkb = NULL;
4268	return lkb;
4269}
4270
4271/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
4272   master or dir-node for r.  Processing the lkb may result in it being placed
4273   back on waiters. */
4274
4275/* We do this after normal locking has been enabled and any saved messages
4276   (in requestqueue) have been processed.  We should be confident that at
4277   this point we won't get or process a reply to any of these waiting
4278   operations.  But, new ops may be coming in on the rsbs/locks here from
4279   userspace or remotely. */
4280
4281/* there may have been an overlap unlock/cancel prior to recovery or after
4282   recovery.  if before, the lkb may still have a positive wait_count; if
4283   after, the overlap flag would just have been set and nothing new sent.
4284   we can be confident here that any replies to either the initial op or
4285   overlap ops prior to recovery have been received. */
4286
4287int dlm_recover_waiters_post(struct dlm_ls *ls)
4288{
4289	struct dlm_lkb *lkb;
4290	struct dlm_rsb *r;
4291	int error = 0, mstype, err, oc, ou;
4292
4293	while (1) {
4294		if (dlm_locking_stopped(ls)) {
4295			log_debug(ls, "recover_waiters_post aborted");
4296			error = -EINTR;
4297			break;
4298		}
4299
4300		lkb = find_resend_waiter(ls);
4301		if (!lkb)
4302			break;
4303
4304		r = lkb->lkb_resource;
4305		hold_rsb(r);
4306		lock_rsb(r);
4307
4308		mstype = lkb->lkb_wait_type;
4309		oc = is_overlap_cancel(lkb);
4310		ou = is_overlap_unlock(lkb);
4311		err = 0;
4312
4313		log_debug(ls, "recover_waiter %x nodeid %d msg %d r_nodeid %d",
4314			  lkb->lkb_id, lkb->lkb_nodeid, mstype, r->res_nodeid);
4315
4316		/* At this point we assume that we won't get a reply to any
4317		   previous op or overlap op on this lock.  First, do a big
4318		   remove_from_waiters() for all previous ops. */
4319
4320		lkb->lkb_flags &= ~DLM_IFL_RESEND;
4321		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4322		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4323		lkb->lkb_wait_type = 0;
4324		lkb->lkb_wait_count = 0;
4325		mutex_lock(&ls->ls_waiters_mutex);
4326		list_del_init(&lkb->lkb_wait_reply);
4327		mutex_unlock(&ls->ls_waiters_mutex);
4328		unhold_lkb(lkb); /* for waiters list */
4329
4330		if (oc || ou) {
4331			/* do an unlock or cancel instead of resending */
4332			switch (mstype) {
4333			case DLM_MSG_LOOKUP:
4334			case DLM_MSG_REQUEST:
4335				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
4336							-DLM_ECANCEL);
4337				unhold_lkb(lkb); /* undoes create_lkb() */
4338				break;
4339			case DLM_MSG_CONVERT:
4340				if (oc) {
4341					queue_cast(r, lkb, -DLM_ECANCEL);
4342				} else {
4343					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
4344					_unlock_lock(r, lkb);
4345				}
4346				break;
4347			default:
4348				err = 1;
4349			}
4350		} else {
4351			switch (mstype) {
4352			case DLM_MSG_LOOKUP:
4353			case DLM_MSG_REQUEST:
4354				_request_lock(r, lkb);
4355				if (is_master(r))
4356					confirm_master(r, 0);
4357				break;
4358			case DLM_MSG_CONVERT:
4359				_convert_lock(r, lkb);
4360				break;
4361			default:
4362				err = 1;
4363			}
4364		}
4365
4366		if (err)
4367			log_error(ls, "recover_waiters_post %x %d %x %d %d",
4368			  	  lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
4369		unlock_rsb(r);
4370		put_rsb(r);
4371		dlm_put_lkb(lkb);
4372	}
4373
4374	return error;
4375}
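/* Editor's note (not part of the original file): the oc/ou branch above maps
   each resend decision as follows:

     overlap set, waiting on LOOKUP/REQUEST  -> complete with unlock/cancel cast
     overlap set, waiting on CONVERT         -> cancel cast, or force-unlock
     no overlap,  LOOKUP/REQUEST             -> resend via _request_lock()
     no overlap,  CONVERT                    -> resend via _convert_lock()
*/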
4376
4377static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
4378			int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
4379{
4380	struct dlm_ls *ls = r->res_ls;
4381	struct dlm_lkb *lkb, *safe;
4382
4383	list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
4384		if (test(ls, lkb)) {
4385			rsb_set_flag(r, RSB_LOCKS_PURGED);
4386			del_lkb(r, lkb);
4387			/* this put should free the lkb */
4388			if (!dlm_put_lkb(lkb))
4389				log_error(ls, "purged lkb not released");
4390		}
4391	}
4392}
4393
4394static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4395{
4396	return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
4397}
4398
4399static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4400{
4401	return is_master_copy(lkb);
4402}
4403
4404static void purge_dead_locks(struct dlm_rsb *r)
4405{
4406	purge_queue(r, &r->res_grantqueue, &purge_dead_test);
4407	purge_queue(r, &r->res_convertqueue, &purge_dead_test);
4408	purge_queue(r, &r->res_waitqueue, &purge_dead_test);
4409}
4410
4411void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
4412{
4413	purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
4414	purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
4415	purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
4416}
4417
4418/* Get rid of locks held by nodes that are gone. */
4419
4420int dlm_purge_locks(struct dlm_ls *ls)
4421{
4422	struct dlm_rsb *r;
4423
4424	log_debug(ls, "dlm_purge_locks");
4425
4426	down_write(&ls->ls_root_sem);
4427	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
4428		hold_rsb(r);
4429		lock_rsb(r);
4430		if (is_master(r))
4431			purge_dead_locks(r);
4432		unlock_rsb(r);
4433		unhold_rsb(r);
4434
4435		schedule();
4436	}
4437	up_write(&ls->ls_root_sem);
4438
4439	return 0;
4440}
4441
4442static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
4443{
4444	struct dlm_rsb *r, *r_ret = NULL;
4445
4446	spin_lock(&ls->ls_rsbtbl[bucket].lock);
4447	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
4448		if (!rsb_flag(r, RSB_LOCKS_PURGED))
4449			continue;
4450		hold_rsb(r);
4451		rsb_clear_flag(r, RSB_LOCKS_PURGED);
4452		r_ret = r;
4453		break;
4454	}
4455	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
4456	return r_ret;
4457}
4458
4459void dlm_grant_after_purge(struct dlm_ls *ls)
4460{
4461	struct dlm_rsb *r;
4462	int bucket = 0;
4463
4464	while (1) {
4465		r = find_purged_rsb(ls, bucket);
4466		if (!r) {
4467			if (bucket == ls->ls_rsbtbl_size - 1)
4468				break;
4469			bucket++;
4470			continue;
4471		}
4472		lock_rsb(r);
4473		if (is_master(r)) {
4474			grant_pending_locks(r);
4475			confirm_master(r, 0);
4476		}
4477		unlock_rsb(r);
4478		put_rsb(r);
4479		schedule();
4480	}
4481}
4482
4483static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
4484					 uint32_t remid)
4485{
4486	struct dlm_lkb *lkb;
4487
4488	list_for_each_entry(lkb, head, lkb_statequeue) {
4489		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
4490			return lkb;
4491	}
4492	return NULL;
4493}
4494
4495static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
4496				    uint32_t remid)
4497{
4498	struct dlm_lkb *lkb;
4499
4500	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
4501	if (lkb)
4502		return lkb;
4503	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
4504	if (lkb)
4505		return lkb;
4506	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
4507	if (lkb)
4508		return lkb;
4509	return NULL;
4510}
4511
4512/* needs at least dlm_rcom + rcom_lock */
4513static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4514				  struct dlm_rsb *r, struct dlm_rcom *rc)
4515{
4516	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4517
4518	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
4519	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
4520	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
4521	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
4522	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
4523	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4524	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
4525	lkb->lkb_rqmode = rl->rl_rqmode;
4526	lkb->lkb_grmode = rl->rl_grmode;
4527	/* don't set lkb_status because add_lkb wants to set it itself */
4528
4529	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
4530	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
4531
4532	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
4533		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4534			 sizeof(struct rcom_lock);
4535		if (lvblen > ls->ls_lvblen)
4536			return -EINVAL;
4537		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
4538		if (!lkb->lkb_lvbptr)
4539			return -ENOMEM;
4540		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
4541	}
4542
4543	/* Conversions between PR and CW (middle modes) need special handling.
4544	   The real granted mode of these converting locks cannot be determined
4545	   until all locks have been rebuilt on the rsb (recover_conversion) */
4546
4547	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
4548	    middle_conversion(lkb)) {
4549		rl->rl_status = DLM_LKSTS_CONVERT;
4550		lkb->lkb_grmode = DLM_LOCK_IV;
4551		rsb_set_flag(r, RSB_RECOVER_CONVERT);
4552	}
4553
4554	return 0;
4555}
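/* Editor's note (not part of the original file): rcom_lock fields arrive in
   little-endian on-wire format, hence the le32_to_cpu()/le16_to_cpu()
   conversions above.  The LVB length is implicit in the packet size:

       lvblen = h_length - sizeof(struct dlm_rcom) - sizeof(struct rcom_lock)

   and is rejected if it exceeds the lockspace's configured ls_lvblen. */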
4556
4557/* This lkb may have been recovered in a previous aborted recovery so we need
4558   to check if the rsb already has an lkb with the given remote nodeid/lkid.
4559   If so we just send back a standard reply.  If not, we create a new lkb with
4560   the given values and send back our lkid.  We send back our lkid by sending
4561   back the rcom_lock struct we got but with the remid field filled in. */
4562
4563/* needs at least dlm_rcom + rcom_lock */
4564int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4565{
4566	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4567	struct dlm_rsb *r;
4568	struct dlm_lkb *lkb;
4569	int error;
4570
4571	if (rl->rl_parent_lkid) {
4572		error = -EOPNOTSUPP;
4573		goto out;
4574	}
4575
4576	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
4577			 R_MASTER, &r);
4578	if (error)
4579		goto out;
4580
4581	lock_rsb(r);
4582
4583	lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid));
4584	if (lkb) {
4585		error = -EEXIST;
4586		goto out_remid;
4587	}
4588
4589	error = create_lkb(ls, &lkb);
4590	if (error)
4591		goto out_unlock;
4592
4593	error = receive_rcom_lock_args(ls, lkb, r, rc);
4594	if (error) {
4595		__put_lkb(ls, lkb);
4596		goto out_unlock;
4597	}
4598
4599	attach_lkb(r, lkb);
4600	add_lkb(r, lkb, rl->rl_status);
4601	error = 0;
4602
4603 out_remid:
4604	/* this is the new value returned to the lock holder for
4605	   saving in its process-copy lkb */
4606	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
4607
4608 out_unlock:
4609	unlock_rsb(r);
4610	put_rsb(r);
4611 out:
4612	if (error)
4613		log_debug(ls, "recover_master_copy %d %x", error,
4614			  le32_to_cpu(rl->rl_lkid));
4615	rl->rl_result = cpu_to_le32(error);
4616	return error;
4617}
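/* Editor's note (not part of the original file): the remid handshake
   implemented above and in dlm_recover_process_copy() below:

     lock holder -> new master : rcom_lock carrying rl_lkid (holder's lkb id)
     new master  -> lock holder: same struct with rl_remid = master's lkb_id
     lock holder               : saves rl_remid as lkb->lkb_remid

   after which both copies of the lock can name each other. */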
4618
4619/* needs at least dlm_rcom + rcom_lock */
4620int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4621{
4622	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4623	struct dlm_rsb *r;
4624	struct dlm_lkb *lkb;
4625	int error;
4626
4627	error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb);
4628	if (error) {
4629		log_error(ls, "recover_process_copy no lkid %x",
4630				le32_to_cpu(rl->rl_lkid));
4631		return error;
4632	}
4633
4634	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
4635
4636	error = le32_to_cpu(rl->rl_result);
4637
4638	r = lkb->lkb_resource;
4639	hold_rsb(r);
4640	lock_rsb(r);
4641
4642	switch (error) {
4643	case -EBADR:
4644		/* There's a chance the new master received our lock before
4645		   dlm_recover_master_reply(); this wouldn't happen if we did
4646		   a barrier between recover_masters and recover_locks. */
4647		log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
4648			  (unsigned long)r, r->res_name);
4649		dlm_send_rcom_lock(r, lkb);
4650		goto out;
4651	case -EEXIST:
4652		log_debug(ls, "master copy exists %x", lkb->lkb_id);
4653		/* fall through */
4654	case 0:
4655		lkb->lkb_remid = le32_to_cpu(rl->rl_remid);
4656		break;
4657	default:
4658		log_error(ls, "dlm_recover_process_copy unknown error %d %x",
4659			  error, lkb->lkb_id);
4660	}
4661
4662	/* an ack for dlm_recover_locks() which waits for replies from
4663	   all the locks it sends to new masters */
4664	dlm_recovered_lock(r);
4665 out:
4666	unlock_rsb(r);
4667	put_rsb(r);
4668	dlm_put_lkb(lkb);
4669
4670	return 0;
4671}
4672
4673int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
4674		     int mode, uint32_t flags, void *name, unsigned int namelen,
4675		     unsigned long timeout_cs)
4676{
4677	struct dlm_lkb *lkb;
4678	struct dlm_args args;
4679	int error;
4680
4681	dlm_lock_recovery(ls);
4682
4683	error = create_lkb(ls, &lkb);
4684	if (error) {
4685		kfree(ua);
4686		goto out;
4687	}
4688
4689	if (flags & DLM_LKF_VALBLK) {
4690		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
4691		if (!ua->lksb.sb_lvbptr) {
4692			kfree(ua);
4693			__put_lkb(ls, lkb);
4694			error = -ENOMEM;
4695			goto out;
4696		}
4697	}
4698
4699	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
4700	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
4701	   lock and that lkb_astparam is the dlm_user_args structure. */
4702
4703	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
4704			      fake_astfn, ua, fake_bastfn, &args);
4705	lkb->lkb_flags |= DLM_IFL_USER;
4706
4707	if (error) {
4708		__put_lkb(ls, lkb);
4709		goto out;
4710	}
4711
4712	error = request_lock(ls, lkb, name, namelen, &args);
4713
4714	switch (error) {
4715	case 0:
4716		break;
4717	case -EINPROGRESS:
4718		error = 0;
4719		break;
4720	case -EAGAIN:
4721		error = 0;
4722		/* fall through */
4723	default:
4724		__put_lkb(ls, lkb);
4725		goto out;
4726	}
4727
4728	/* add this new lkb to the per-process list of locks */
4729	spin_lock(&ua->proc->locks_spin);
4730	hold_lkb(lkb);
4731	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
4732	spin_unlock(&ua->proc->locks_spin);
4733 out:
4734	dlm_unlock_recovery(ls);
4735	return error;
4736}
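/* Editor's note (not part of the original file): dlm_user_request() and the
   dlm_user_* functions below are, to the editor's understanding, the entry
   points reached from writes to the dlm character device (see user.c); they
   mirror dlm_lock()/dlm_unlock() but carry a dlm_user_args so casts and
   basts can be delivered back to the userspace process. */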
4737
4738int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4739		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
4740		     unsigned long timeout_cs)
4741{
4742	struct dlm_lkb *lkb;
4743	struct dlm_args args;
4744	struct dlm_user_args *ua;
4745	int error;
4746
4747	dlm_lock_recovery(ls);
4748
4749	error = find_lkb(ls, lkid, &lkb);
4750	if (error)
4751		goto out;
4752
4753	/* user can change the params on its lock when it converts it, or
4754	   add an lvb that didn't exist before */
4755
4756	ua = lkb->lkb_ua;
4757
4758	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
4759		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
4760		if (!ua->lksb.sb_lvbptr) {
4761			error = -ENOMEM;
4762			goto out_put;
4763		}
4764	}
4765	if (lvb_in && ua->lksb.sb_lvbptr)
4766		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4767
4768	ua->xid = ua_tmp->xid;
4769	ua->castparam = ua_tmp->castparam;
4770	ua->castaddr = ua_tmp->castaddr;
4771	ua->bastparam = ua_tmp->bastparam;
4772	ua->bastaddr = ua_tmp->bastaddr;
4773	ua->user_lksb = ua_tmp->user_lksb;
4774
4775	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
4776			      fake_astfn, ua, fake_bastfn, &args);
4777	if (error)
4778		goto out_put;
4779
4780	error = convert_lock(ls, lkb, &args);
4781
4782	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
4783		error = 0;
4784 out_put:
4785	dlm_put_lkb(lkb);
4786 out:
4787	dlm_unlock_recovery(ls);
4788	kfree(ua_tmp);
4789	return error;
4790}
4791
4792int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4793		    uint32_t flags, uint32_t lkid, char *lvb_in)
4794{
4795	struct dlm_lkb *lkb;
4796	struct dlm_args args;
4797	struct dlm_user_args *ua;
4798	int error;
4799
4800	dlm_lock_recovery(ls);
4801
4802	error = find_lkb(ls, lkid, &lkb);
4803	if (error)
4804		goto out;
4805
4806	ua = lkb->lkb_ua;
4807
4808	if (lvb_in && ua->lksb.sb_lvbptr)
4809		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4810	if (ua_tmp->castparam)
4811		ua->castparam = ua_tmp->castparam;
4812	ua->user_lksb = ua_tmp->user_lksb;
4813
4814	error = set_unlock_args(flags, ua, &args);
4815	if (error)
4816		goto out_put;
4817
4818	error = unlock_lock(ls, lkb, &args);
4819
4820	if (error == -DLM_EUNLOCK)
4821		error = 0;
4822	/* from validate_unlock_args() */
4823	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
4824		error = 0;
4825	if (error)
4826		goto out_put;
4827
4828	spin_lock(&ua->proc->locks_spin);
4829	/* dlm_user_add_cb() may have already taken lkb off the proc list */
4830	if (!list_empty(&lkb->lkb_ownqueue))
4831		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
4832	spin_unlock(&ua->proc->locks_spin);
4833 out_put:
4834	dlm_put_lkb(lkb);
4835 out:
4836	dlm_unlock_recovery(ls);
4837	kfree(ua_tmp);
4838	return error;
4839}
4840
4841int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4842		    uint32_t flags, uint32_t lkid)
4843{
4844	struct dlm_lkb *lkb;
4845	struct dlm_args args;
4846	struct dlm_user_args *ua;
4847	int error;
4848
4849	dlm_lock_recovery(ls);
4850
4851	error = find_lkb(ls, lkid, &lkb);
4852	if (error)
4853		goto out;
4854
4855	ua = lkb->lkb_ua;
4856	if (ua_tmp->castparam)
4857		ua->castparam = ua_tmp->castparam;
4858	ua->user_lksb = ua_tmp->user_lksb;
4859
4860	error = set_unlock_args(flags, ua, &args);
4861	if (error)
4862		goto out_put;
4863
4864	error = cancel_lock(ls, lkb, &args);
4865
4866	if (error == -DLM_ECANCEL)
4867		error = 0;
4868	/* from validate_unlock_args() */
4869	if (error == -EBUSY)
4870		error = 0;
4871 out_put:
4872	dlm_put_lkb(lkb);
4873 out:
4874	dlm_unlock_recovery(ls);
4875	kfree(ua_tmp);
4876	return error;
4877}
4878
4879int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
4880{
4881	struct dlm_lkb *lkb;
4882	struct dlm_args args;
4883	struct dlm_user_args *ua;
4884	struct dlm_rsb *r;
4885	int error;
4886
4887	dlm_lock_recovery(ls);
4888
4889	error = find_lkb(ls, lkid, &lkb);
4890	if (error)
4891		goto out;
4892
4893	ua = lkb->lkb_ua;
4894
4895	error = set_unlock_args(flags, ua, &args);
4896	if (error)
4897		goto out_put;
4898
4899	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
4900
4901	r = lkb->lkb_resource;
4902	hold_rsb(r);
4903	lock_rsb(r);
4904
4905	error = validate_unlock_args(lkb, &args);
4906	if (error)
4907		goto out_r;
4908	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
4909
4910	error = _cancel_lock(r, lkb);
4911 out_r:
4912	unlock_rsb(r);
4913	put_rsb(r);
4914
4915	if (error == -DLM_ECANCEL)
4916		error = 0;
4917	/* from validate_unlock_args() */
4918	if (error == -EBUSY)
4919		error = 0;
4920 out_put:
4921	dlm_put_lkb(lkb);
4922 out:
4923	dlm_unlock_recovery(ls);
4924	return error;
4925}
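/* Editor's note (not part of the original file): DLM_IFL_DEADLOCK_CANCEL,
   set above under lock_rsb, changes how completion is reported: when
   queue_cast() later delivers -DLM_ECANCEL for this lkb it clears the flag
   and returns -EDEADLK instead, distinguishing a deadlock-triggered cancel
   from an ordinary one. */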
4926
4927/* lkb's that are removed from the waiters list by revert are just left on the
4928   orphans list with the granted orphan locks, to be freed by purge */
4929
4930static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4931{
4932	struct dlm_args args;
4933	int error;
4934
4935	hold_lkb(lkb);
4936	mutex_lock(&ls->ls_orphans_mutex);
4937	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
4938	mutex_unlock(&ls->ls_orphans_mutex);
4939
4940	set_unlock_args(0, lkb->lkb_ua, &args);
4941
4942	error = cancel_lock(ls, lkb, &args);
4943	if (error == -DLM_ECANCEL)
4944		error = 0;
4945	return error;
4946}
4947
4948/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
4949   Regardless of what rsb queue the lock is on, it's removed and freed. */
4950
4951static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4952{
4953	struct dlm_args args;
4954	int error;
4955
4956	set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
4957
4958	error = unlock_lock(ls, lkb, &args);
4959	if (error == -DLM_EUNLOCK)
4960		error = 0;
4961	return error;
4962}
4963
4964/* We have to release the clear_proc_locks mutex before calling
4965   unlock_proc_lock() (which takes lock_rsb), to avoid deadlocking with a
4966   received message that does lock_rsb followed by dlm_user_add_cb() */
4967
4968static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
4969				     struct dlm_user_proc *proc)
4970{
4971	struct dlm_lkb *lkb = NULL;
4972
4973	mutex_lock(&ls->ls_clear_proc_locks);
4974	if (list_empty(&proc->locks))
4975		goto out;
4976
4977	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
4978	list_del_init(&lkb->lkb_ownqueue);
4979
4980	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
4981		lkb->lkb_flags |= DLM_IFL_ORPHAN;
4982	else
4983		lkb->lkb_flags |= DLM_IFL_DEAD;
4984 out:
4985	mutex_unlock(&ls->ls_clear_proc_locks);
4986	return lkb;
4987}
4988
4989/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
4990   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
4991   which we clear here. */
4992
4993/* proc CLOSING flag is set so no more device_reads should look at proc->asts
4994   list, and no more device_writes should add lkb's to proc->locks list; so we
4995   shouldn't need to take asts_spin or locks_spin here.  this assumes that
4996   device reads/writes/closes are serialized -- FIXME: we may need to serialize
4997   them ourselves. */
4998
4999void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5000{
5001	struct dlm_lkb *lkb, *safe;
5002
5003	dlm_lock_recovery(ls);
5004
5005	while (1) {
5006		lkb = del_proc_lock(ls, proc);
5007		if (!lkb)
5008			break;
5009		del_timeout(lkb);
5010		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5011			orphan_proc_lock(ls, lkb);
5012		else
5013			unlock_proc_lock(ls, lkb);
5014
5015		/* this removes the reference for the proc->locks list
5016		   added by dlm_user_request; it may result in the lkb
5017		   being freed */
5018
5019		dlm_put_lkb(lkb);
5020	}
5021
5022	mutex_lock(&ls->ls_clear_proc_locks);
5023
5024	/* in-progress unlocks */
5025	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5026		list_del_init(&lkb->lkb_ownqueue);
5027		lkb->lkb_flags |= DLM_IFL_DEAD;
5028		dlm_put_lkb(lkb);
5029	}
5030
5031	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5032		memset(&lkb->lkb_callbacks, 0,
5033		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5034		list_del_init(&lkb->lkb_cb_list);
5035		dlm_put_lkb(lkb);
5036	}
5037
5038	mutex_unlock(&ls->ls_clear_proc_locks);
5039	dlm_unlock_recovery(ls);
5040}
5041
5042static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5043{
5044	struct dlm_lkb *lkb, *safe;
5045
5046	while (1) {
5047		lkb = NULL;
5048		spin_lock(&proc->locks_spin);
5049		if (!list_empty(&proc->locks)) {
5050			lkb = list_entry(proc->locks.next, struct dlm_lkb,
5051					 lkb_ownqueue);
5052			list_del_init(&lkb->lkb_ownqueue);
5053		}
5054		spin_unlock(&proc->locks_spin);
5055
5056		if (!lkb)
5057			break;
5058
5059		lkb->lkb_flags |= DLM_IFL_DEAD;
5060		unlock_proc_lock(ls, lkb);
5061		dlm_put_lkb(lkb); /* ref from proc->locks list */
5062	}
5063
5064	spin_lock(&proc->locks_spin);
5065	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5066		list_del_init(&lkb->lkb_ownqueue);
5067		lkb->lkb_flags |= DLM_IFL_DEAD;
5068		dlm_put_lkb(lkb);
5069	}
5070	spin_unlock(&proc->locks_spin);
5071
5072	spin_lock(&proc->asts_spin);
5073	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5074		memset(&lkb->lkb_callbacks, 0,
5075		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5076		list_del_init(&lkb->lkb_cb_list);
5077		dlm_put_lkb(lkb);
5078	}
5079	spin_unlock(&proc->asts_spin);
5080}
5081
5082/* pid of 0 means purge all orphans */
5083
5084static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
5085{
5086	struct dlm_lkb *lkb, *safe;
5087
5088	mutex_lock(&ls->ls_orphans_mutex);
5089	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
5090		if (pid && lkb->lkb_ownpid != pid)
5091			continue;
5092		unlock_proc_lock(ls, lkb);
5093		list_del_init(&lkb->lkb_ownqueue);
5094		dlm_put_lkb(lkb);
5095	}
5096	mutex_unlock(&ls->ls_orphans_mutex);
5097}
5098
5099static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
5100{
5101	struct dlm_message *ms;
5102	struct dlm_mhandle *mh;
5103	int error;
5104
5105	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
5106				DLM_MSG_PURGE, &ms, &mh);
5107	if (error)
5108		return error;
5109	ms->m_nodeid = nodeid;
5110	ms->m_pid = pid;
5111
5112	return send_message(mh, ms);
5113}
5114
5115int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
5116		   int nodeid, int pid)
5117{
5118	int error = 0;
5119
5120	if (nodeid != dlm_our_nodeid()) {
5121		error = send_purge(ls, nodeid, pid);
5122	} else {
5123		dlm_lock_recovery(ls);
5124		if (pid == current->pid)
5125			purge_proc_locks(ls, proc);
5126		else
5127			do_purge(ls, nodeid, pid);
5128		dlm_unlock_recovery(ls);
5129	}
5130	return error;
5131}
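/* Editor's note (not part of the original file): purge routing in
   dlm_user_purge() above: a purge aimed at another node is sent as a
   DLM_MSG_PURGE message and handled there by do_purge(); purging the calling
   process's own locks takes purge_proc_locks(); any other local pid (or pid
   0, meaning all orphans) goes through do_purge() directly. */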
5132
v4.17
   1/******************************************************************************
   2*******************************************************************************
   3**
   4**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
   5**
   6**  This copyrighted material is made available to anyone wishing to use,
   7**  modify, copy, or redistribute it subject to the terms and conditions
   8**  of the GNU General Public License v.2.
   9**
  10*******************************************************************************
  11******************************************************************************/
  12
  13/* Central locking logic has four stages:
  14
  15   dlm_lock()
  16   dlm_unlock()
  17
  18   request_lock(ls, lkb)
  19   convert_lock(ls, lkb)
  20   unlock_lock(ls, lkb)
  21   cancel_lock(ls, lkb)
  22
  23   _request_lock(r, lkb)
  24   _convert_lock(r, lkb)
  25   _unlock_lock(r, lkb)
  26   _cancel_lock(r, lkb)
  27
  28   do_request(r, lkb)
  29   do_convert(r, lkb)
  30   do_unlock(r, lkb)
  31   do_cancel(r, lkb)
  32
  33   Stage 1 (lock, unlock) is mainly about checking input args and
  34   splitting into one of the four main operations:
  35
  36       dlm_lock          = request_lock
  37       dlm_lock+CONVERT  = convert_lock
  38       dlm_unlock        = unlock_lock
  39       dlm_unlock+CANCEL = cancel_lock
  40
  41   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
  42   provided to the next stage.
  43
  44   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
  45   When remote, it calls send_xxxx(), when local it calls do_xxxx().
  46
  47   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
  48   given rsb and lkb and queues callbacks.
  49
  50   For remote operations, send_xxxx() results in the corresponding do_xxxx()
  51   function being executed on the remote node.  The connecting send/receive
  52   calls on local (L) and remote (R) nodes:
  53
  54   L: send_xxxx()              ->  R: receive_xxxx()
  55                                   R: do_xxxx()
  56   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
  57*/
  58#include <linux/types.h>
  59#include <linux/rbtree.h>
  60#include <linux/slab.h>
  61#include "dlm_internal.h"
  62#include <linux/dlm_device.h>
  63#include "memory.h"
  64#include "lowcomms.h"
  65#include "requestqueue.h"
  66#include "util.h"
  67#include "dir.h"
  68#include "member.h"
  69#include "lockspace.h"
  70#include "ast.h"
  71#include "lock.h"
  72#include "rcom.h"
  73#include "recover.h"
  74#include "lvb_table.h"
  75#include "user.h"
  76#include "config.h"
  77
  78static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
  79static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
  80static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  81static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
  82static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
  83static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
  84static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
  85static int send_remove(struct dlm_rsb *r);
  86static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  87static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
  88static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
  89				    struct dlm_message *ms);
  90static int receive_extralen(struct dlm_message *ms);
  91static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
  92static void del_timeout(struct dlm_lkb *lkb);
  93static void toss_rsb(struct kref *kref);
  94
  95/*
  96 * Lock compatibilty matrix - thanks Steve
  97 * UN = Unlocked state. Not really a state, used as a flag
  98 * PD = Padding. Used to make the matrix a nice power of two in size
  99 * Other states are the same as the VMS DLM.
 100 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 101 */
 102
 103static const int __dlm_compat_matrix[8][8] = {
 104      /* UN NL CR CW PR PW EX PD */
 105        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
 106        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
 107        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
 108        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
 109        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
 110        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
 111        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
 112        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 113};
 114
 115/*
 116 * This defines the direction of transfer of LVB data.
 117 * Granted mode is the row; requested mode is the column.
 118 * Usage: matrix[grmode+1][rqmode+1]
 119 * 1 = LVB is returned to the caller
 120 * 0 = LVB is written to the resource
 121 * -1 = nothing happens to the LVB
 122 */
 123
 124const int dlm_lvb_operations[8][8] = {
 125        /* UN   NL  CR  CW  PR  PW  EX  PD*/
 126        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
 127        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
 128        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
 129        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
 130        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
 131        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
 132        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
 133        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
 134};
 135
 136#define modes_compat(gr, rq) \
 137	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
 138
 139int dlm_modes_compat(int mode1, int mode2)
 140{
 141	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
 142}
 143
 144/*
 145 * Compatibility matrix for conversions with QUECVT set.
 146 * Granted mode is the row; requested mode is the column.
 147 * Usage: matrix[grmode+1][rqmode+1]
 148 */
 149
 150static const int __quecvt_compat_matrix[8][8] = {
 151      /* UN NL CR CW PR PW EX PD */
 152        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
 153        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
 154        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
 155        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
 156        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
 157        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
 158        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
 159        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
 160};
 161
 162void dlm_print_lkb(struct dlm_lkb *lkb)
 163{
 164	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
 165	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
 166	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
 167	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
 168	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
 169	       (unsigned long long)lkb->lkb_recover_seq);
 170}
 171
 172static void dlm_print_rsb(struct dlm_rsb *r)
 173{
 174	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
 175	       "rlc %d name %s\n",
 176	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
 177	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
 178	       r->res_name);
 179}
 180
 181void dlm_dump_rsb(struct dlm_rsb *r)
 182{
 183	struct dlm_lkb *lkb;
 184
 185	dlm_print_rsb(r);
 186
 187	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
 188	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
 189	printk(KERN_ERR "rsb lookup list\n");
 190	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
 191		dlm_print_lkb(lkb);
 192	printk(KERN_ERR "rsb grant queue:\n");
 193	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
 194		dlm_print_lkb(lkb);
 195	printk(KERN_ERR "rsb convert queue:\n");
 196	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
 197		dlm_print_lkb(lkb);
 198	printk(KERN_ERR "rsb wait queue:\n");
 199	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
 200		dlm_print_lkb(lkb);
 201}
 202
 203/* Threads cannot use the lockspace while it's being recovered */
 204
 205static inline void dlm_lock_recovery(struct dlm_ls *ls)
 206{
 207	down_read(&ls->ls_in_recovery);
 208}
 209
 210void dlm_unlock_recovery(struct dlm_ls *ls)
 211{
 212	up_read(&ls->ls_in_recovery);
 213}
 214
 215int dlm_lock_recovery_try(struct dlm_ls *ls)
 216{
 217	return down_read_trylock(&ls->ls_in_recovery);
 218}
 219
 220static inline int can_be_queued(struct dlm_lkb *lkb)
 221{
 222	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
 223}
 224
 225static inline int force_blocking_asts(struct dlm_lkb *lkb)
 226{
 227	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
 228}
 229
 230static inline int is_demoted(struct dlm_lkb *lkb)
 231{
 232	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
 233}
 234
 235static inline int is_altmode(struct dlm_lkb *lkb)
 236{
 237	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
 238}
 239
 240static inline int is_granted(struct dlm_lkb *lkb)
 241{
 242	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
 243}
 244
 245static inline int is_remote(struct dlm_rsb *r)
 246{
 247	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
 248	return !!r->res_nodeid;
 249}
 250
 251static inline int is_process_copy(struct dlm_lkb *lkb)
 252{
 253	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
 254}
 255
 256static inline int is_master_copy(struct dlm_lkb *lkb)
 257{
 
 
 258	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
 259}
 260
 261static inline int middle_conversion(struct dlm_lkb *lkb)
 262{
 263	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
 264	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
 265		return 1;
 266	return 0;
 267}
 268
 269static inline int down_conversion(struct dlm_lkb *lkb)
 270{
 271	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
 272}
 273
 274static inline int is_overlap_unlock(struct dlm_lkb *lkb)
 275{
 276	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
 277}
 278
 279static inline int is_overlap_cancel(struct dlm_lkb *lkb)
 280{
 281	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
 282}
 283
 284static inline int is_overlap(struct dlm_lkb *lkb)
 285{
 286	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
 287				  DLM_IFL_OVERLAP_CANCEL));
 288}
 289
 290static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
 291{
 292	if (is_master_copy(lkb))
 293		return;
 294
 295	del_timeout(lkb);
 296
 297	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
 298
 299	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
 300	   timeout caused the cancel then return -ETIMEDOUT */
 301	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
 302		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
 303		rv = -ETIMEDOUT;
 304	}
 305
 306	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
 307		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
 308		rv = -EDEADLK;
 309	}
 310
 311	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
 312}
 313
 314static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
 315{
 316	queue_cast(r, lkb,
 317		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
 318}
 319
 320static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
 321{
 322	if (is_master_copy(lkb)) {
 323		send_bast(r, lkb, rqmode);
 324	} else {
 325		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
 326	}
 327}
 328
 329/*
 330 * Basic operations on rsb's and lkb's
 331 */
 332
 333/* This is only called to add a reference when the code already holds
 334   a valid reference to the rsb, so there's no need for locking. */
 335
 336static inline void hold_rsb(struct dlm_rsb *r)
 337{
 338	kref_get(&r->res_ref);
 339}
 340
 341void dlm_hold_rsb(struct dlm_rsb *r)
 342{
 343	hold_rsb(r);
 344}
 345
 346/* When all references to the rsb are gone it's transferred to
 347   the tossed list for later disposal. */
 348
 349static void put_rsb(struct dlm_rsb *r)
 350{
 351	struct dlm_ls *ls = r->res_ls;
 352	uint32_t bucket = r->res_bucket;
 353
 354	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 355	kref_put(&r->res_ref, toss_rsb);
 356	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 357}
 358
 359void dlm_put_rsb(struct dlm_rsb *r)
 360{
 361	put_rsb(r);
 362}
 363
 364static int pre_rsb_struct(struct dlm_ls *ls)
 365{
 366	struct dlm_rsb *r1, *r2;
 367	int count = 0;
 368
 369	spin_lock(&ls->ls_new_rsb_spin);
 370	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
 371		spin_unlock(&ls->ls_new_rsb_spin);
 372		return 0;
 373	}
 374	spin_unlock(&ls->ls_new_rsb_spin);
 375
 376	r1 = dlm_allocate_rsb(ls);
 377	r2 = dlm_allocate_rsb(ls);
 378
 379	spin_lock(&ls->ls_new_rsb_spin);
 380	if (r1) {
 381		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
 382		ls->ls_new_rsb_count++;
 383	}
 384	if (r2) {
 385		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
 386		ls->ls_new_rsb_count++;
 387	}
 388	count = ls->ls_new_rsb_count;
 389	spin_unlock(&ls->ls_new_rsb_spin);
 390
 391	if (!count)
 392		return -ENOMEM;
 393	return 0;
 394}
 395
 396/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
 397   unlock any spinlocks, go back and call pre_rsb_struct again.
 398   Otherwise, take an rsb off the list and return it. */
 399
 400static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
 401			  struct dlm_rsb **r_ret)
 402{
 403	struct dlm_rsb *r;
 404	int count;
 405
 406	spin_lock(&ls->ls_new_rsb_spin);
 407	if (list_empty(&ls->ls_new_rsb)) {
 408		count = ls->ls_new_rsb_count;
 409		spin_unlock(&ls->ls_new_rsb_spin);
 410		log_debug(ls, "find_rsb retry %d %d %s",
 411			  count, dlm_config.ci_new_rsb_count, name);
 412		return -EAGAIN;
 413	}
 414
 415	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
 416	list_del(&r->res_hashchain);
 417	/* Convert the empty list_head to a NULL rb_node for tree usage: */
 418	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
 419	ls->ls_new_rsb_count--;
 420	spin_unlock(&ls->ls_new_rsb_spin);
 421
 422	r->res_ls = ls;
 423	r->res_length = len;
 424	memcpy(r->res_name, name, len);
 425	mutex_init(&r->res_mutex);
 426
 
 427	INIT_LIST_HEAD(&r->res_lookup);
 428	INIT_LIST_HEAD(&r->res_grantqueue);
 429	INIT_LIST_HEAD(&r->res_convertqueue);
 430	INIT_LIST_HEAD(&r->res_waitqueue);
 431	INIT_LIST_HEAD(&r->res_root_list);
 432	INIT_LIST_HEAD(&r->res_recover_list);
 433
 434	*r_ret = r;
 435	return 0;
 436}
 437
 438static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
 439{
 440	char maxname[DLM_RESNAME_MAXLEN];
 441
 442	memset(maxname, 0, DLM_RESNAME_MAXLEN);
 443	memcpy(maxname, name, nlen);
 444	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
 445}
 446
 447int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
 448			struct dlm_rsb **r_ret)
 449{
 450	struct rb_node *node = tree->rb_node;
 451	struct dlm_rsb *r;
 452	int rc;
 453
 454	while (node) {
 455		r = rb_entry(node, struct dlm_rsb, res_hashnode);
 456		rc = rsb_cmp(r, name, len);
 457		if (rc < 0)
 458			node = node->rb_left;
 459		else if (rc > 0)
 460			node = node->rb_right;
 461		else
 462			goto found;
 463	}
 464	*r_ret = NULL;
 465	return -EBADR;
 466
 467 found:
 
 
 468	*r_ret = r;
 469	return 0;
 470}
 471
 472static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
 
 473{
 474	struct rb_node **newn = &tree->rb_node;
 475	struct rb_node *parent = NULL;
 476	int rc;
 477
 478	while (*newn) {
 479		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
 480					       res_hashnode);
 481
 482		parent = *newn;
 483		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
 484		if (rc < 0)
 485			newn = &parent->rb_left;
 486		else if (rc > 0)
 487			newn = &parent->rb_right;
 488		else {
 489			log_print("rsb_insert match");
 490			dlm_dump_rsb(rsb);
 491			dlm_dump_rsb(cur);
 492			return -EEXIST;
 493		}
 494	}
 
 
 
 
 
 
 
 
 495
 496	rb_link_node(&rsb->res_hashnode, parent, newn);
 497	rb_insert_color(&rsb->res_hashnode, tree);
 498	return 0;
 
 
 
 
 
 
 
 
 
 
 499}
 500
 501/*
 502 * Find rsb in rsbtbl and potentially create/add one
 503 *
 504 * Delaying the release of rsb's has a similar benefit to applications keeping
 505 * NL locks on an rsb, but without the guarantee that the cached master value
 506 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 507 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 508 * to excessive master lookups and removals if we don't delay the release.
 509 *
 510 * Searching for an rsb means looking through both the normal list and toss
 511 * list.  When found on the toss list the rsb is moved to the normal list with
 512 * ref count of 1; when found on normal list the ref count is incremented.
 513 *
 514 * rsb's on the keep list are being used locally and refcounted.
 515 * rsb's on the toss list are not being used locally, and are not refcounted.
 516 *
 517 * The toss list rsb's were either
 518 * - previously used locally but not any more (were on keep list, then
 519 *   moved to toss list when last refcount dropped)
 520 * - created and put on toss list as a directory record for a lookup
 521 *   (we are the dir node for the res, but are not using the res right now,
 522 *   but some other node is)
 523 *
 524 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 525 * So, if the given rsb is on the toss list, it is moved to the keep list
 526 * before being returned.
 527 *
 528 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 529 * more refcounts exist, so the rsb is moved from the keep list to the
 530 * toss list.
 531 *
 532 * rsb's on both keep and toss lists are used for doing a name to master
 533 * lookups.  rsb's that are in use locally (and being refcounted) are on
 534 * the keep list, rsb's that are not in use locally (not refcounted) and
 535 * only exist for name/master lookups are on the toss list.
 536 *
 537 * rsb's on the toss list who's dir_nodeid is not local can have stale
 538 * name/master mappings.  So, remote requests on such rsb's can potentially
 539 * return with an error, which means the mapping is stale and needs to
 540 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 541 * first_lkid is to keep only a single outstanding request on an rsb
 542 * while that rsb has a potentially stale master.)
 543 */
 544
 545static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
 546			uint32_t hash, uint32_t b,
 547			int dir_nodeid, int from_nodeid,
 548			unsigned int flags, struct dlm_rsb **r_ret)
 549{
 550	struct dlm_rsb *r = NULL;
 551	int our_nodeid = dlm_our_nodeid();
 552	int from_local = 0;
 553	int from_other = 0;
 554	int from_dir = 0;
 555	int create = 0;
 556	int error;
 557
 558	if (flags & R_RECEIVE_REQUEST) {
 559		if (from_nodeid == dir_nodeid)
 560			from_dir = 1;
 561		else
 562			from_other = 1;
 563	} else if (flags & R_REQUEST) {
 564		from_local = 1;
 565	}
 566
 567	/*
 568	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
 569	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
 570	 * we're the new master.  Our local recovery may not have set
 571	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
 572	 * create the rsb; dlm_recover_process_copy() will handle EBADR
 573	 * by resending.
 574	 *
 575	 * If someone sends us a request, we are the dir node, and we do
 576	 * not find the rsb anywhere, then recreate it.  This happens if
 577	 * someone sends us a request after we have removed/freed an rsb
 578	 * from our toss list.  (They sent a request instead of lookup
 579	 * because they are using an rsb from their toss list.)
 580	 */
 581
 582	if (from_local || from_dir ||
 583	    (from_other && (dir_nodeid == our_nodeid))) {
 584		create = 1;
 585	}
 586
 587 retry:
 588	if (create) {
 589		error = pre_rsb_struct(ls);
 590		if (error < 0)
 591			goto out;
 592	}
 593
 594	spin_lock(&ls->ls_rsbtbl[b].lock);
 595
 596	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 597	if (error)
 598		goto do_toss;
 599	
 600	/*
 601	 * rsb is active, so we can't check master_nodeid without lock_rsb.
 602	 */
 603
 604	kref_get(&r->res_ref);
 605	error = 0;
 606	goto out_unlock;
 607
 608
 609 do_toss:
 610	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 611	if (error)
 612		goto do_new;
 613
 614	/*
 615	 * rsb found inactive (master_nodeid may be out of date unless
 616	 * we are the dir_nodeid or were the master)  No other thread
 617	 * is using this rsb because it's on the toss list, so we can
 618	 * look at or update res_master_nodeid without lock_rsb.
 619	 */
 620
 621	if ((r->res_master_nodeid != our_nodeid) && from_other) {
 622		/* our rsb was not master, and another node (not the dir node)
 623		   has sent us a request */
 624		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
 625			  from_nodeid, r->res_master_nodeid, dir_nodeid,
 626			  r->res_name);
 627		error = -ENOTBLK;
 628		goto out_unlock;
 629	}
 630
 631	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
 632		/* don't think this should ever happen */
 633		log_error(ls, "find_rsb toss from_dir %d master %d",
 634			  from_nodeid, r->res_master_nodeid);
 635		dlm_print_rsb(r);
 636		/* fix it and go on */
 637		r->res_master_nodeid = our_nodeid;
 638		r->res_nodeid = 0;
 639		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
 640		r->res_first_lkid = 0;
 641	}
 642
 643	if (from_local && (r->res_master_nodeid != our_nodeid)) {
 644		/* Because we have held no locks on this rsb,
 645		   res_master_nodeid could have become stale. */
 646		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
 647		r->res_first_lkid = 0;
 648	}
 649
 650	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
 651	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 652	goto out_unlock;
 653
 654
 655 do_new:
 656	/*
 657	 * rsb not found
 658	 */
 659
 660	if (error == -EBADR && !create)
 661		goto out_unlock;
 662
 663	error = get_rsb_struct(ls, name, len, &r);
 664	if (error == -EAGAIN) {
 665		spin_unlock(&ls->ls_rsbtbl[b].lock);
 666		goto retry;
 667	}
 668	if (error)
 669		goto out_unlock;
 670
 671	r->res_hash = hash;
 672	r->res_bucket = b;
 673	r->res_dir_nodeid = dir_nodeid;
 674	kref_init(&r->res_ref);
 675
 676	if (from_dir) {
 677		/* want to see how often this happens */
 678		log_debug(ls, "find_rsb new from_dir %d recreate %s",
 679			  from_nodeid, r->res_name);
 680		r->res_master_nodeid = our_nodeid;
 681		r->res_nodeid = 0;
 682		goto out_add;
 683	}
 684
 685	if (from_other && (dir_nodeid != our_nodeid)) {
 686		/* should never happen */
 687		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
 688			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
 689		dlm_free_rsb(r);
 690		r = NULL;
 691		error = -ENOTBLK;
 692		goto out_unlock;
 693	}
 694
 695	if (from_other) {
 696		log_debug(ls, "find_rsb new from_other %d dir %d %s",
 697			  from_nodeid, dir_nodeid, r->res_name);
 698	}
 699
 700	if (dir_nodeid == our_nodeid) {
 701		/* When we are the dir nodeid, we can set the master
 702		   node immediately */
 703		r->res_master_nodeid = our_nodeid;
 704		r->res_nodeid = 0;
 705	} else {
 706		/* set_master will send_lookup to dir_nodeid */
 707		r->res_master_nodeid = 0;
 708		r->res_nodeid = -1;
 709	}
 710
 711 out_add:
 712	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 713 out_unlock:
 714	spin_unlock(&ls->ls_rsbtbl[b].lock);
 715 out:
 716	*r_ret = r;
 717	return error;
 718}
 719
 720/* During recovery, other nodes can send us new MSTCPY locks (from
 721   dlm_recover_locks) before we've made ourselves master (in
 722   dlm_recover_masters). */
 723
 724static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
 725			  uint32_t hash, uint32_t b,
 726			  int dir_nodeid, int from_nodeid,
 727			  unsigned int flags, struct dlm_rsb **r_ret)
 728{
 729	struct dlm_rsb *r = NULL;
 730	int our_nodeid = dlm_our_nodeid();
 731	int recover = (flags & R_RECEIVE_RECOVER);
 732	int error;
 733
 734 retry:
 735	error = pre_rsb_struct(ls);
 736	if (error < 0)
 737		goto out;
 738
 739	spin_lock(&ls->ls_rsbtbl[b].lock);
 740
 741	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 742	if (error)
 743		goto do_toss;
 744
 745	/*
 746	 * rsb is active, so we can't check master_nodeid without lock_rsb.
 747	 */
 748
 749	kref_get(&r->res_ref);
 750	goto out_unlock;
 751
 752
 753 do_toss:
 754	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 755	if (error)
 756		goto do_new;
 757
 758	/*
 759	 * rsb found inactive. No other thread is using this rsb because
 760	 * it's on the toss list, so we can look at or update
 761	 * res_master_nodeid without lock_rsb.
 762	 */
 763
 764	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
 765		/* our rsb is not master, and another node has sent us a
 766		   request; this should never happen */
 767		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
 768			  from_nodeid, r->res_master_nodeid, dir_nodeid);
 769		dlm_print_rsb(r);
 770		error = -ENOTBLK;
 771		goto out_unlock;
 772	}
 773
 774	if (!recover && (r->res_master_nodeid != our_nodeid) &&
 775	    (dir_nodeid == our_nodeid)) {
 776		/* our rsb is not master, and we are dir; may as well fix it;
 777		   this should never happen */
 778		log_error(ls, "find_rsb toss our %d master %d dir %d",
 779			  our_nodeid, r->res_master_nodeid, dir_nodeid);
 780		dlm_print_rsb(r);
 781		r->res_master_nodeid = our_nodeid;
 782		r->res_nodeid = 0;
 783	}
 784
 785	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
 786	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 787	goto out_unlock;
 788
 789
 790 do_new:
 791	/*
 792	 * rsb not found
 793	 */
 794
 795	error = get_rsb_struct(ls, name, len, &r);
 796	if (error == -EAGAIN) {
 797		spin_unlock(&ls->ls_rsbtbl[b].lock);
 798		goto retry;
 799	}
 800	if (error)
 801		goto out_unlock;
 802
 803	r->res_hash = hash;
 804	r->res_bucket = b;
 805	r->res_dir_nodeid = dir_nodeid;
 806	r->res_master_nodeid = dir_nodeid;
 807	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
 808	kref_init(&r->res_ref);
 809
 810	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 811 out_unlock:
 812	spin_unlock(&ls->ls_rsbtbl[b].lock);
 813 out:
 814	*r_ret = r;
 815	return error;
 816}
 817
 818static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
 819		    unsigned int flags, struct dlm_rsb **r_ret)
 820{
 821	uint32_t hash, b;
 822	int dir_nodeid;
 823
 824	if (len > DLM_RESNAME_MAXLEN)
 825		return -EINVAL;
 826
 827	hash = jhash(name, len, 0);
 828	b = hash & (ls->ls_rsbtbl_size - 1);
 829
 830	dir_nodeid = dlm_hash2nodeid(ls, hash);
 831
 832	if (dlm_no_directory(ls))
 833		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
 834				      from_nodeid, flags, r_ret);
 835	else
 836		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
 837				      from_nodeid, flags, r_ret);
 838}
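
/* Example usage (an illustrative sketch, simplified from callers such as
   request_lock in this file; error handling trimmed):

	struct dlm_rsb *r;
	int error;

	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
	if (error)
		return error;
	lock_rsb(r);
	... manipulate r and its lkb queues ...
	unlock_rsb(r);
	put_rsb(r);

   find_rsb returns the rsb with a reference held (kref_get when found on
   the keep list, kref_init when created or revived from toss), so each
   successful call must eventually be paired with put_rsb. */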
 839
 840/* we have received a request and found that res_master_nodeid != our_nodeid,
 841   so we need to return an error or make ourselves the master */
 842
 843static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
 844				  int from_nodeid)
 845{
 846	if (dlm_no_directory(ls)) {
 847		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
 848			  from_nodeid, r->res_master_nodeid,
 849			  r->res_dir_nodeid);
 850		dlm_print_rsb(r);
 851		return -ENOTBLK;
 852	}
 853
 854	if (from_nodeid != r->res_dir_nodeid) {
 855		/* our rsb is not master, and another node (not the dir node)
 856		   has sent us a request.  this is much more common when our
 857		   master_nodeid is zero, so limit debug to non-zero.  */
 858
 859		if (r->res_master_nodeid) {
 860			log_debug(ls, "validate master from_other %d master %d "
 861				  "dir %d first %x %s", from_nodeid,
 862				  r->res_master_nodeid, r->res_dir_nodeid,
 863				  r->res_first_lkid, r->res_name);
 864		}
 865		return -ENOTBLK;
 866	} else {
 867		/* our rsb is not master, but the dir nodeid has sent us a
 868		   request; this could happen with master 0 / res_nodeid -1 */
 869
 870		if (r->res_master_nodeid) {
 871			log_error(ls, "validate master from_dir %d master %d "
 872				  "first %x %s",
 873				  from_nodeid, r->res_master_nodeid,
 874				  r->res_first_lkid, r->res_name);
 875		}
 876
 877		r->res_master_nodeid = dlm_our_nodeid();
 878		r->res_nodeid = 0;
 879		return 0;
 880	}
 881}
 882
 883/*
 884 * We're the dir node for this res and another node wants to know the
 885 * master nodeid.  During normal operation (non recovery) this is only
 886 * called from receive_lookup(); master lookups when the local node is
 887 * the dir node are done by find_rsb().
 888 *
 889 * normal operation, we are the dir node for a resource
 890 * . _request_lock
 891 * . set_master
 892 * . send_lookup
 893 * . receive_lookup
 894 * . dlm_master_lookup flags 0
 895 *
 896 * recover directory, we are rebuilding dir for all resources
 897 * . dlm_recover_directory
 898 * . dlm_rcom_names
 899 *   remote node sends back the rsb names it is master of and we are dir of
 900 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 901 *   we either create new rsb setting remote node as master, or find existing
 902 *   rsb and set master to be the remote node.
 903 *
 904 * recover masters, we are finding the new master for resources
 905 * . dlm_recover_masters
 906 * . recover_master
 907 * . dlm_send_rcom_lookup
 908 * . receive_rcom_lookup
 909 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 910 */
 911
 912int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
 913		      unsigned int flags, int *r_nodeid, int *result)
 914{
 915	struct dlm_rsb *r = NULL;
 916	uint32_t hash, b;
 917	int from_master = (flags & DLM_LU_RECOVER_DIR);
 918	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
 919	int our_nodeid = dlm_our_nodeid();
 920	int dir_nodeid, error, toss_list = 0;
 921
 922	if (len > DLM_RESNAME_MAXLEN)
 923		return -EINVAL;
 924
 925	if (from_nodeid == our_nodeid) {
 926		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
 927			  our_nodeid, flags);
 928		return -EINVAL;
 929	}
 930
 931	hash = jhash(name, len, 0);
 932	b = hash & (ls->ls_rsbtbl_size - 1);
 933
 934	dir_nodeid = dlm_hash2nodeid(ls, hash);
 935	if (dir_nodeid != our_nodeid) {
 936		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
 937			  from_nodeid, dir_nodeid, our_nodeid, hash,
 938			  ls->ls_num_nodes);
 939		*r_nodeid = -1;
 940		return -EINVAL;
 941	}
 942
 943 retry:
 944	error = pre_rsb_struct(ls);
 945	if (error < 0)
 946		return error;
 947
 948	spin_lock(&ls->ls_rsbtbl[b].lock);
 949	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
 950	if (!error) {
 951		/* because the rsb is active, we need to lock_rsb before
 952		   checking/changing res_master_nodeid */
 953
 954		hold_rsb(r);
 955		spin_unlock(&ls->ls_rsbtbl[b].lock);
 956		lock_rsb(r);
 957		goto found;
 958	}
 959
 960	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
 961	if (error)
 962		goto not_found;
 963
 964	/* because the rsb is inactive (on toss list), it's not refcounted
 965	   and lock_rsb is not used, but is protected by the rsbtbl lock */
 966
 967	toss_list = 1;
 968 found:
 969	if (r->res_dir_nodeid != our_nodeid) {
 970		/* should not happen, but may as well fix it and carry on */
 971		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
 972			  r->res_dir_nodeid, our_nodeid, r->res_name);
 973		r->res_dir_nodeid = our_nodeid;
 974	}
 975
 976	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
 977		/* Recovery uses this function to set a new master when
 978		   the previous master failed.  Setting NEW_MASTER will
 979		   force dlm_recover_masters to call recover_master on this
 980		   rsb even though res_master_nodeid is no longer a removed node. */
 981
 982		r->res_master_nodeid = from_nodeid;
 983		r->res_nodeid = from_nodeid;
 984		rsb_set_flag(r, RSB_NEW_MASTER);
 985
 986		if (toss_list) {
 987			/* I don't think we should ever find it on toss list. */
 988			log_error(ls, "dlm_master_lookup fix_master on toss");
 989			dlm_dump_rsb(r);
 990		}
 991	}
 992
 993	if (from_master && (r->res_master_nodeid != from_nodeid)) {
 994		/* this will happen if from_nodeid became master during
 995		   a previous recovery cycle, and we aborted the previous
 996		   cycle before recovering this master value */
 997
 998		log_limit(ls, "dlm_master_lookup from_master %d "
 999			  "master_nodeid %d res_nodeid %d first %x %s",
1000			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
1001			  r->res_first_lkid, r->res_name);
1002
1003		if (r->res_master_nodeid == our_nodeid) {
1004			log_error(ls, "from_master %d our_master", from_nodeid);
1005			dlm_dump_rsb(r);
1006			goto out_found;
1007		}
1008
1009		r->res_master_nodeid = from_nodeid;
1010		r->res_nodeid = from_nodeid;
1011		rsb_set_flag(r, RSB_NEW_MASTER);
1012	}
1013
1014	if (!r->res_master_nodeid) {
1015		/* this will happen if recovery happens while we're looking
1016		   up the master for this rsb */
1017
1018		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
1019			  from_nodeid, r->res_first_lkid, r->res_name);
1020		r->res_master_nodeid = from_nodeid;
1021		r->res_nodeid = from_nodeid;
1022	}
1023
1024	if (!from_master && !fix_master &&
1025	    (r->res_master_nodeid == from_nodeid)) {
1026		/* this can happen when the master sends remove, the dir node
1027		   finds the rsb on the keep list and ignores the remove,
1028		   and the former master sends a lookup */
1029
1030		log_limit(ls, "dlm_master_lookup from master %d flags %x "
1031			  "first %x %s", from_nodeid, flags,
1032			  r->res_first_lkid, r->res_name);
1033	}
1034
1035 out_found:
1036	*r_nodeid = r->res_master_nodeid;
1037	if (result)
1038		*result = DLM_LU_MATCH;
1039
1040	if (toss_list) {
1041		r->res_toss_time = jiffies;
1042		/* the rsb was inactive (on toss list) */
1043		spin_unlock(&ls->ls_rsbtbl[b].lock);
1044	} else {
1045		/* the rsb was active */
1046		unlock_rsb(r);
1047		put_rsb(r);
1048	}
1049	return 0;
1050
1051 not_found:
1052	error = get_rsb_struct(ls, name, len, &r);
1053	if (error == -EAGAIN) {
1054		spin_unlock(&ls->ls_rsbtbl[b].lock);
1055		goto retry;
1056	}
1057	if (error)
1058		goto out_unlock;
1059
1060	r->res_hash = hash;
1061	r->res_bucket = b;
1062	r->res_dir_nodeid = our_nodeid;
1063	r->res_master_nodeid = from_nodeid;
1064	r->res_nodeid = from_nodeid;
1065	kref_init(&r->res_ref);
1066	r->res_toss_time = jiffies;
1067
1068	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1069	if (error) {
1070		/* should never happen */
1071		dlm_free_rsb(r);
1072		spin_unlock(&ls->ls_rsbtbl[b].lock);
1073		goto retry;
1074	}
1075
1076	if (result)
1077		*result = DLM_LU_ADD;
1078	*r_nodeid = from_nodeid;
1079	error = 0;
1080 out_unlock:
1081	spin_unlock(&ls->ls_rsbtbl[b].lock);
1082	return error;
1083}
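
/* An illustrative sketch (not verbatim from this file) of how the dir
   node uses this in the receive_lookup path; reply plumbing omitted:

	int ret_nodeid, result;

	error = dlm_master_lookup(ls, from_nodeid, name, len, 0,
				  &ret_nodeid, &result);
	... send ret_nodeid back in the lookup reply ...

   result is DLM_LU_MATCH when an existing keep/toss rsb supplied the
   master, and DLM_LU_ADD when a new toss-list rsb was created with
   from_nodeid recorded as master. */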
1084
1085static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1086{
1087	struct rb_node *n;
1088	struct dlm_rsb *r;
1089	int i;
1090
1091	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1092		spin_lock(&ls->ls_rsbtbl[i].lock);
1093		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1094			r = rb_entry(n, struct dlm_rsb, res_hashnode);
1095			if (r->res_hash == hash)
1096				dlm_dump_rsb(r);
1097		}
1098		spin_unlock(&ls->ls_rsbtbl[i].lock);
1099	}
1100}
1101
1102void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
1103{
1104	struct dlm_rsb *r = NULL;
1105	uint32_t hash, b;
1106	int error;
1107
1108	hash = jhash(name, len, 0);
1109	b = hash & (ls->ls_rsbtbl_size - 1);
1110
1111	spin_lock(&ls->ls_rsbtbl[b].lock);
1112	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1113	if (!error)
1114		goto out_dump;
1115
1116	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1117	if (error)
1118		goto out;
1119 out_dump:
1120	dlm_dump_rsb(r);
1121 out:
1122	spin_unlock(&ls->ls_rsbtbl[b].lock);
1123}
1124
1125static void toss_rsb(struct kref *kref)
1126{
1127	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1128	struct dlm_ls *ls = r->res_ls;
1129
1130	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
1131	kref_init(&r->res_ref);
1132	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1133	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1134	r->res_toss_time = jiffies;
1135	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
1136	if (r->res_lvbptr) {
1137		dlm_free_lvb(r->res_lvbptr);
1138		r->res_lvbptr = NULL;
1139	}
1140}
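
/* Sketch of the resulting rsb reference cycle (illustrative): hold_rsb
   and put_rsb, defined earlier, wrap kref_get/kref_put with toss_rsb as
   the release function, so dropping the last reference moves the rsb
   from keep to toss instead of freeing it:

	hold_rsb(r);		take a reference
	...
	put_rsb(r);		the last put runs toss_rsb()

   Actual freeing happens only in shrink_bucket() below, once the rsb
   has sat on the toss list for ci_toss_secs. */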
1141
1142/* See comment for unhold_lkb */
1143
1144static void unhold_rsb(struct dlm_rsb *r)
1145{
1146	int rv;
1147	rv = kref_put(&r->res_ref, toss_rsb);
1148	DLM_ASSERT(!rv, dlm_dump_rsb(r););
1149}
1150
1151static void kill_rsb(struct kref *kref)
1152{
1153	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1154
1155	/* All work is done after the return from kref_put() so we
1156	   can release the write_lock before the remove and free. */
1157
1158	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1159	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1160	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1161	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1162	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1163	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1164}
1165
1166/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1167   The rsb must exist as long as any lkb's for it do. */
1168
1169static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1170{
1171	hold_rsb(r);
1172	lkb->lkb_resource = r;
1173}
1174
1175static void detach_lkb(struct dlm_lkb *lkb)
1176{
1177	if (lkb->lkb_resource) {
1178		put_rsb(lkb->lkb_resource);
1179		lkb->lkb_resource = NULL;
1180	}
1181}
1182
1183static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1184{
1185	struct dlm_lkb *lkb;
1186	int rv;
1187
1188	lkb = dlm_allocate_lkb(ls);
1189	if (!lkb)
1190		return -ENOMEM;
1191
1192	lkb->lkb_nodeid = -1;
1193	lkb->lkb_grmode = DLM_LOCK_IV;
1194	kref_init(&lkb->lkb_ref);
1195	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1196	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1197	INIT_LIST_HEAD(&lkb->lkb_time_list);
1198	INIT_LIST_HEAD(&lkb->lkb_cb_list);
1199	mutex_init(&lkb->lkb_cb_mutex);
1200	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1201
1202	idr_preload(GFP_NOFS);
1203	spin_lock(&ls->ls_lkbidr_spin);
1204	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
1205	if (rv >= 0)
1206		lkb->lkb_id = rv;
1207	spin_unlock(&ls->ls_lkbidr_spin);
1208	idr_preload_end();
1209
1210	if (rv < 0) {
1211		log_error(ls, "create_lkb idr error %d", rv);
    		dlm_free_lkb(lkb);	/* don't leak the lkb on idr failure */
1212		return rv;
1213	}
1214
1215	*lkb_ret = lkb;
1216	return 0;
1217}
1218
1219static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1220{
1221	struct dlm_lkb *lkb;
1222
1223	spin_lock(&ls->ls_lkbidr_spin);
1224	lkb = idr_find(&ls->ls_lkbidr, lkid);
1225	if (lkb)
1226		kref_get(&lkb->lkb_ref);
1227	spin_unlock(&ls->ls_lkbidr_spin);
1228
1229	*lkb_ret = lkb;
1230	return lkb ? 0 : -ENOENT;
1231}
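
/* Sketch of the lkid round trip these two functions implement
   (illustrative; real callers add error handling):

	struct dlm_lkb *lkb;

	error = create_lkb(ls, &lkb);		idr assigns lkb->lkb_id
	... lkb_id travels to the caller or remote node in the lksb ...
	error = find_lkb(ls, lkid, &lkb);	takes a reference
	...
	dlm_put_lkb(lkb);			drops it

   Every successful find_lkb must be balanced by dlm_put_lkb (or by
   __put_lkb when no rsb is attached yet). */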
1232
1233static void kill_lkb(struct kref *kref)
1234{
1235	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1236
1237	/* All work is done after the return from kref_put() so we
1238	   can release the write_lock before the detach_lkb */
1239
1240	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1241}
1242
1243/* __put_lkb() is used when an lkb may not have an rsb attached to
1244   it so we need to provide the lockspace explicitly */
1245
1246static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1247{
1248	uint32_t lkid = lkb->lkb_id;
1249
1250	spin_lock(&ls->ls_lkbidr_spin);
1251	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
1252		idr_remove(&ls->ls_lkbidr, lkid);
1253		spin_unlock(&ls->ls_lkbidr_spin);
1254
1255		detach_lkb(lkb);
1256
1257		/* for local/process lkbs, lvbptr points to caller's lksb */
1258		if (lkb->lkb_lvbptr && is_master_copy(lkb))
1259			dlm_free_lvb(lkb->lkb_lvbptr);
1260		dlm_free_lkb(lkb);
1261		return 1;
1262	} else {
1263		spin_unlock(&ls->ls_lkbidr_spin);
1264		return 0;
1265	}
1266}
1267
1268int dlm_put_lkb(struct dlm_lkb *lkb)
1269{
1270	struct dlm_ls *ls;
1271
1272	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1273	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1274
1275	ls = lkb->lkb_resource->res_ls;
1276	return __put_lkb(ls, lkb);
1277}
1278
1279/* This is only called to add a reference when the code already holds
1280   a valid reference to the lkb, so there's no need for locking. */
1281
1282static inline void hold_lkb(struct dlm_lkb *lkb)
1283{
1284	kref_get(&lkb->lkb_ref);
1285}
1286
1287/* This is called when we need to remove a reference and are certain
1288   it's not the last ref.  e.g. del_lkb is always called between a
1289   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1290   put_lkb would work fine, but would involve unnecessary locking */
1291
1292static inline void unhold_lkb(struct dlm_lkb *lkb)
1293{
1294	int rv;
1295	rv = kref_put(&lkb->lkb_ref, kill_lkb);
1296	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
1297}
1298
1299static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1300			    int mode)
1301{
1302	struct dlm_lkb *lkb = NULL;
1303
1304	list_for_each_entry(lkb, head, lkb_statequeue)
1305		if (lkb->lkb_rqmode < mode)
1306			break;
1307
1308	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
1309}
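
/* Illustration of the ordering convention (see add_lkb below: granted
   locks are kept in order of grmode, highest first), e.g.

	res_grantqueue:  EX -> PW -> PR -> CW -> CR -> NL

   lkb_add_ordered inserts the new entry in front of the first queued
   lkb whose rqmode sorts below the given mode. */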
1310
1311/* add/remove lkb to rsb's grant/convert/wait queue */
1312
1313static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1314{
1315	kref_get(&lkb->lkb_ref);
1316
1317	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1318
1319	lkb->lkb_timestamp = ktime_get();
1320
1321	lkb->lkb_status = status;
1322
1323	switch (status) {
1324	case DLM_LKSTS_WAITING:
1325		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1326			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1327		else
1328			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1329		break;
1330	case DLM_LKSTS_GRANTED:
1331		/* convention says granted locks kept in order of grmode */
1332		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1333				lkb->lkb_grmode);
1334		break;
1335	case DLM_LKSTS_CONVERT:
1336		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1337			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1338		else
1339			list_add_tail(&lkb->lkb_statequeue,
1340				      &r->res_convertqueue);
1341		break;
1342	default:
1343		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1344	}
1345}
1346
1347static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1348{
1349	lkb->lkb_status = 0;
1350	list_del(&lkb->lkb_statequeue);
1351	unhold_lkb(lkb);
1352}
1353
1354static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1355{
1356	hold_lkb(lkb);
1357	del_lkb(r, lkb);
1358	add_lkb(r, lkb, sts);
1359	unhold_lkb(lkb);
1360}
1361
1362static int msg_reply_type(int mstype)
1363{
1364	switch (mstype) {
1365	case DLM_MSG_REQUEST:
1366		return DLM_MSG_REQUEST_REPLY;
1367	case DLM_MSG_CONVERT:
1368		return DLM_MSG_CONVERT_REPLY;
1369	case DLM_MSG_UNLOCK:
1370		return DLM_MSG_UNLOCK_REPLY;
1371	case DLM_MSG_CANCEL:
1372		return DLM_MSG_CANCEL_REPLY;
1373	case DLM_MSG_LOOKUP:
1374		return DLM_MSG_LOOKUP_REPLY;
1375	}
1376	return -1;
1377}
1378
1379static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1380{
1381	int i;
1382
1383	for (i = 0; i < num_nodes; i++) {
1384		if (!warned[i]) {
1385			warned[i] = nodeid;
1386			return 0;
1387		}
1388		if (warned[i] == nodeid)
1389			return 1;
1390	}
1391	return 0;
1392}
1393
1394void dlm_scan_waiters(struct dlm_ls *ls)
1395{
1396	struct dlm_lkb *lkb;
1397	s64 us;
1398	s64 debug_maxus = 0;
1399	u32 debug_scanned = 0;
1400	u32 debug_expired = 0;
1401	int num_nodes = 0;
1402	int *warned = NULL;
1403
1404	if (!dlm_config.ci_waitwarn_us)
1405		return;
1406
1407	mutex_lock(&ls->ls_waiters_mutex);
1408
1409	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1410		if (!lkb->lkb_wait_time)
1411			continue;
1412
1413		debug_scanned++;
1414
1415		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
1416
1417		if (us < dlm_config.ci_waitwarn_us)
1418			continue;
1419
1420		lkb->lkb_wait_time = 0;
1421
1422		debug_expired++;
1423		if (us > debug_maxus)
1424			debug_maxus = us;
1425
1426		if (!num_nodes) {
1427			num_nodes = ls->ls_num_nodes;
1428			warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
1429		}
1430		if (!warned)
1431			continue;
1432		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
1433			continue;
1434
1435		log_error(ls, "waitwarn %x %lld %d us check connection to "
1436			  "node %d", lkb->lkb_id, (long long)us,
1437			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
1438	}
1439	mutex_unlock(&ls->ls_waiters_mutex);
1440	kfree(warned);
1441
1442	if (debug_expired)
1443		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
1444			  debug_scanned, debug_expired,
1445			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
1446}
1447
1448/* add/remove lkb from global waiters list of lkb's waiting for
1449   a reply from a remote node */
1450
1451static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1452{
1453	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1454	int error = 0;
1455
1456	mutex_lock(&ls->ls_waiters_mutex);
1457
1458	if (is_overlap_unlock(lkb) ||
1459	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1460		error = -EINVAL;
1461		goto out;
1462	}
1463
1464	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1465		switch (mstype) {
1466		case DLM_MSG_UNLOCK:
1467			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1468			break;
1469		case DLM_MSG_CANCEL:
1470			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1471			break;
1472		default:
1473			error = -EBUSY;
1474			goto out;
1475		}
1476		lkb->lkb_wait_count++;
1477		hold_lkb(lkb);
1478
1479		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1480			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
1481			  lkb->lkb_wait_count, lkb->lkb_flags);
1482		goto out;
1483	}
1484
1485	DLM_ASSERT(!lkb->lkb_wait_count,
1486		   dlm_print_lkb(lkb);
1487		   printk("wait_count %d\n", lkb->lkb_wait_count););
1488
1489	lkb->lkb_wait_count++;
1490	lkb->lkb_wait_type = mstype;
1491	lkb->lkb_wait_time = ktime_get();
1492	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1493	hold_lkb(lkb);
1494	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1495 out:
1496	if (error)
1497		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1498			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
1499			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1500	mutex_unlock(&ls->ls_waiters_mutex);
1501	return error;
1502}
1503
1504/* We clear the RESEND flag because we might be taking an lkb off the waiters
1505   list as part of process_requestqueue (e.g. a lookup that has an optimized
1506   request reply on the requestqueue) between dlm_recover_waiters_pre() which
1507   set RESEND and dlm_recover_waiters_post() */
1508
1509static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1510				struct dlm_message *ms)
1511{
1512	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1513	int overlap_done = 0;
1514
1515	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1516		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1517		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1518		overlap_done = 1;
1519		goto out_del;
1520	}
1521
1522	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1523		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1524		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1525		overlap_done = 1;
1526		goto out_del;
1527	}
1528
1529	/* Cancel state was preemptively cleared by a successful convert,
1530	   see next comment, nothing to do. */
1531
1532	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1533	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1534		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1535			  lkb->lkb_id, lkb->lkb_wait_type);
1536		return -1;
1537	}
1538
1539	/* Remove for the convert reply, and preemptively remove for the
1540	   cancel reply.  A convert has been granted while there's still
1541	   an outstanding cancel on it (the cancel is moot and the result
1542	   in the cancel reply should be 0).  We preempt the cancel reply
1543	   because the app gets the convert result and then can follow up
1544	   with another op, like convert.  This subsequent op would see the
1545	   lingering state of the cancel and fail with -EBUSY. */
1546
1547	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1548	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1549	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1550		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1551			  lkb->lkb_id);
1552		lkb->lkb_wait_type = 0;
1553		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1554		lkb->lkb_wait_count--;
1555		goto out_del;
1556	}
1557
1558	/* N.B. type of reply may not always correspond to type of original
1559	   msg due to lookup->request optimization, verify others? */
1560
1561	if (lkb->lkb_wait_type) {
1562		lkb->lkb_wait_type = 0;
1563		goto out_del;
1564	}
1565
1566	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1567		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1568		  mstype, lkb->lkb_flags);
1569	return -1;
1570
1571 out_del:
1572	/* the force-unlock/cancel has completed and we haven't received a reply
1573	   to the op that was in progress prior to the unlock/cancel; we
1574	   give up on any reply to the earlier op.  FIXME: not sure when/how
1575	   this would happen */
1576
1577	if (overlap_done && lkb->lkb_wait_type) {
1578		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1579			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1580		lkb->lkb_wait_count--;
1581		lkb->lkb_wait_type = 0;
1582	}
1583
1584	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1585
1586	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1587	lkb->lkb_wait_count--;
1588	if (!lkb->lkb_wait_count)
1589		list_del_init(&lkb->lkb_wait_reply);
1590	unhold_lkb(lkb);
1591	return 0;
1592}
1593
1594static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1595{
1596	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1597	int error;
1598
1599	mutex_lock(&ls->ls_waiters_mutex);
1600	error = _remove_from_waiters(lkb, mstype, NULL);
1601	mutex_unlock(&ls->ls_waiters_mutex);
1602	return error;
1603}
1604
1605/* Handles situations where we might be processing a "fake" or "stub" reply in
1606   which we can't try to take waiters_mutex again. */
1607
1608static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1609{
1610	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1611	int error;
1612
1613	if (ms->m_flags != DLM_IFL_STUB_MS)
1614		mutex_lock(&ls->ls_waiters_mutex);
1615	error = _remove_from_waiters(lkb, ms->m_type, ms);
1616	if (ms->m_flags != DLM_IFL_STUB_MS)
1617		mutex_unlock(&ls->ls_waiters_mutex);
1618	return error;
1619}
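
/* Sketch of the waiters round trip for a remote operation
   (illustrative; simplified from send_common and the receive_*_reply
   paths in this file):

	error = add_to_waiters(lkb, DLM_MSG_REQUEST, to_nodeid);
	...
	error = send_request(r, lkb);
	if (error)
		remove_from_waiters(lkb, msg_reply_type(DLM_MSG_REQUEST));

   and when the reply arrives:

	remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);

   The overlap flags handled above cover an unlock or cancel issued
   while the original request/convert is still on the waiters list. */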
1620
1621/* If there's an rsb for the same resource being removed, ensure
1622   that the remove message is sent before the new lookup message.
1623   It should be rare to need a delay here, but if not, then it may
1624   be worthwhile to add a proper wait mechanism rather than a delay. */
1625
1626static void wait_pending_remove(struct dlm_rsb *r)
1627{
1628	struct dlm_ls *ls = r->res_ls;
1629 restart:
1630	spin_lock(&ls->ls_remove_spin);
1631	if (ls->ls_remove_len &&
1632	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1633		log_debug(ls, "delay lookup for remove dir %d %s",
1634			  r->res_dir_nodeid, r->res_name);
1635		spin_unlock(&ls->ls_remove_spin);
1636		msleep(1);
1637		goto restart;
1638	}
1639	spin_unlock(&ls->ls_remove_spin);
1640}
1641
1642/*
1643 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1644 * read by other threads in wait_pending_remove.  ls_remove_names
1645 * and ls_remove_lens are only used by the scan thread, so they do
1646 * not need protection.
1647 */
1648
1649static void shrink_bucket(struct dlm_ls *ls, int b)
1650{
1651	struct rb_node *n, *next;
1652	struct dlm_rsb *r;
1653	char *name;
1654	int our_nodeid = dlm_our_nodeid();
1655	int remote_count = 0;
1656	int need_shrink = 0;
1657	int i, len, rv;
1658
1659	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1660
1661	spin_lock(&ls->ls_rsbtbl[b].lock);
1662
1663	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1664		spin_unlock(&ls->ls_rsbtbl[b].lock);
1665		return;
1666	}
1667
1668	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1669		next = rb_next(n);
1670		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1671
1672		/* If we're the directory record for this rsb, and
1673		   we're not the master of it, then we need to wait
1674		   for the master node to send us a dir remove
1675		   before removing the dir record. */
1676
1677		if (!dlm_no_directory(ls) &&
1678		    (r->res_master_nodeid != our_nodeid) &&
1679		    (dlm_dir_nodeid(r) == our_nodeid)) {
1680			continue;
1681		}
1682
1683		need_shrink = 1;
1684
1685		if (!time_after_eq(jiffies, r->res_toss_time +
1686				   dlm_config.ci_toss_secs * HZ)) {
1687			continue;
1688		}
1689
1690		if (!dlm_no_directory(ls) &&
1691		    (r->res_master_nodeid == our_nodeid) &&
1692		    (dlm_dir_nodeid(r) != our_nodeid)) {
1693
1694			/* We're the master of this rsb but we're not
1695			   the directory record, so we need to tell the
1696			   dir node to remove the dir record. */
1697
1698			ls->ls_remove_lens[remote_count] = r->res_length;
1699			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1700			       DLM_RESNAME_MAXLEN);
1701			remote_count++;
1702
1703			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1704				break;
1705			continue;
1706		}
1707
1708		if (!kref_put(&r->res_ref, kill_rsb)) {
1709			log_error(ls, "tossed rsb in use %s", r->res_name);
1710			continue;
1711		}
1712
1713		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1714		dlm_free_rsb(r);
1715	}
1716
1717	if (need_shrink)
1718		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1719	else
1720		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1721	spin_unlock(&ls->ls_rsbtbl[b].lock);
1722
1723	/*
1724	 * While searching for rsb's to free, we found some that require
1725	 * remote removal.  We leave them in place and find them again here
1726	 * so there is a very small gap between removing them from the toss
1727	 * list and sending the removal.  Keeping this gap small is
1728	 * important to keep us (the master node) from being out of sync
1729	 * with the remote dir node for very long.
1730	 *
1731	 * From the time the rsb is removed from toss until just after
1732	 * send_remove, the rsb name is saved in ls_remove_name.  A new
1733	 * lookup checks this to ensure that a new lookup message for the
1734	 * same resource name is not sent just before the remove message.
1735	 */
1736
1737	for (i = 0; i < remote_count; i++) {
1738		name = ls->ls_remove_names[i];
1739		len = ls->ls_remove_lens[i];
1740
1741		spin_lock(&ls->ls_rsbtbl[b].lock);
1742		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1743		if (rv) {
1744			spin_unlock(&ls->ls_rsbtbl[b].lock);
1745			log_debug(ls, "remove_name not toss %s", name);
1746			continue;
1747		}
1748
1749		if (r->res_master_nodeid != our_nodeid) {
1750			spin_unlock(&ls->ls_rsbtbl[b].lock);
1751			log_debug(ls, "remove_name master %d dir %d our %d %s",
1752				  r->res_master_nodeid, r->res_dir_nodeid,
1753				  our_nodeid, name);
1754			continue;
1755		}
1756
1757		if (r->res_dir_nodeid == our_nodeid) {
1758			/* should never happen */
1759			spin_unlock(&ls->ls_rsbtbl[b].lock);
1760			log_error(ls, "remove_name dir %d master %d our %d %s",
1761				  r->res_dir_nodeid, r->res_master_nodeid,
1762				  our_nodeid, name);
1763			continue;
1764		}
1765
1766		if (!time_after_eq(jiffies, r->res_toss_time +
1767				   dlm_config.ci_toss_secs * HZ)) {
1768			spin_unlock(&ls->ls_rsbtbl[b].lock);
1769			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1770				  r->res_toss_time, jiffies, name);
1771			continue;
1772		}
1773
1774		if (!kref_put(&r->res_ref, kill_rsb)) {
1775			spin_unlock(&ls->ls_rsbtbl[b].lock);
1776			log_error(ls, "remove_name in use %s", name);
1777			continue;
1778		}
1779
1780		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1781
1782		/* block lookup of same name until we've sent remove */
1783		spin_lock(&ls->ls_remove_spin);
1784		ls->ls_remove_len = len;
1785		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1786		spin_unlock(&ls->ls_remove_spin);
1787		spin_unlock(&ls->ls_rsbtbl[b].lock);
1788
1789		send_remove(r);
1790
1791		/* allow lookup of name again */
1792		spin_lock(&ls->ls_remove_spin);
1793		ls->ls_remove_len = 0;
1794		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1795		spin_unlock(&ls->ls_remove_spin);
1796
1797		dlm_free_rsb(r);
1798	}
1799}
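
/* The directory-remove handshake sketched end to end (illustrative):

	master node                          dir node
	-----------                          --------
	shrink_bucket():
	  set ls_remove_name/len
	  send_remove(r)          ---->      receive_remove():
	  clear ls_remove_name                 drop the dir-record rsb
	  dlm_free_rsb(r)                      from its toss list

   wait_pending_remove() makes a lookup for the same name spin until
   ls_remove_name is cleared, so a new lookup message cannot be sent
   just ahead of the remove message. */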
1800
1801void dlm_scan_rsbs(struct dlm_ls *ls)
1802{
1803	int i;
1804
1805	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1806		shrink_bucket(ls, i);
1807		if (dlm_locking_stopped(ls))
1808			break;
1809		cond_resched();
1810	}
1811}
1812
1813static void add_timeout(struct dlm_lkb *lkb)
1814{
1815	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1816
1817	if (is_master_copy(lkb))
1818		return;
1819
1820	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1821	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1822		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1823		goto add_it;
1824	}
1825	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1826		goto add_it;
1827	return;
1828
1829 add_it:
1830	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1831	mutex_lock(&ls->ls_timeout_mutex);
1832	hold_lkb(lkb);
1833	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1834	mutex_unlock(&ls->ls_timeout_mutex);
1835}
1836
1837static void del_timeout(struct dlm_lkb *lkb)
1838{
1839	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1840
1841	mutex_lock(&ls->ls_timeout_mutex);
1842	if (!list_empty(&lkb->lkb_time_list)) {
1843		list_del_init(&lkb->lkb_time_list);
1844		unhold_lkb(lkb);
1845	}
1846	mutex_unlock(&ls->ls_timeout_mutex);
1847}
1848
1849/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1850   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1851   and then lock rsb because of lock ordering in add_timeout.  We may need
1852   to specify some special timeout-related bits in the lkb that are just to
1853   be accessed under the timeout_mutex. */
1854
1855void dlm_scan_timeout(struct dlm_ls *ls)
1856{
1857	struct dlm_rsb *r;
1858	struct dlm_lkb *lkb;
1859	int do_cancel, do_warn;
1860	s64 wait_us;
1861
1862	for (;;) {
1863		if (dlm_locking_stopped(ls))
1864			break;
1865
1866		do_cancel = 0;
1867		do_warn = 0;
1868		mutex_lock(&ls->ls_timeout_mutex);
1869		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1870
1871			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1872							lkb->lkb_timestamp));
1873
1874			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1875			    wait_us >= (lkb->lkb_timeout_cs * 10000))
1876				do_cancel = 1;
1877
1878			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1879			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1880				do_warn = 1;
1881
1882			if (!do_cancel && !do_warn)
1883				continue;
1884			hold_lkb(lkb);
1885			break;
1886		}
1887		mutex_unlock(&ls->ls_timeout_mutex);
1888
1889		if (!do_cancel && !do_warn)
1890			break;
1891
1892		r = lkb->lkb_resource;
1893		hold_rsb(r);
1894		lock_rsb(r);
1895
1896		if (do_warn) {
1897			/* clear flag so we only warn once */
1898			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1899			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1900				del_timeout(lkb);
1901			dlm_timeout_warn(lkb);
1902		}
1903
1904		if (do_cancel) {
1905			log_debug(ls, "timeout cancel %x node %d %s",
1906				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1907			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1908			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1909			del_timeout(lkb);
1910			_cancel_lock(r, lkb);
1911		}
1912
1913		unlock_rsb(r);
1914		unhold_rsb(r);
1915		dlm_put_lkb(lkb);
1916	}
1917}
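
/* Unit check for the arithmetic above (a worked example):
   lkb_timeout_cs and ci_timewarn_cs are centiseconds and wait_us is
   microseconds, so the conversion factor is 10,000 us per cs.  A
   500 cs (5 second) timeout therefore trips once

	wait_us >= 500 * 10000 = 5,000,000 us = 5 s
*/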
1918
1919/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1920   dlm_recoverd before checking/setting ls_recover_begin. */
1921
1922void dlm_adjust_timeouts(struct dlm_ls *ls)
1923{
1924	struct dlm_lkb *lkb;
1925	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1926
1927	ls->ls_recover_begin = 0;
1928	mutex_lock(&ls->ls_timeout_mutex);
1929	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1930		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1931	mutex_unlock(&ls->ls_timeout_mutex);
1932
1933	if (!dlm_config.ci_waitwarn_us)
1934		return;
1935
1936	mutex_lock(&ls->ls_waiters_mutex);
1937	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1938		if (ktime_to_us(lkb->lkb_wait_time))
1939			lkb->lkb_wait_time = ktime_get();
1940	}
1941	mutex_unlock(&ls->ls_waiters_mutex);
1942}
1943
1944/* lkb is master or local copy */
1945
1946static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1947{
1948	int b, len = r->res_ls->ls_lvblen;
1949
1950	/* b=1 lvb returned to caller
1951	   b=0 lvb written to rsb or invalidated
1952	   b=-1 do nothing */
1953
1954	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1955
1956	if (b == 1) {
1957		if (!lkb->lkb_lvbptr)
1958			return;
1959
1960		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1961			return;
1962
1963		if (!r->res_lvbptr)
1964			return;
1965
1966		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1967		lkb->lkb_lvbseq = r->res_lvbseq;
1968
1969	} else if (b == 0) {
1970		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1971			rsb_set_flag(r, RSB_VALNOTVALID);
1972			return;
1973		}
1974
1975		if (!lkb->lkb_lvbptr)
1976			return;
1977
1978		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1979			return;
1980
1981		if (!r->res_lvbptr)
1982			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1983
1984		if (!r->res_lvbptr)
1985			return;
1986
1987		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1988		r->res_lvbseq++;
1989		lkb->lkb_lvbseq = r->res_lvbseq;
1990		rsb_clear_flag(r, RSB_VALNOTVALID);
1991	}
1992
1993	if (rsb_flag(r, RSB_VALNOTVALID))
1994		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1995}
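
/* Two hedged examples of the b values above (the authoritative matrix
   is dlm_lvb_operations in lvb_table.h): granting a read mode returns
   the resource's lvb to the caller, while converting down from a write
   mode stores the caller's lvb into the rsb:

	IV -> PR   b = 1   copy r->res_lvbptr into lkb->lkb_lvbptr
	EX -> NL   b = 0   copy lkb->lkb_lvbptr into r->res_lvbptr
*/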
1996
1997static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1998{
1999	if (lkb->lkb_grmode < DLM_LOCK_PW)
2000		return;
2001
2002	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2003		rsb_set_flag(r, RSB_VALNOTVALID);
2004		return;
2005	}
2006
2007	if (!lkb->lkb_lvbptr)
2008		return;
2009
2010	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2011		return;
2012
2013	if (!r->res_lvbptr)
2014		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2015
2016	if (!r->res_lvbptr)
2017		return;
2018
2019	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2020	r->res_lvbseq++;
2021	rsb_clear_flag(r, RSB_VALNOTVALID);
2022}
2023
2024/* lkb is process copy (pc) */
2025
2026static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2027			    struct dlm_message *ms)
2028{
2029	int b;
2030
2031	if (!lkb->lkb_lvbptr)
2032		return;
2033
2034	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2035		return;
2036
2037	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2038	if (b == 1) {
2039		int len = receive_extralen(ms);
2040		if (len > r->res_ls->ls_lvblen)
2041			len = r->res_ls->ls_lvblen;
2042		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2043		lkb->lkb_lvbseq = ms->m_lvbseq;
2044	}
2045}
2046
2047/* Manipulate lkb's on rsb's convert/granted/waiting queues
2048   remove_lock -- used for unlock, removes lkb from granted
2049   revert_lock -- used for cancel, moves lkb from convert to granted
2050   grant_lock  -- used for request and convert, adds lkb to granted or
2051                  moves lkb from convert or waiting to granted
2052
2053   Each of these is used for master or local copy lkb's.  There is
2054   also a _pc() variation used to make the corresponding change on
2055   a process copy (pc) lkb. */
2056
2057static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2058{
2059	del_lkb(r, lkb);
2060	lkb->lkb_grmode = DLM_LOCK_IV;
2061	/* this unhold undoes the original ref from create_lkb()
2062	   so this leads to the lkb being freed */
2063	unhold_lkb(lkb);
2064}
2065
2066static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2067{
2068	set_lvb_unlock(r, lkb);
2069	_remove_lock(r, lkb);
2070}
2071
2072static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2073{
2074	_remove_lock(r, lkb);
2075}
2076
2077/* returns: 0 did nothing
2078	    1 moved lock to granted
2079	   -1 removed lock */
2080
2081static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2082{
2083	int rv = 0;
2084
2085	lkb->lkb_rqmode = DLM_LOCK_IV;
2086
2087	switch (lkb->lkb_status) {
2088	case DLM_LKSTS_GRANTED:
2089		break;
2090	case DLM_LKSTS_CONVERT:
2091		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2092		rv = 1;
2093		break;
2094	case DLM_LKSTS_WAITING:
2095		del_lkb(r, lkb);
2096		lkb->lkb_grmode = DLM_LOCK_IV;
2097		/* this unhold undoes the original ref from create_lkb()
2098		   so this leads to the lkb being freed */
2099		unhold_lkb(lkb);
2100		rv = -1;
2101		break;
2102	default:
2103		log_print("invalid status for revert %d", lkb->lkb_status);
2104	}
2105	return rv;
2106}
2107
2108static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2109{
2110	return revert_lock(r, lkb);
2111}
2112
2113static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2114{
2115	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2116		lkb->lkb_grmode = lkb->lkb_rqmode;
2117		if (lkb->lkb_status)
2118			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2119		else
2120			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2121	}
2122
2123	lkb->lkb_rqmode = DLM_LOCK_IV;
2124	lkb->lkb_highbast = 0;
2125}
2126
2127static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2128{
2129	set_lvb_lock(r, lkb);
2130	_grant_lock(r, lkb);
2131}
2132
2133static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2134			  struct dlm_message *ms)
2135{
2136	set_lvb_lock_pc(r, lkb, ms);
2137	_grant_lock(r, lkb);
2138}
2139
2140/* called by grant_pending_locks() which means an async grant message must
2141   be sent to the requesting node in addition to granting the lock if the
2142   lkb belongs to a remote node. */
2143
2144static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2145{
2146	grant_lock(r, lkb);
2147	if (is_master_copy(lkb))
2148		send_grant(r, lkb);
2149	else
2150		queue_cast(r, lkb, 0);
2151}
2152
2153/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2154   change the granted/requested modes.  We're munging things accordingly in
2155   the process copy.
2156   CONVDEADLK: our grmode may have been forced down to NL to resolve a
2157   conversion deadlock
2158   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2159   compatible with other granted locks */
2160
2161static void munge_demoted(struct dlm_lkb *lkb)
2162{
2163	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2164		log_print("munge_demoted %x invalid modes gr %d rq %d",
2165			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2166		return;
2167	}
2168
2169	lkb->lkb_grmode = DLM_LOCK_NL;
2170}
2171
2172static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2173{
2174	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2175	    ms->m_type != DLM_MSG_GRANT) {
2176		log_print("munge_altmode %x invalid reply type %d",
2177			  lkb->lkb_id, ms->m_type);
2178		return;
2179	}
2180
2181	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2182		lkb->lkb_rqmode = DLM_LOCK_PR;
2183	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2184		lkb->lkb_rqmode = DLM_LOCK_CW;
2185	else {
2186		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2187		dlm_print_lkb(lkb);
2188	}
2189}
2190
2191static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2192{
2193	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2194					   lkb_statequeue);
2195	if (lkb->lkb_id == first->lkb_id)
2196		return 1;
2197
2198	return 0;
2199}
2200
2201/* Check if the given lkb conflicts with another lkb on the queue. */
2202
2203static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2204{
2205	struct dlm_lkb *this;
2206
2207	list_for_each_entry(this, head, lkb_statequeue) {
2208		if (this == lkb)
2209			continue;
2210		if (!modes_compat(this, lkb))
2211			return 1;
2212	}
2213	return 0;
2214}
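
/* For reference, modes_compat() consults the standard DLM mode
   compatibility matrix, summarized here (y = compatible):

		NL  CR  CW  PR  PW  EX
	NL	 y   y   y   y   y   y
	CR	 y   y   y   y   y   n
	CW	 y   y   y   n   n   n
	PR	 y   y   n   y   n   n
	PW	 y   y   n   n   n   n
	EX	 y   n   n   n   n   n

   so queue_conflict() returns 1 as soon as any queued lock's mode is
   incompatible with the candidate lkb's. */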
2215
2216/*
2217 * "A conversion deadlock arises with a pair of lock requests in the converting
2218 * queue for one resource.  The granted mode of each lock blocks the requested
2219 * mode of the other lock."
2220 *
2221 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2222 * convert queue from being granted, then deadlk/demote lkb.
2223 *
2224 * Example:
2225 * Granted Queue: empty
2226 * Convert Queue: NL->EX (first lock)
2227 *                PR->EX (second lock)
2228 *
2229 * The first lock can't be granted because of the granted mode of the second
2230 * lock and the second lock can't be granted because it's not first in the
2231 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2232 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2233 * flag set and return DEMOTED in the lksb flags.
2234 *
2235 * Originally, this function detected conv-deadlk in a more limited scope:
2236 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2237 * - if lkb1 was the first entry in the queue (not just earlier), and was
2238 *   blocked by the granted mode of lkb2, and there was nothing on the
2239 *   granted queue preventing lkb1 from being granted immediately, i.e.
2240 *   lkb2 was the only thing preventing lkb1 from being granted.
2241 *
2242 * That second condition meant we'd only say there was conv-deadlk if
2243 * resolving it (by demotion) would lead to the first lock on the convert
2244 * queue being granted right away.  It allowed conversion deadlocks to exist
2245 * between locks on the convert queue while they couldn't be granted anyway.
2246 *
2247 * Now, we detect and take action on conversion deadlocks immediately when
2248 * they're created, even if they may not be immediately consequential.  If
2249 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2250 * mode that would prevent lkb1's conversion from being granted, we do a
2251 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2252 * I think this means that the lkb_is_ahead condition below should always
2253 * be zero, i.e. there will never be conv-deadlk between two locks that are
2254 * both already on the convert queue.
2255 */
2256
2257static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2258{
2259	struct dlm_lkb *lkb1;
2260	int lkb_is_ahead = 0;
2261
2262	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2263		if (lkb1 == lkb2) {
2264			lkb_is_ahead = 1;
2265			continue;
2266		}
2267
2268		if (!lkb_is_ahead) {
2269			if (!modes_compat(lkb2, lkb1))
2270				return 1;
2271		} else {
2272			if (!modes_compat(lkb2, lkb1) &&
2273			    !modes_compat(lkb1, lkb2))
2274				return 1;
2275		}
2276	}
2277	return 0;
2278}
2279
2280/*
2281 * Return 1 if the lock can be granted, 0 otherwise.
2282 * Also detect and resolve conversion deadlocks.
2283 *
2284 * lkb is the lock to be granted
2285 *
2286 * now is 1 if the function is being called in the context of the
2287 * immediate request, it is 0 if called later, after the lock has been
2288 * queued.
2289 *
2290 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2291 * after recovery.
2292 *
2293 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2294 */
2295
2296static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2297			   int recover)
2298{
2299	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2300
2301	/*
2302	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2303	 * a new request for a NL mode lock being blocked.
2304	 *
2305	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2306	 * request, then it would be granted.  In essence, the use of this flag
2307	 * tells the Lock Manager to expedite this request by not considering
2308	 * what may be in the CONVERTING or WAITING queues...  As of this
2309	 * writing, the EXPEDITE flag can be used only with new requests for NL
2310	 * mode locks.  This flag is not valid for conversion requests.
2311	 *
2312	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2313	 * conversion or used with a non-NL requested mode.  We also know an
2314	 * EXPEDITE request is always granted immediately, so now must always
2315	 * be 1.  The full condition to grant an expedite request: (now &&
2316	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2317	 * therefore be shortened to just checking the flag.
2318	 */
2319
2320	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2321		return 1;
2322
2323	/*
2324	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2325	 * added to the remaining conditions.
2326	 */
2327
2328	if (queue_conflict(&r->res_grantqueue, lkb))
2329		return 0;
2330
2331	/*
2332	 * 6-3: By default, a conversion request is immediately granted if the
2333	 * requested mode is compatible with the modes of all other granted
2334	 * locks
2335	 */
2336
2337	if (queue_conflict(&r->res_convertqueue, lkb))
2338		return 0;
2339
2340	/*
2341	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2342	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2343	 * The lkb's may have been rebuilt on the queues in a different
2344	 * order than they were in on the previous master.  So, granting
2345	 * queued conversions in order after recovery doesn't make sense
2346	 * since the order hasn't been preserved anyway.  The new order
2347	 * could also have created a new "in place" conversion deadlock.
2348	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2349	 * After recovery, there would be no granted locks, and possibly
2350	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2351	 * recovery, grant conversions without considering order.
2352	 */
2353
2354	if (conv && recover)
2355		return 1;
2356
2357	/*
2358	 * 6-5: But the default algorithm for deciding whether to grant or
2359	 * queue conversion requests does not by itself guarantee that such
2360	 * requests are serviced on a "first come first serve" basis.  This, in
2361	 * turn, can lead to a phenomenon known as "indefinite postponement".
2362	 *
2363	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2364	 * the system service employed to request a lock conversion.  This flag
2365	 * forces certain conversion requests to be queued, even if they are
2366	 * compatible with the granted modes of other locks on the same
2367	 * resource.  Thus, the use of this flag results in conversion requests
2368	 * being ordered on a "first come first serve" basis.
2369	 *
2370	 * DCT: This condition is all about new conversions being able to occur
2371	 * "in place" while the lock remains on the granted queue (assuming
2372	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2373	 * doesn't _have_ to go onto the convert queue where it's processed in
2374	 * order.  The "now" variable is necessary to distinguish converts
2375	 * being received and processed for the first time now, because once a
2376	 * convert is moved to the conversion queue the condition below applies
2377	 * requiring fifo granting.
2378	 */
2379
2380	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2381		return 1;
2382
2383	/*
2384	 * Even if the convert is compat with all granted locks,
2385	 * QUECVT forces it behind other locks on the convert queue.
2386	 */
2387
2388	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2389		if (list_empty(&r->res_convertqueue))
2390			return 1;
2391		else
2392			return 0;
2393	}
2394
2395	/*
2396	 * The NOORDER flag is set to avoid the standard VMS rules on grant
2397	 * order.
2398	 */
2399
2400	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2401		return 1;
2402
2403	/*
2404	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2405	 * granted until all other conversion requests ahead of it are granted
2406	 * and/or canceled.
2407	 */
2408
2409	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2410		return 1;
2411
2412	/*
2413	 * 6-4: By default, a new request is immediately granted only if all
2414	 * three of the following conditions are satisfied when the request is
2415	 * issued:
2416	 * - The queue of ungranted conversion requests for the resource is
2417	 *   empty.
2418	 * - The queue of ungranted new requests for the resource is empty.
2419	 * - The mode of the new request is compatible with the most
2420	 *   restrictive mode of all granted locks on the resource.
2421	 */
2422
2423	if (now && !conv && list_empty(&r->res_convertqueue) &&
2424	    list_empty(&r->res_waitqueue))
2425		return 1;
2426
2427	/*
2428	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2429	 * it cannot be granted until the queue of ungranted conversion
2430	 * requests is empty, all ungranted new requests ahead of it are
2431	 * granted and/or canceled, and it is compatible with the granted mode
2432	 * of the most restrictive lock granted on the resource.
2433	 */
2434
2435	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2436	    first_in_list(lkb, &r->res_waitqueue))
2437		return 1;
2438
2439	return 0;
2440}
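
/* A worked example of the rules above (illustrative): with EX granted
   and the convert queue empty, a new PR request fails the grantqueue
   conflict check (PR vs EX) and is queued on res_waitqueue.  When the
   EX lock is unlocked, grant_pending_locks() rescans; the PR request
   is now first on an otherwise clear path, so the final !now clause
   (6-4) grants it. */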
2441
2442static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2443			  int recover, int *err)
2444{
2445	int rv;
2446	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2447	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2448
2449	if (err)
2450		*err = 0;
2451
2452	rv = _can_be_granted(r, lkb, now, recover);
2453	if (rv)
2454		goto out;
2455
2456	/*
2457	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2458	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2459	 * cancels one of the locks.
2460	 */
2461
2462	if (is_convert && can_be_queued(lkb) &&
2463	    conversion_deadlock_detect(r, lkb)) {
2464		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2465			lkb->lkb_grmode = DLM_LOCK_NL;
2466			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2467		} else if (err) {
2468			*err = -EDEADLK;
2469		} else {
2470			log_print("can_be_granted deadlock %x now %d",
2471				  lkb->lkb_id, now);
2472			dlm_dump_rsb(r);
2473		}
2474		goto out;
2475	}
2476
2477	/*
2478	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2479	 * to grant a request in a mode other than the normal rqmode.  It's a
2480	 * simple way to provide a big optimization to applications that can
2481	 * use them.
2482	 */
2483
2484	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2485		alt = DLM_LOCK_PR;
2486	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2487		alt = DLM_LOCK_CW;
2488
2489	if (alt) {
2490		lkb->lkb_rqmode = alt;
2491		rv = _can_be_granted(r, lkb, now, 0);
2492		if (rv)
2493			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2494		else
2495			lkb->lkb_rqmode = rqmode;
2496	}
2497 out:
2498	return rv;
2499}
2500
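/* A minimal sketch (hypothetical) of the ALTPR optimization described
   above: request PW but allow the dlm to grant PR instead when PW would
   block.  The completion ast checks DLM_SBF_ALTMODE to learn which mode
   was actually granted. */

static void example_alt_cast(void *astarg)
{
	struct dlm_lksb *lksb = astarg;	/* assumes astarg was the lksb */

	if (!lksb->sb_status && (lksb->sb_flags & DLM_SBF_ALTMODE))
		pr_info("granted alternate mode PR, not PW\n");
}

static int example_alt_request(dlm_lockspace_t *ls, struct dlm_lksb *lksb,
			       void *name, unsigned int namelen)
{
	return dlm_lock(ls, DLM_LOCK_PW, lksb, DLM_LKF_ALTPR, name, namelen,
			0, example_alt_cast, lksb, NULL);
}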
2501/* Returns the highest requested mode of all blocked conversions; sets
2502   cw if there's a blocked conversion to DLM_LOCK_CW. */
2503
2504static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2505				 unsigned int *count)
2506{
2507	struct dlm_lkb *lkb, *s;
2508	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2509	int hi, demoted, quit, grant_restart, demote_restart;
2510	int deadlk;
2511
2512	quit = 0;
2513 restart:
2514	grant_restart = 0;
2515	demote_restart = 0;
2516	hi = DLM_LOCK_IV;
2517
2518	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2519		demoted = is_demoted(lkb);
2520		deadlk = 0;
2521
2522		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2523			grant_lock_pending(r, lkb);
2524			grant_restart = 1;
2525			if (count)
2526				(*count)++;
2527			continue;
2528		}
2529
2530		if (!demoted && is_demoted(lkb)) {
2531			log_print("WARN: pending demoted %x node %d %s",
2532				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2533			demote_restart = 1;
2534			continue;
2535		}
2536
2537		if (deadlk) {
2538			/*
2539			 * If the DLM_LKF_NODLCKWT flag is set and conversion
2540			 * deadlock is detected, we request a blocking AST so
2541			 * the owner can demote (or cancel) the conversion.
2542			 */
2543			if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2544				if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2545					queue_bast(r, lkb, lkb->lkb_rqmode);
2546					lkb->lkb_highbast = lkb->lkb_rqmode;
2547				}
2548			} else {
2549				log_print("WARN: pending deadlock %x node %d %s",
2550					  lkb->lkb_id, lkb->lkb_nodeid,
2551					  r->res_name);
2552				dlm_dump_rsb(r);
2553			}
2554			continue;
2555		}
2556
2557		hi = max_t(int, lkb->lkb_rqmode, hi);
2558
2559		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2560			*cw = 1;
2561	}
2562
2563	if (grant_restart)
2564		goto restart;
2565	if (demote_restart && !quit) {
2566		quit = 1;
2567		goto restart;
2568	}
2569
2570	return max_t(int, high, hi);
2571}
2572
2573static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2574			      unsigned int *count)
2575{
2576	struct dlm_lkb *lkb, *s;
2577
2578	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2579		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2580			grant_lock_pending(r, lkb);
2581			if (count)
2582				(*count)++;
2583		} else {
2584			high = max_t(int, lkb->lkb_rqmode, high);
2585			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2586				*cw = 1;
2587		}
2588	}
2589
2590	return high;
2591}
2592
2593/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2594   on either the convert or waiting queue.
2595   high is the largest rqmode of all locks blocked on the convert or
2596   waiting queue. */
2597
2598static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2599{
2600	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2601		if (gr->lkb_highbast < DLM_LOCK_EX)
2602			return 1;
2603		return 0;
2604	}
2605
2606	if (gr->lkb_highbast < high &&
2607	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2608		return 1;
2609	return 0;
2610}
2611
2612static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2613{
2614	struct dlm_lkb *lkb, *s;
2615	int high = DLM_LOCK_IV;
2616	int cw = 0;
2617
2618	if (!is_master(r)) {
2619		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2620		dlm_dump_rsb(r);
2621		return;
2622	}
2623
2624	high = grant_pending_convert(r, high, &cw, count);
2625	high = grant_pending_wait(r, high, &cw, count);
2626
2627	if (high == DLM_LOCK_IV)
2628		return;
2629
2630	/*
2631	 * If there are locks left on the wait/convert queue then send blocking
2632	 * ASTs to granted locks based on the largest requested mode (high)
2633	 * found above.
2634	 */
2635
2636	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2637		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2638			if (cw && high == DLM_LOCK_PR &&
2639			    lkb->lkb_grmode == DLM_LOCK_PR)
2640				queue_bast(r, lkb, DLM_LOCK_CW);
2641			else
2642				queue_bast(r, lkb, high);
2643			lkb->lkb_highbast = high;
2644		}
2645	}
2646}
2647
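/* A hypothetical sketch of the receiving end of the basts queued above:
   an application's blocking-ast callback is handed the mode some other
   request is waiting for, and typically reacts by down-converting or
   unlocking from process context, since the callback itself must not
   block. */

struct example_lock {
	int blocking_mode;		/* mode passed to the bast */
	struct work_struct demote_work;	/* does the actual down-convert */
};

static void example_bast(void *astarg, int mode)
{
	struct example_lock *el = astarg;

	el->blocking_mode = mode;
	schedule_work(&el->demote_work);
}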
2648static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2649{
2650	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2651	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2652		if (gr->lkb_highbast < DLM_LOCK_EX)
2653			return 1;
2654		return 0;
2655	}
2656
2657	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2658		return 1;
2659	return 0;
2660}
2661
2662static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2663			    struct dlm_lkb *lkb)
2664{
2665	struct dlm_lkb *gr;
2666
2667	list_for_each_entry(gr, head, lkb_statequeue) {
2668		/* skip self when sending basts to convertqueue */
2669		if (gr == lkb)
2670			continue;
2671		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2672			queue_bast(r, gr, lkb->lkb_rqmode);
2673			gr->lkb_highbast = lkb->lkb_rqmode;
2674		}
2675	}
2676}
2677
2678static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2679{
2680	send_bast_queue(r, &r->res_grantqueue, lkb);
2681}
2682
2683static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2684{
2685	send_bast_queue(r, &r->res_grantqueue, lkb);
2686	send_bast_queue(r, &r->res_convertqueue, lkb);
2687}
2688
2689/* set_master(r, lkb) -- set the master nodeid of a resource
2690
2691   The purpose of this function is to set the nodeid field in the given
2692   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2693   known, it can just be copied to the lkb and the function will return
2694   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2695   before it can be copied to the lkb.
2696
2697   When the rsb nodeid is being looked up remotely, the initial lkb
2698   causing the lookup is kept on the ls_waiters list waiting for the
2699   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2700   on the rsb's res_lookup list until the master is verified.
2701
2702   Return values:
2703   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2704   1: the rsb master is not available and the lkb has been placed on
2705      a wait queue
2706*/
2707
2708static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2709{
2710	int our_nodeid = dlm_our_nodeid();
2711
2712	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2713		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2714		r->res_first_lkid = lkb->lkb_id;
2715		lkb->lkb_nodeid = r->res_nodeid;
2716		return 0;
2717	}
2718
2719	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2720		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2721		return 1;
2722	}
2723
2724	if (r->res_master_nodeid == our_nodeid) {
2725		lkb->lkb_nodeid = 0;
2726		return 0;
2727	}
2728
2729	if (r->res_master_nodeid) {
2730		lkb->lkb_nodeid = r->res_master_nodeid;
2731		return 0;
2732	}
2733
2734	if (dlm_dir_nodeid(r) == our_nodeid) {
2735		/* This is a somewhat unusual case; find_rsb will usually
2736		   have set res_master_nodeid when dir nodeid is local, but
2737		   there are cases where we become the dir node after we've
2738		   passed find_rsb and go through _request_lock again.
2739		   confirm_master() or process_lookup_list() needs to be
2740		   called after this. */
2741		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2742			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2743			  r->res_name);
2744		r->res_master_nodeid = our_nodeid;
2745		r->res_nodeid = 0;
2746		lkb->lkb_nodeid = 0;
2747		return 0;
2748	}
2749
2750	wait_pending_remove(r);
2751
2752	r->res_first_lkid = lkb->lkb_id;
2753	send_lookup(r, lkb);
2754	return 1;
2755}
2756
2757static void process_lookup_list(struct dlm_rsb *r)
2758{
2759	struct dlm_lkb *lkb, *safe;
2760
2761	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2762		list_del_init(&lkb->lkb_rsb_lookup);
2763		_request_lock(r, lkb);
2764		schedule();
2765	}
2766}
2767
2768/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2769
2770static void confirm_master(struct dlm_rsb *r, int error)
2771{
2772	struct dlm_lkb *lkb;
2773
2774	if (!r->res_first_lkid)
2775		return;
2776
2777	switch (error) {
2778	case 0:
2779	case -EINPROGRESS:
2780		r->res_first_lkid = 0;
2781		process_lookup_list(r);
2782		break;
2783
2784	case -EAGAIN:
2785	case -EBADR:
2786	case -ENOTBLK:
2787		/* the remote request failed and won't be retried (it was
2788		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2789		   lkb the first_lkid */
2790
2791		r->res_first_lkid = 0;
2792
2793		if (!list_empty(&r->res_lookup)) {
2794			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2795					 lkb_rsb_lookup);
2796			list_del_init(&lkb->lkb_rsb_lookup);
2797			r->res_first_lkid = lkb->lkb_id;
2798			_request_lock(r, lkb);
2799		}
2800		break;
2801
2802	default:
2803		log_error(r->res_ls, "confirm_master unknown error %d", error);
2804	}
2805}
2806
2807static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2808			 int namelen, unsigned long timeout_cs,
2809			 void (*ast) (void *astparam),
2810			 void *astparam,
2811			 void (*bast) (void *astparam, int mode),
2812			 struct dlm_args *args)
2813{
2814	int rv = -EINVAL;
2815
2816	/* check for invalid arg usage */
2817
2818	if (mode < 0 || mode > DLM_LOCK_EX)
2819		goto out;
2820
2821	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2822		goto out;
2823
2824	if (flags & DLM_LKF_CANCEL)
2825		goto out;
2826
2827	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2828		goto out;
2829
2830	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2831		goto out;
2832
2833	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2834		goto out;
2835
2836	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2837		goto out;
2838
2839	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2840		goto out;
2841
2842	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2843		goto out;
2844
2845	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2846		goto out;
2847
2848	if (!ast || !lksb)
2849		goto out;
2850
2851	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2852		goto out;
2853
2854	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2855		goto out;
2856
2857	/* these args will be copied to the lkb in validate_lock_args;
2858	   it cannot be done now because when converting locks, fields in
2859	   an active lkb cannot be modified before locking the rsb */
2860
2861	args->flags = flags;
2862	args->astfn = ast;
2863	args->astparam = astparam;
2864	args->bastfn = bast;
2865	args->timeout = timeout_cs;
2866	args->mode = mode;
2867	args->lksb = lksb;
2868	rv = 0;
2869 out:
2870	return rv;
2871}
2872
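/* A minimal sketch (hypothetical) of one flag combination that passes
   the checks above: DLM_LKF_EXPEDITE is only valid for a new NL
   request, and may not be combined with CONVERT, QUECVT or NOQUEUE. */

static int example_expedite(dlm_lockspace_t *ls, struct dlm_lksb *lksb,
			    void (*ast)(void *astarg), void *astparam)
{
	/* a new NL request that is not held back by queued requests */
	return dlm_lock(ls, DLM_LOCK_NL, lksb, DLM_LKF_EXPEDITE,
			"example_res", 11, 0, ast, astparam, NULL);
}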
2873static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2874{
2875	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2876		      DLM_LKF_FORCEUNLOCK))
2877		return -EINVAL;
2878
2879	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2880		return -EINVAL;
2881
2882	args->flags = flags;
2883	args->astparam = astarg;
2884	return 0;
2885}
2886
2887static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2888			      struct dlm_args *args)
2889{
2890	int rv = -EINVAL;
2891
2892	if (args->flags & DLM_LKF_CONVERT) {
2893		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2894			goto out;
2895
2896		if (args->flags & DLM_LKF_QUECVT &&
2897		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2898			goto out;
2899
2900		rv = -EBUSY;
2901		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2902			goto out;
2903
2904		if (lkb->lkb_wait_type)
2905			goto out;
2906
2907		if (is_overlap(lkb))
2908			goto out;
2909	}
2910
2911	lkb->lkb_exflags = args->flags;
2912	lkb->lkb_sbflags = 0;
2913	lkb->lkb_astfn = args->astfn;
2914	lkb->lkb_astparam = args->astparam;
2915	lkb->lkb_bastfn = args->bastfn;
2916	lkb->lkb_rqmode = args->mode;
2917	lkb->lkb_lksb = args->lksb;
2918	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2919	lkb->lkb_ownpid = (int) current->pid;
2920	lkb->lkb_timeout_cs = args->timeout;
2921	rv = 0;
2922 out:
2923	if (rv)
2924		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2925			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2926			  lkb->lkb_status, lkb->lkb_wait_type,
2927			  lkb->lkb_resource->res_name);
2928	return rv;
2929}
2930
2931/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2932   for success */
2933
2934/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2935   because there may be a lookup in progress and it's valid to do
2936   cancel/unlockf on it */
2937
2938static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2939{
2940	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2941	int rv = -EINVAL;
2942
2943	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2944		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2945		dlm_print_lkb(lkb);
2946		goto out;
2947	}
2948
2949	/* an lkb may still exist even though the lock is EOL'ed due to a
2950	   cancel, unlock or failed noqueue request; an app can't use these
2951	   locks; return same error as if the lkid had not been found at all */
2952
2953	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2954		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2955		rv = -ENOENT;
2956		goto out;
2957	}
2958
2959	/* an lkb may be waiting for an rsb lookup to complete where the
2960	   lookup was initiated by another lock */
2961
2962	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2963		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2964			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2965			list_del_init(&lkb->lkb_rsb_lookup);
2966			queue_cast(lkb->lkb_resource, lkb,
2967				   args->flags & DLM_LKF_CANCEL ?
2968				   -DLM_ECANCEL : -DLM_EUNLOCK);
2969			unhold_lkb(lkb); /* undoes create_lkb() */
2970		}
2971		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2972		rv = -EBUSY;
2973		goto out;
2974	}
2975
2976	/* cancel not allowed with another cancel/unlock in progress */
2977
2978	if (args->flags & DLM_LKF_CANCEL) {
2979		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2980			goto out;
2981
2982		if (is_overlap(lkb))
2983			goto out;
2984
2985		/* don't let scand try to do a cancel */
2986		del_timeout(lkb);
2987
2988		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2989			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2990			rv = -EBUSY;
2991			goto out;
2992		}
2993
2994		/* there's nothing to cancel */
2995		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2996		    !lkb->lkb_wait_type) {
2997			rv = -EBUSY;
2998			goto out;
2999		}
3000
3001		switch (lkb->lkb_wait_type) {
3002		case DLM_MSG_LOOKUP:
3003		case DLM_MSG_REQUEST:
3004			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3005			rv = -EBUSY;
3006			goto out;
3007		case DLM_MSG_UNLOCK:
3008		case DLM_MSG_CANCEL:
3009			goto out;
3010		}
3011		/* add_to_waiters() will set OVERLAP_CANCEL */
3012		goto out_ok;
3013	}
3014
3015	/* do we need to allow a force-unlock if there's a normal unlock
3016	   already in progress?  in what conditions could the normal unlock
3017	   fail such that we'd want to send a force-unlock to be sure? */
3018
3019	if (args->flags & DLM_LKF_FORCEUNLOCK) {
3020		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3021			goto out;
3022
3023		if (is_overlap_unlock(lkb))
3024			goto out;
3025
3026		/* don't let scand try to do a cancel */
3027		del_timeout(lkb);
3028
3029		if (lkb->lkb_flags & DLM_IFL_RESEND) {
3030			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3031			rv = -EBUSY;
3032			goto out;
3033		}
3034
3035		switch (lkb->lkb_wait_type) {
3036		case DLM_MSG_LOOKUP:
3037		case DLM_MSG_REQUEST:
3038			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3039			rv = -EBUSY;
3040			goto out;
3041		case DLM_MSG_UNLOCK:
3042			goto out;
3043		}
3044		/* add_to_waiters() will set OVERLAP_UNLOCK */
3045		goto out_ok;
3046	}
3047
3048	/* normal unlock not allowed if there's any op in progress */
3049	rv = -EBUSY;
3050	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3051		goto out;
3052
3053 out_ok:
3054	/* an overlapping op shouldn't blow away exflags from other op */
3055	lkb->lkb_exflags |= args->flags;
3056	lkb->lkb_sbflags = 0;
3057	lkb->lkb_astparam = args->astparam;
3058	rv = 0;
3059 out:
3060	if (rv)
3061		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3062			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3063			  args->flags, lkb->lkb_wait_type,
3064			  lkb->lkb_resource->res_name);
3065	return rv;
3066}
3067
3068/*
3069 * Four stage 4 varieties:
3070 * do_request(), do_convert(), do_unlock(), do_cancel()
3071 * These are called on the master node for the given lock and
3072 * from the central locking logic.
3073 */
3074
3075static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3076{
3077	int error = 0;
3078
3079	if (can_be_granted(r, lkb, 1, 0, NULL)) {
3080		grant_lock(r, lkb);
3081		queue_cast(r, lkb, 0);
3082		goto out;
3083	}
3084
3085	if (can_be_queued(lkb)) {
3086		error = -EINPROGRESS;
3087		add_lkb(r, lkb, DLM_LKSTS_WAITING);
3088		add_timeout(lkb);
3089		goto out;
3090	}
3091
3092	error = -EAGAIN;
3093	queue_cast(r, lkb, -EAGAIN);
3094 out:
3095	return error;
3096}
3097
3098static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3099			       int error)
3100{
3101	switch (error) {
3102	case -EAGAIN:
3103		if (force_blocking_asts(lkb))
3104			send_blocking_asts_all(r, lkb);
3105		break;
3106	case -EINPROGRESS:
3107		send_blocking_asts(r, lkb);
3108		break;
3109	}
3110}
3111
3112static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3113{
3114	int error = 0;
3115	int deadlk = 0;
3116
3117	/* changing an existing lock may allow others to be granted */
3118
3119	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3120		grant_lock(r, lkb);
3121		queue_cast(r, lkb, 0);
3122		goto out;
3123	}
3124
3125	/* can_be_granted() detected that this lock would block in a conversion
3126	   deadlock, so we leave it on the granted queue and return EDEADLK in
3127	   the ast for the convert. */
3128
3129	if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3130		/* it's left on the granted queue */
3131		revert_lock(r, lkb);
3132		queue_cast(r, lkb, -EDEADLK);
3133		error = -EDEADLK;
3134		goto out;
3135	}
3136
3137	/* is_demoted() means the can_be_granted() above set the grmode
3138	   to NL, and left us on the granted queue.  This auto-demotion
3139	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
3140	   now grantable.  We have to try to grant other converting locks
3141	   before we try again to grant this one. */
3142
3143	if (is_demoted(lkb)) {
3144		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3145		if (_can_be_granted(r, lkb, 1, 0)) {
3146			grant_lock(r, lkb);
3147			queue_cast(r, lkb, 0);
3148			goto out;
3149		}
3150		/* else fall through and move to convert queue */
3151	}
3152
3153	if (can_be_queued(lkb)) {
3154		error = -EINPROGRESS;
3155		del_lkb(r, lkb);
3156		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3157		add_timeout(lkb);
3158		goto out;
3159	}
3160
3161	error = -EAGAIN;
3162	queue_cast(r, lkb, -EAGAIN);
3163 out:
3164	return error;
3165}
3166
3167static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3168			       int error)
3169{
3170	switch (error) {
3171	case 0:
3172		grant_pending_locks(r, NULL);
3173		/* grant_pending_locks also sends basts */
3174		break;
3175	case -EAGAIN:
3176		if (force_blocking_asts(lkb))
3177			send_blocking_asts_all(r, lkb);
3178		break;
3179	case -EINPROGRESS:
3180		send_blocking_asts(r, lkb);
3181		break;
3182	}
3183}
3184
3185static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3186{
3187	remove_lock(r, lkb);
3188	queue_cast(r, lkb, -DLM_EUNLOCK);
3189	return -DLM_EUNLOCK;
3190}
3191
3192static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3193			      int error)
3194{
3195	grant_pending_locks(r, NULL);
3196}
3197
3198/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3199
3200static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3201{
3202	int error;
3203
3204	error = revert_lock(r, lkb);
3205	if (error) {
3206		queue_cast(r, lkb, -DLM_ECANCEL);
3207		return -DLM_ECANCEL;
3208	}
3209	return 0;
3210}
3211
3212static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3213			      int error)
3214{
3215	if (error)
3216		grant_pending_locks(r, NULL);
3217}
3218
3219/*
3220 * Four stage 3 varieties:
3221 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3222 */
3223
3224/* add a new lkb to a possibly new rsb, called by requesting process */
3225
3226static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3227{
3228	int error;
3229
3230	/* set_master: sets lkb nodeid from r */
3231
3232	error = set_master(r, lkb);
3233	if (error < 0)
3234		goto out;
3235	if (error) {
3236		error = 0;
3237		goto out;
3238	}
3239
3240	if (is_remote(r)) {
3241		/* receive_request() calls do_request() on remote node */
3242		error = send_request(r, lkb);
3243	} else {
3244		error = do_request(r, lkb);
3245		/* for remote locks the request_reply is sent
3246		   between do_request and do_request_effects */
3247		do_request_effects(r, lkb, error);
3248	}
3249 out:
3250	return error;
3251}
3252
3253/* change some property of an existing lkb, e.g. mode */
3254
3255static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3256{
3257	int error;
3258
3259	if (is_remote(r)) {
3260		/* receive_convert() calls do_convert() on remote node */
3261		error = send_convert(r, lkb);
3262	} else {
3263		error = do_convert(r, lkb);
3264		/* for remote locks the convert_reply is sent
3265		   between do_convert and do_convert_effects */
3266		do_convert_effects(r, lkb, error);
3267	}
3268
3269	return error;
3270}
3271
3272/* remove an existing lkb from the granted queue */
3273
3274static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3275{
3276	int error;
3277
3278	if (is_remote(r)) {
3279		/* receive_unlock() calls do_unlock() on remote node */
3280		error = send_unlock(r, lkb);
3281	} else {
3282		error = do_unlock(r, lkb);
3283		/* for remote locks the unlock_reply is sent
3284		   between do_unlock and do_unlock_effects */
3285		do_unlock_effects(r, lkb, error);
3286	}
3287
3288	return error;
3289}
3290
3291/* remove an existing lkb from the convert or wait queue */
3292
3293static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3294{
3295	int error;
3296
3297	if (is_remote(r)) {
3298		/* receive_cancel() calls do_cancel() on remote node */
3299		error = send_cancel(r, lkb);
3300	} else {
3301		error = do_cancel(r, lkb);
3302		/* for remote locks the cancel_reply is sent
3303		   between do_cancel and do_cancel_effects */
3304		do_cancel_effects(r, lkb, error);
3305	}
3306
3307	return error;
3308}
3309
3310/*
3311 * Four stage 2 varieties:
3312 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3313 */
3314
3315static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3316			int len, struct dlm_args *args)
3317{
3318	struct dlm_rsb *r;
3319	int error;
3320
3321	error = validate_lock_args(ls, lkb, args);
3322	if (error)
3323		return error;
3324
3325	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3326	if (error)
3327		return error;
3328
3329	lock_rsb(r);
3330
3331	attach_lkb(r, lkb);
3332	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3333
3334	error = _request_lock(r, lkb);
3335
3336	unlock_rsb(r);
3337	put_rsb(r);
3338	return error;
3339}
3340
3341static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3342			struct dlm_args *args)
3343{
3344	struct dlm_rsb *r;
3345	int error;
3346
3347	r = lkb->lkb_resource;
3348
3349	hold_rsb(r);
3350	lock_rsb(r);
3351
3352	error = validate_lock_args(ls, lkb, args);
3353	if (error)
3354		goto out;
3355
3356	error = _convert_lock(r, lkb);
3357 out:
3358	unlock_rsb(r);
3359	put_rsb(r);
3360	return error;
3361}
3362
3363static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3364		       struct dlm_args *args)
3365{
3366	struct dlm_rsb *r;
3367	int error;
3368
3369	r = lkb->lkb_resource;
3370
3371	hold_rsb(r);
3372	lock_rsb(r);
3373
3374	error = validate_unlock_args(lkb, args);
3375	if (error)
3376		goto out;
3377
3378	error = _unlock_lock(r, lkb);
3379 out:
3380	unlock_rsb(r);
3381	put_rsb(r);
3382	return error;
3383}
3384
3385static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3386		       struct dlm_args *args)
3387{
3388	struct dlm_rsb *r;
3389	int error;
3390
3391	r = lkb->lkb_resource;
3392
3393	hold_rsb(r);
3394	lock_rsb(r);
3395
3396	error = validate_unlock_args(lkb, args);
3397	if (error)
3398		goto out;
3399
3400	error = _cancel_lock(r, lkb);
3401 out:
3402	unlock_rsb(r);
3403	put_rsb(r);
3404	return error;
3405}
3406
3407/*
3408 * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3409 */
3410
3411int dlm_lock(dlm_lockspace_t *lockspace,
3412	     int mode,
3413	     struct dlm_lksb *lksb,
3414	     uint32_t flags,
3415	     void *name,
3416	     unsigned int namelen,
3417	     uint32_t parent_lkid,
3418	     void (*ast) (void *astarg),
3419	     void *astarg,
3420	     void (*bast) (void *astarg, int mode))
3421{
3422	struct dlm_ls *ls;
3423	struct dlm_lkb *lkb;
3424	struct dlm_args args;
3425	int error, convert = flags & DLM_LKF_CONVERT;
3426
3427	ls = dlm_find_lockspace_local(lockspace);
3428	if (!ls)
3429		return -EINVAL;
3430
3431	dlm_lock_recovery(ls);
3432
3433	if (convert)
3434		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3435	else
3436		error = create_lkb(ls, &lkb);
3437
3438	if (error)
3439		goto out;
3440
3441	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3442			      astarg, bast, &args);
3443	if (error)
3444		goto out_put;
3445
3446	if (convert)
3447		error = convert_lock(ls, lkb, &args);
3448	else
3449		error = request_lock(ls, lkb, name, namelen, &args);
3450
3451	if (error == -EINPROGRESS)
3452		error = 0;
3453 out_put:
3454	if (convert || error)
3455		__put_lkb(ls, lkb);
3456	if (error == -EAGAIN || error == -EDEADLK)
3457		error = 0;
3458 out:
3459	dlm_unlock_recovery(ls);
3460	dlm_put_lockspace(ls);
3461	return error;
3462}
3463
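/* A minimal, hypothetical example of the dlm_lock() API above: acquire
   a new EX lock asynchronously.  The example_* names are illustrative
   only. */

static struct dlm_lksb example_lksb;

static void example_done(void *astarg)
{
	struct dlm_lksb *lksb = astarg;

	/* sb_status holds the final result (0 on grant); sb_lkid
	   identifies the lock for later convert/unlock calls */
	pr_info("lock %x status %d\n", lksb->sb_lkid, lksb->sb_status);
}

static int example_request(dlm_lockspace_t *ls)
{
	return dlm_lock(ls, DLM_LOCK_EX, &example_lksb, 0, "example_res",
			11, 0, example_done, &example_lksb, NULL);
}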
3464int dlm_unlock(dlm_lockspace_t *lockspace,
3465	       uint32_t lkid,
3466	       uint32_t flags,
3467	       struct dlm_lksb *lksb,
3468	       void *astarg)
3469{
3470	struct dlm_ls *ls;
3471	struct dlm_lkb *lkb;
3472	struct dlm_args args;
3473	int error;
3474
3475	ls = dlm_find_lockspace_local(lockspace);
3476	if (!ls)
3477		return -EINVAL;
3478
3479	dlm_lock_recovery(ls);
3480
3481	error = find_lkb(ls, lkid, &lkb);
3482	if (error)
3483		goto out;
3484
3485	error = set_unlock_args(flags, astarg, &args);
3486	if (error)
3487		goto out_put;
3488
3489	if (flags & DLM_LKF_CANCEL)
3490		error = cancel_lock(ls, lkb, &args);
3491	else
3492		error = unlock_lock(ls, lkb, &args);
3493
3494	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3495		error = 0;
3496	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3497		error = 0;
3498 out_put:
3499	dlm_put_lkb(lkb);
3500 out:
3501	dlm_unlock_recovery(ls);
3502	dlm_put_lockspace(ls);
3503	return error;
3504}
3505
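/* A hypothetical companion example for dlm_unlock(): cancel a request
   that is still blocked.  As noted above, dlm_unlock() maps
   -DLM_ECANCEL (and -EBUSY, for CANCEL/FORCEUNLOCK) to 0, so a zero
   return only means the operation was initiated; the final result
   arrives in sb_status via the lock's ast. */

static int example_cancel(dlm_lockspace_t *ls, struct dlm_lksb *lksb)
{
	/* lksb->sb_lkid was filled in when the lock was requested */
	return dlm_unlock(ls, lksb->sb_lkid, DLM_LKF_CANCEL, lksb, lksb);
}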
3506/*
3507 * send/receive routines for remote operations and replies
3508 *
3509 * send_args
3510 * send_common
3511 * send_request			receive_request
3512 * send_convert			receive_convert
3513 * send_unlock			receive_unlock
3514 * send_cancel			receive_cancel
3515 * send_grant			receive_grant
3516 * send_bast			receive_bast
3517 * send_lookup			receive_lookup
3518 * send_remove			receive_remove
3519 *
3520 * 				send_common_reply
3521 * receive_request_reply	send_request_reply
3522 * receive_convert_reply	send_convert_reply
3523 * receive_unlock_reply		send_unlock_reply
3524 * receive_cancel_reply		send_cancel_reply
3525 * receive_lookup_reply		send_lookup_reply
3526 */
3527
3528static int _create_message(struct dlm_ls *ls, int mb_len,
3529			   int to_nodeid, int mstype,
3530			   struct dlm_message **ms_ret,
3531			   struct dlm_mhandle **mh_ret)
3532{
3533	struct dlm_message *ms;
3534	struct dlm_mhandle *mh;
3535	char *mb;
3536
3537	/* get_buffer gives us a message handle (mh) that we need to
3538	   pass into lowcomms_commit and a message buffer (mb) that we
3539	   write our data into */
3540
3541	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3542	if (!mh)
3543		return -ENOBUFS;
3544
3545	memset(mb, 0, mb_len);
3546
3547	ms = (struct dlm_message *) mb;
3548
3549	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3550	ms->m_header.h_lockspace = ls->ls_global_id;
3551	ms->m_header.h_nodeid = dlm_our_nodeid();
3552	ms->m_header.h_length = mb_len;
3553	ms->m_header.h_cmd = DLM_MSG;
3554
3555	ms->m_type = mstype;
3556
3557	*mh_ret = mh;
3558	*ms_ret = ms;
3559	return 0;
3560}
3561
3562static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3563			  int to_nodeid, int mstype,
3564			  struct dlm_message **ms_ret,
3565			  struct dlm_mhandle **mh_ret)
3566{
3567	int mb_len = sizeof(struct dlm_message);
3568
3569	switch (mstype) {
3570	case DLM_MSG_REQUEST:
3571	case DLM_MSG_LOOKUP:
3572	case DLM_MSG_REMOVE:
3573		mb_len += r->res_length;
3574		break;
3575	case DLM_MSG_CONVERT:
3576	case DLM_MSG_UNLOCK:
3577	case DLM_MSG_REQUEST_REPLY:
3578	case DLM_MSG_CONVERT_REPLY:
3579	case DLM_MSG_GRANT:
3580		if (lkb && lkb->lkb_lvbptr)
3581			mb_len += r->res_ls->ls_lvblen;
3582		break;
3583	}
3584
3585	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3586			       ms_ret, mh_ret);
3587}
3588
3589/* further lowcomms enhancements or alternate implementations may make
3590   the return value from this function useful at some point */
3591
3592static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3593{
3594	dlm_message_out(ms);
3595	dlm_lowcomms_commit_buffer(mh);
3596	return 0;
3597}
3598
3599static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3600		      struct dlm_message *ms)
3601{
3602	ms->m_nodeid   = lkb->lkb_nodeid;
3603	ms->m_pid      = lkb->lkb_ownpid;
3604	ms->m_lkid     = lkb->lkb_id;
3605	ms->m_remid    = lkb->lkb_remid;
3606	ms->m_exflags  = lkb->lkb_exflags;
3607	ms->m_sbflags  = lkb->lkb_sbflags;
3608	ms->m_flags    = lkb->lkb_flags;
3609	ms->m_lvbseq   = lkb->lkb_lvbseq;
3610	ms->m_status   = lkb->lkb_status;
3611	ms->m_grmode   = lkb->lkb_grmode;
3612	ms->m_rqmode   = lkb->lkb_rqmode;
3613	ms->m_hash     = r->res_hash;
3614
3615	/* m_result and m_bastmode are set from function args,
3616	   not from lkb fields */
3617
3618	if (lkb->lkb_bastfn)
3619		ms->m_asts |= DLM_CB_BAST;
3620	if (lkb->lkb_astfn)
3621		ms->m_asts |= DLM_CB_CAST;
3622
3623	/* compare with switch in create_message; send_remove() doesn't
3624	   use send_args() */
3625
3626	switch (ms->m_type) {
3627	case DLM_MSG_REQUEST:
3628	case DLM_MSG_LOOKUP:
3629		memcpy(ms->m_extra, r->res_name, r->res_length);
3630		break;
3631	case DLM_MSG_CONVERT:
3632	case DLM_MSG_UNLOCK:
3633	case DLM_MSG_REQUEST_REPLY:
3634	case DLM_MSG_CONVERT_REPLY:
3635	case DLM_MSG_GRANT:
3636		if (!lkb->lkb_lvbptr)
3637			break;
3638		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3639		break;
3640	}
3641}
3642
3643static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3644{
3645	struct dlm_message *ms;
3646	struct dlm_mhandle *mh;
3647	int to_nodeid, error;
3648
3649	to_nodeid = r->res_nodeid;
3650
3651	error = add_to_waiters(lkb, mstype, to_nodeid);
3652	if (error)
3653		return error;
3654
3655	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3656	if (error)
3657		goto fail;
3658
3659	send_args(r, lkb, ms);
3660
3661	error = send_message(mh, ms);
3662	if (error)
3663		goto fail;
3664	return 0;
3665
3666 fail:
3667	remove_from_waiters(lkb, msg_reply_type(mstype));
3668	return error;
3669}
3670
3671static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3672{
3673	return send_common(r, lkb, DLM_MSG_REQUEST);
3674}
3675
3676static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3677{
3678	int error;
3679
3680	error = send_common(r, lkb, DLM_MSG_CONVERT);
3681
3682	/* down conversions go without a reply from the master */
3683	if (!error && down_conversion(lkb)) {
3684		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3685		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3686		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3687		r->res_ls->ls_stub_ms.m_result = 0;
3688		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3689	}
3690
3691	return error;
3692}
3693
3694/* FIXME: if this lkb is the only lock we hold on the rsb, then set
3695   MASTER_UNCERTAIN to force the next request on the rsb to confirm
3696   that the master is still correct. */
3697
3698static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3699{
3700	return send_common(r, lkb, DLM_MSG_UNLOCK);
3701}
3702
3703static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3704{
3705	return send_common(r, lkb, DLM_MSG_CANCEL);
3706}
3707
3708static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3709{
3710	struct dlm_message *ms;
3711	struct dlm_mhandle *mh;
3712	int to_nodeid, error;
3713
3714	to_nodeid = lkb->lkb_nodeid;
3715
3716	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3717	if (error)
3718		goto out;
3719
3720	send_args(r, lkb, ms);
3721
3722	ms->m_result = 0;
3723
3724	error = send_message(mh, ms);
3725 out:
3726	return error;
3727}
3728
3729static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3730{
3731	struct dlm_message *ms;
3732	struct dlm_mhandle *mh;
3733	int to_nodeid, error;
3734
3735	to_nodeid = lkb->lkb_nodeid;
3736
3737	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3738	if (error)
3739		goto out;
3740
3741	send_args(r, lkb, ms);
3742
3743	ms->m_bastmode = mode;
3744
3745	error = send_message(mh, ms);
3746 out:
3747	return error;
3748}
3749
3750static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3751{
3752	struct dlm_message *ms;
3753	struct dlm_mhandle *mh;
3754	int to_nodeid, error;
3755
3756	to_nodeid = dlm_dir_nodeid(r);
3757
3758	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3759	if (error)
3760		return error;
3761
3762	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3763	if (error)
3764		goto fail;
3765
3766	send_args(r, lkb, ms);
3767
3768	error = send_message(mh, ms);
3769	if (error)
3770		goto fail;
3771	return 0;
3772
3773 fail:
3774	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3775	return error;
3776}
3777
3778static int send_remove(struct dlm_rsb *r)
3779{
3780	struct dlm_message *ms;
3781	struct dlm_mhandle *mh;
3782	int to_nodeid, error;
3783
3784	to_nodeid = dlm_dir_nodeid(r);
3785
3786	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3787	if (error)
3788		goto out;
3789
3790	memcpy(ms->m_extra, r->res_name, r->res_length);
3791	ms->m_hash = r->res_hash;
3792
3793	error = send_message(mh, ms);
3794 out:
3795	return error;
3796}
3797
3798static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3799			     int mstype, int rv)
3800{
3801	struct dlm_message *ms;
3802	struct dlm_mhandle *mh;
3803	int to_nodeid, error;
3804
3805	to_nodeid = lkb->lkb_nodeid;
3806
3807	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3808	if (error)
3809		goto out;
3810
3811	send_args(r, lkb, ms);
3812
3813	ms->m_result = rv;
3814
3815	error = send_message(mh, ms);
3816 out:
3817	return error;
3818}
3819
3820static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3821{
3822	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3823}
3824
3825static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3826{
3827	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3828}
3829
3830static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3831{
3832	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3833}
3834
3835static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3836{
3837	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3838}
3839
3840static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3841			     int ret_nodeid, int rv)
3842{
3843	struct dlm_rsb *r = &ls->ls_stub_rsb;
3844	struct dlm_message *ms;
3845	struct dlm_mhandle *mh;
3846	int error, nodeid = ms_in->m_header.h_nodeid;
3847
3848	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3849	if (error)
3850		goto out;
3851
3852	ms->m_lkid = ms_in->m_lkid;
3853	ms->m_result = rv;
3854	ms->m_nodeid = ret_nodeid;
3855
3856	error = send_message(mh, ms);
3857 out:
3858	return error;
3859}
3860
3861/* which args we save from a received message depends heavily on the type
3862   of message, unlike the send side where we can safely send everything about
3863   the lkb for any type of message */
3864
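/* Only the low 16 bits of lkb_flags are meaningful between nodes; the
   upper 16 bits (DLM_IFL_MSTCPY and friends) are node-local state.
   That is why the two functions below keep the 0xFFFF0000 half of the
   existing flags and take only the 0x0000FFFF half from the message. */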
3865static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3866{
3867	lkb->lkb_exflags = ms->m_exflags;
3868	lkb->lkb_sbflags = ms->m_sbflags;
3869	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3870		         (ms->m_flags & 0x0000FFFF);
3871}
3872
3873static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3874{
3875	if (ms->m_flags == DLM_IFL_STUB_MS)
3876		return;
3877
3878	lkb->lkb_sbflags = ms->m_sbflags;
3879	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3880		         (ms->m_flags & 0x0000FFFF);
3881}
3882
3883static int receive_extralen(struct dlm_message *ms)
3884{
3885	return (ms->m_header.h_length - sizeof(struct dlm_message));
3886}
3887
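/* For example, a DLM_MSG_REQUEST carries the resource name in m_extra,
   so receive_extralen() returns the name length that create_message()
   added beyond the fixed struct size (see receive_request()). */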
3888static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3889		       struct dlm_message *ms)
3890{
3891	int len;
3892
3893	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3894		if (!lkb->lkb_lvbptr)
3895			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3896		if (!lkb->lkb_lvbptr)
3897			return -ENOMEM;
3898		len = receive_extralen(ms);
3899		if (len > ls->ls_lvblen)
3900			len = ls->ls_lvblen;
3901		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3902	}
3903	return 0;
3904}
3905
3906static void fake_bastfn(void *astparam, int mode)
3907{
3908	log_print("fake_bastfn should not be called");
3909}
3910
3911static void fake_astfn(void *astparam)
3912{
3913	log_print("fake_astfn should not be called");
3914}
3915
3916static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3917				struct dlm_message *ms)
3918{
3919	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3920	lkb->lkb_ownpid = ms->m_pid;
3921	lkb->lkb_remid = ms->m_lkid;
3922	lkb->lkb_grmode = DLM_LOCK_IV;
3923	lkb->lkb_rqmode = ms->m_rqmode;
3924
3925	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3926	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3927
3928	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3929		/* lkb was just created so there won't be an lvb yet */
3930		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3931		if (!lkb->lkb_lvbptr)
3932			return -ENOMEM;
3933	}
3934
3935	return 0;
3936}
3937
3938static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3939				struct dlm_message *ms)
3940{
3941	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3942		return -EBUSY;
3943
3944	if (receive_lvb(ls, lkb, ms))
3945		return -ENOMEM;
3946
3947	lkb->lkb_rqmode = ms->m_rqmode;
3948	lkb->lkb_lvbseq = ms->m_lvbseq;
3949
3950	return 0;
3951}
3952
3953static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3954			       struct dlm_message *ms)
3955{
3956	if (receive_lvb(ls, lkb, ms))
3957		return -ENOMEM;
3958	return 0;
3959}
3960
3961/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3962   uses to send a reply and that the remote end uses to process the reply. */
3963
3964static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3965{
3966	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3967	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3968	lkb->lkb_remid = ms->m_lkid;
3969}
3970
3971/* This is called after the rsb is locked so that we can safely inspect
3972   fields in the lkb. */
3973
3974static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3975{
3976	int from = ms->m_header.h_nodeid;
3977	int error = 0;
3978
3979	switch (ms->m_type) {
3980	case DLM_MSG_CONVERT:
3981	case DLM_MSG_UNLOCK:
3982	case DLM_MSG_CANCEL:
3983		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3984			error = -EINVAL;
3985		break;
3986
3987	case DLM_MSG_CONVERT_REPLY:
3988	case DLM_MSG_UNLOCK_REPLY:
3989	case DLM_MSG_CANCEL_REPLY:
3990	case DLM_MSG_GRANT:
3991	case DLM_MSG_BAST:
3992		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3993			error = -EINVAL;
3994		break;
3995
3996	case DLM_MSG_REQUEST_REPLY:
3997		if (!is_process_copy(lkb))
3998			error = -EINVAL;
3999		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4000			error = -EINVAL;
4001		break;
4002
4003	default:
4004		error = -EINVAL;
4005	}
4006
4007	if (error)
4008		log_error(lkb->lkb_resource->res_ls,
4009			  "ignore invalid message %d from %d %x %x %x %d",
4010			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4011			  lkb->lkb_flags, lkb->lkb_nodeid);
4012	return error;
4013}
4014
4015static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4016{
4017	char name[DLM_RESNAME_MAXLEN + 1];
4018	struct dlm_message *ms;
4019	struct dlm_mhandle *mh;
4020	struct dlm_rsb *r;
4021	uint32_t hash, b;
4022	int rv, dir_nodeid;
4023
4024	memset(name, 0, sizeof(name));
4025	memcpy(name, ms_name, len);
4026
4027	hash = jhash(name, len, 0);
4028	b = hash & (ls->ls_rsbtbl_size - 1);
4029
4030	dir_nodeid = dlm_hash2nodeid(ls, hash);
4031
4032	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4033
4034	spin_lock(&ls->ls_rsbtbl[b].lock);
4035	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4036	if (!rv) {
4037		spin_unlock(&ls->ls_rsbtbl[b].lock);
4038		log_error(ls, "repeat_remove on keep %s", name);
4039		return;
4040	}
4041
4042	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4043	if (!rv) {
4044		spin_unlock(&ls->ls_rsbtbl[b].lock);
4045		log_error(ls, "repeat_remove on toss %s", name);
4046		return;
4047	}
4048
4049	/* use ls->remove_name2 to avoid conflict with shrink? */
4050
4051	spin_lock(&ls->ls_remove_spin);
4052	ls->ls_remove_len = len;
4053	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4054	spin_unlock(&ls->ls_remove_spin);
4055	spin_unlock(&ls->ls_rsbtbl[b].lock);
4056
4057	rv = _create_message(ls, sizeof(struct dlm_message) + len,
4058			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4059	if (rv)
4060		return;
4061
4062	memcpy(ms->m_extra, name, len);
4063	ms->m_hash = hash;
4064
4065	send_message(mh, ms);
4066
4067	spin_lock(&ls->ls_remove_spin);
4068	ls->ls_remove_len = 0;
4069	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4070	spin_unlock(&ls->ls_remove_spin);
4071}
4072
4073static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4074{
4075	struct dlm_lkb *lkb;
4076	struct dlm_rsb *r;
4077	int from_nodeid;
4078	int error, namelen = 0;
4079
4080	from_nodeid = ms->m_header.h_nodeid;
4081
4082	error = create_lkb(ls, &lkb);
4083	if (error)
4084		goto fail;
4085
4086	receive_flags(lkb, ms);
4087	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4088	error = receive_request_args(ls, lkb, ms);
4089	if (error) {
4090		__put_lkb(ls, lkb);
4091		goto fail;
4092	}
4093
4094	/* The dir node is the authority on whether we are the master
4095	   for this rsb or not, so if we receive a request for an rsb we've
4096	   destroyed, we should recreate it.  This race happens when we
4097	   send a remove message to the dir node at the same time that the dir
4098	   node sends us a request for the rsb. */
4099
4100	namelen = receive_extralen(ms);
4101
4102	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4103			 R_RECEIVE_REQUEST, &r);
4104	if (error) {
4105		__put_lkb(ls, lkb);
4106		goto fail;
4107	}
4108
4109	lock_rsb(r);
4110
4111	if (r->res_master_nodeid != dlm_our_nodeid()) {
4112		error = validate_master_nodeid(ls, r, from_nodeid);
4113		if (error) {
4114			unlock_rsb(r);
4115			put_rsb(r);
4116			__put_lkb(ls, lkb);
4117			goto fail;
4118		}
4119	}
4120
4121	attach_lkb(r, lkb);
4122	error = do_request(r, lkb);
4123	send_request_reply(r, lkb, error);
4124	do_request_effects(r, lkb, error);
4125
4126	unlock_rsb(r);
4127	put_rsb(r);
4128
4129	if (error == -EINPROGRESS)
4130		error = 0;
4131	if (error)
4132		dlm_put_lkb(lkb);
4133	return 0;
4134
4135 fail:
4136	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4137	   and do this receive_request again from process_lookup_list once
4138	   we get the lookup reply.  This would avoid many repeated
4139	   ENOTBLK request failures when the lookup reply designating us
4140	   as master is delayed. */
4141
4142	/* We could repeatedly return -EBADR here if our send_remove() is
4143	   delayed in being sent/arriving/being processed on the dir node.
4144	   Another node would repeatedly look up the master, and the dir
4145	   node would continue returning our nodeid until our send_remove
4146	   took effect.
4147
4148	   We send another remove message in case our previous send_remove
4149	   was lost/ignored/missed somehow. */
4150
4151	if (error != -ENOTBLK) {
4152		log_limit(ls, "receive_request %x from %d %d",
4153			  ms->m_lkid, from_nodeid, error);
4154	}
4155
4156	if (namelen && error == -EBADR) {
4157		send_repeat_remove(ls, ms->m_extra, namelen);
4158		msleep(1000);
4159	}
4160
4161	setup_stub_lkb(ls, ms);
4162	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4163	return error;
4164}
4165
4166static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4167{
4168	struct dlm_lkb *lkb;
4169	struct dlm_rsb *r;
4170	int error, reply = 1;
4171
4172	error = find_lkb(ls, ms->m_remid, &lkb);
4173	if (error)
4174		goto fail;
4175
4176	if (lkb->lkb_remid != ms->m_lkid) {
4177		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4178			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4179			  (unsigned long long)lkb->lkb_recover_seq,
4180			  ms->m_header.h_nodeid, ms->m_lkid);
4181		error = -ENOENT;
4182		goto fail;
4183	}
4184
4185	r = lkb->lkb_resource;
4186
4187	hold_rsb(r);
4188	lock_rsb(r);
4189
4190	error = validate_message(lkb, ms);
4191	if (error)
4192		goto out;
4193
4194	receive_flags(lkb, ms);
4195
4196	error = receive_convert_args(ls, lkb, ms);
4197	if (error) {
4198		send_convert_reply(r, lkb, error);
4199		goto out;
4200	}
4201
4202	reply = !down_conversion(lkb);
4203
4204	error = do_convert(r, lkb);
4205	if (reply)
4206		send_convert_reply(r, lkb, error);
4207	do_convert_effects(r, lkb, error);
4208 out:
4209	unlock_rsb(r);
4210	put_rsb(r);
4211	dlm_put_lkb(lkb);
4212	return 0;
4213
4214 fail:
4215	setup_stub_lkb(ls, ms);
4216	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4217	return error;
4218}
4219
4220static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4221{
4222	struct dlm_lkb *lkb;
4223	struct dlm_rsb *r;
4224	int error;
4225
4226	error = find_lkb(ls, ms->m_remid, &lkb);
4227	if (error)
4228		goto fail;
4229
4230	if (lkb->lkb_remid != ms->m_lkid) {
4231		log_error(ls, "receive_unlock %x remid %x remote %d %x",
4232			  lkb->lkb_id, lkb->lkb_remid,
4233			  ms->m_header.h_nodeid, ms->m_lkid);
4234		error = -ENOENT;
4235		goto fail;
4236	}
4237
4238	r = lkb->lkb_resource;
4239
4240	hold_rsb(r);
4241	lock_rsb(r);
4242
4243	error = validate_message(lkb, ms);
4244	if (error)
4245		goto out;
4246
4247	receive_flags(lkb, ms);
4248
4249	error = receive_unlock_args(ls, lkb, ms);
4250	if (error) {
4251		send_unlock_reply(r, lkb, error);
4252		goto out;
4253	}
4254
4255	error = do_unlock(r, lkb);
4256	send_unlock_reply(r, lkb, error);
4257	do_unlock_effects(r, lkb, error);
4258 out:
4259	unlock_rsb(r);
4260	put_rsb(r);
4261	dlm_put_lkb(lkb);
4262	return 0;
4263
4264 fail:
4265	setup_stub_lkb(ls, ms);
4266	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4267	return error;
4268}
4269
4270static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4271{
4272	struct dlm_lkb *lkb;
4273	struct dlm_rsb *r;
4274	int error;
4275
4276	error = find_lkb(ls, ms->m_remid, &lkb);
4277	if (error)
4278		goto fail;
4279
4280	receive_flags(lkb, ms);
4281
4282	r = lkb->lkb_resource;
4283
4284	hold_rsb(r);
4285	lock_rsb(r);
4286
4287	error = validate_message(lkb, ms);
4288	if (error)
4289		goto out;
4290
4291	error = do_cancel(r, lkb);
4292	send_cancel_reply(r, lkb, error);
4293	do_cancel_effects(r, lkb, error);
4294 out:
4295	unlock_rsb(r);
4296	put_rsb(r);
4297	dlm_put_lkb(lkb);
4298	return 0;
4299
4300 fail:
4301	setup_stub_lkb(ls, ms);
4302	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4303	return error;
4304}
4305
4306static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4307{
4308	struct dlm_lkb *lkb;
4309	struct dlm_rsb *r;
4310	int error;
4311
4312	error = find_lkb(ls, ms->m_remid, &lkb);
4313	if (error)
4314		return error;
4315
4316	r = lkb->lkb_resource;
4317
4318	hold_rsb(r);
4319	lock_rsb(r);
4320
4321	error = validate_message(lkb, ms);
4322	if (error)
4323		goto out;
4324
4325	receive_flags_reply(lkb, ms);
4326	if (is_altmode(lkb))
4327		munge_altmode(lkb, ms);
4328	grant_lock_pc(r, lkb, ms);
4329	queue_cast(r, lkb, 0);
4330 out:
4331	unlock_rsb(r);
4332	put_rsb(r);
4333	dlm_put_lkb(lkb);
4334	return 0;
4335}
4336
4337static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4338{
4339	struct dlm_lkb *lkb;
4340	struct dlm_rsb *r;
4341	int error;
4342
4343	error = find_lkb(ls, ms->m_remid, &lkb);
4344	if (error)
4345		return error;
4346
4347	r = lkb->lkb_resource;
4348
4349	hold_rsb(r);
4350	lock_rsb(r);
4351
4352	error = validate_message(lkb, ms);
4353	if (error)
4354		goto out;
4355
4356	queue_bast(r, lkb, ms->m_bastmode);
4357	lkb->lkb_highbast = ms->m_bastmode;
4358 out:
4359	unlock_rsb(r);
4360	put_rsb(r);
4361	dlm_put_lkb(lkb);
4362	return 0;
4363}
4364
4365static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4366{
4367	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4368
4369	from_nodeid = ms->m_header.h_nodeid;
4370	our_nodeid = dlm_our_nodeid();
4371
4372	len = receive_extralen(ms);
4373
4374	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4375				  &ret_nodeid, NULL);
4376
4377	/* Optimization: we're master so treat lookup as a request */
4378	if (!error && ret_nodeid == our_nodeid) {
4379		receive_request(ls, ms);
4380		return;
4381	}
4382	send_lookup_reply(ls, ms, ret_nodeid, error);
4383}
4384
4385static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4386{
4387	char name[DLM_RESNAME_MAXLEN+1];
4388	struct dlm_rsb *r;
4389	uint32_t hash, b;
4390	int rv, len, dir_nodeid, from_nodeid;
4391
4392	from_nodeid = ms->m_header.h_nodeid;
4393
4394	len = receive_extralen(ms);
4395
4396	if (len > DLM_RESNAME_MAXLEN) {
4397		log_error(ls, "receive_remove from %d bad len %d",
4398			  from_nodeid, len);
4399		return;
4400	}
4401
4402	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4403	if (dir_nodeid != dlm_our_nodeid()) {
4404		log_error(ls, "receive_remove from %d bad nodeid %d",
4405			  from_nodeid, dir_nodeid);
4406		return;
4407	}
4408
4409	/* Look for name on rsbtbl.toss; if it's there, kill it.
4410	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4411	   message.  This is an expected race between the dir node sending a
4412	   request to the master node at the same time as the master node sends
4413	   a remove to the dir node.  The resolution to that race is for the
4414	   dir node to ignore the remove message, and the master node to
4415	   recreate the master rsb when it gets a request from the dir node for
4416	   an rsb it doesn't have. */
4417
4418	memset(name, 0, sizeof(name));
4419	memcpy(name, ms->m_extra, len);
4420
4421	hash = jhash(name, len, 0);
4422	b = hash & (ls->ls_rsbtbl_size - 1);
4423
4424	spin_lock(&ls->ls_rsbtbl[b].lock);
4425
4426	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4427	if (rv) {
4428		/* verify the rsb is on keep list per comment above */
4429		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4430		if (rv) {
4431			/* should not happen */
4432			log_error(ls, "receive_remove from %d not found %s",
4433				  from_nodeid, name);
4434			spin_unlock(&ls->ls_rsbtbl[b].lock);
4435			return;
4436		}
4437		if (r->res_master_nodeid != from_nodeid) {
4438			/* should not happen */
4439			log_error(ls, "receive_remove keep from %d master %d",
4440				  from_nodeid, r->res_master_nodeid);
4441			dlm_print_rsb(r);
4442			spin_unlock(&ls->ls_rsbtbl[b].lock);
4443			return;
4444		}
4445
4446		log_debug(ls, "receive_remove from %d master %d first %x %s",
4447			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4448			  name);
4449		spin_unlock(&ls->ls_rsbtbl[b].lock);
4450		return;
4451	}
4452
4453	if (r->res_master_nodeid != from_nodeid) {
4454		log_error(ls, "receive_remove toss from %d master %d",
4455			  from_nodeid, r->res_master_nodeid);
4456		dlm_print_rsb(r);
4457		spin_unlock(&ls->ls_rsbtbl[b].lock);
4458		return;
4459	}
4460
4461	if (kref_put(&r->res_ref, kill_rsb)) {
4462		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4463		spin_unlock(&ls->ls_rsbtbl[b].lock);
4464		dlm_free_rsb(r);
4465	} else {
4466		log_error(ls, "receive_remove from %d rsb ref error",
4467			  from_nodeid);
4468		dlm_print_rsb(r);
4469		spin_unlock(&ls->ls_rsbtbl[b].lock);
4470	}
4471}
4472
4473static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4474{
4475	do_purge(ls, ms->m_nodeid, ms->m_pid);
4476}
4477
4478static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4479{
4480	struct dlm_lkb *lkb;
4481	struct dlm_rsb *r;
4482	int error, mstype, result;
4483	int from_nodeid = ms->m_header.h_nodeid;
4484
4485	error = find_lkb(ls, ms->m_remid, &lkb);
4486	if (error)
4487		return error;
4488
4489	r = lkb->lkb_resource;
4490	hold_rsb(r);
4491	lock_rsb(r);
4492
4493	error = validate_message(lkb, ms);
4494	if (error)
4495		goto out;
4496
4497	mstype = lkb->lkb_wait_type;
4498	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4499	if (error) {
4500		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4501			  lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4502		dlm_dump_rsb(r);
4503		goto out;
4504	}
4505
4506	/* Optimization: the dir node was also the master, so it took our
4507	   lookup as a request and sent request reply instead of lookup reply */
4508	if (mstype == DLM_MSG_LOOKUP) {
4509		r->res_master_nodeid = from_nodeid;
4510		r->res_nodeid = from_nodeid;
4511		lkb->lkb_nodeid = from_nodeid;
4512	}
4513
4514	/* this is the value returned from do_request() on the master */
4515	result = ms->m_result;
4516
4517	switch (result) {
4518	case -EAGAIN:
4519		/* request would block (be queued) on remote master */
4520		queue_cast(r, lkb, -EAGAIN);
4521		confirm_master(r, -EAGAIN);
4522		unhold_lkb(lkb); /* undoes create_lkb() */
4523		break;
4524
4525	case -EINPROGRESS:
4526	case 0:
4527		/* request was queued or granted on remote master */
4528		receive_flags_reply(lkb, ms);
4529		lkb->lkb_remid = ms->m_lkid;
4530		if (is_altmode(lkb))
4531			munge_altmode(lkb, ms);
4532		if (result) {
4533			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4534			add_timeout(lkb);
4535		} else {
4536			grant_lock_pc(r, lkb, ms);
4537			queue_cast(r, lkb, 0);
4538		}
4539		confirm_master(r, result);
4540		break;
4541
4542	case -EBADR:
4543	case -ENOTBLK:
4544		/* find_rsb failed to find rsb or rsb wasn't master */
4545		log_limit(ls, "receive_request_reply %x from %d %d "
4546			  "master %d dir %d first %x %s", lkb->lkb_id,
4547			  from_nodeid, result, r->res_master_nodeid,
4548			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4549
4550		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4551		    r->res_master_nodeid != dlm_our_nodeid()) {
4552			/* cause _request_lock->set_master->send_lookup */
4553			r->res_master_nodeid = 0;
4554			r->res_nodeid = -1;
4555			lkb->lkb_nodeid = -1;
4556		}
4557
4558		if (is_overlap(lkb)) {
4559			/* we'll ignore error in cancel/unlock reply */
4560			queue_cast_overlap(r, lkb);
4561			confirm_master(r, result);
4562			unhold_lkb(lkb); /* undoes create_lkb() */
4563		} else {
4564			_request_lock(r, lkb);
4565
4566			if (r->res_master_nodeid == dlm_our_nodeid())
4567				confirm_master(r, 0);
4568		}
4569		break;
4570
4571	default:
4572		log_error(ls, "receive_request_reply %x error %d",
4573			  lkb->lkb_id, result);
4574	}
4575
4576	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4577		log_debug(ls, "receive_request_reply %x result %d unlock",
4578			  lkb->lkb_id, result);
4579		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4580		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4581		send_unlock(r, lkb);
4582	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4583		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4584		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4585		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4586		send_cancel(r, lkb);
4587	} else {
4588		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4589		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4590	}
4591 out:
4592	unlock_rsb(r);
4593	put_rsb(r);
4594	dlm_put_lkb(lkb);
4595	return 0;
4596}
4597
4598static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4599				    struct dlm_message *ms)
4600{
4601	/* this is the value returned from do_convert() on the master */
4602	switch (ms->m_result) {
4603	case -EAGAIN:
4604		/* convert would block (be queued) on remote master */
4605		queue_cast(r, lkb, -EAGAIN);
4606		break;
4607
4608	case -EDEADLK:
4609		receive_flags_reply(lkb, ms);
4610		revert_lock_pc(r, lkb);
4611		queue_cast(r, lkb, -EDEADLK);
4612		break;
4613
4614	case -EINPROGRESS:
4615		/* convert was queued on remote master */
4616		receive_flags_reply(lkb, ms);
4617		if (is_demoted(lkb))
4618			munge_demoted(lkb);
4619		del_lkb(r, lkb);
4620		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4621		add_timeout(lkb);
4622		break;
4623
4624	case 0:
4625		/* convert was granted on remote master */
4626		receive_flags_reply(lkb, ms);
4627		if (is_demoted(lkb))
4628			munge_demoted(lkb);
4629		grant_lock_pc(r, lkb, ms);
4630		queue_cast(r, lkb, 0);
4631		break;
4632
4633	default:
4634		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4635			  lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4636			  ms->m_result);
4637		dlm_print_rsb(r);
4638		dlm_print_lkb(lkb);
4639	}
4640}
4641
4642static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4643{
4644	struct dlm_rsb *r = lkb->lkb_resource;
4645	int error;
4646
4647	hold_rsb(r);
4648	lock_rsb(r);
4649
4650	error = validate_message(lkb, ms);
4651	if (error)
4652		goto out;
4653
4654	/* stub reply can happen with waiters_mutex held */
4655	error = remove_from_waiters_ms(lkb, ms);
4656	if (error)
4657		goto out;
4658
4659	__receive_convert_reply(r, lkb, ms);
4660 out:
4661	unlock_rsb(r);
4662	put_rsb(r);
4663}
4664
4665static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4666{
4667	struct dlm_lkb *lkb;
4668	int error;
4669
4670	error = find_lkb(ls, ms->m_remid, &lkb);
4671	if (error)
4672		return error;
4673
4674	_receive_convert_reply(lkb, ms);
4675	dlm_put_lkb(lkb);
4676	return 0;
4677}
4678
4679static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4680{
4681	struct dlm_rsb *r = lkb->lkb_resource;
4682	int error;
4683
4684	hold_rsb(r);
4685	lock_rsb(r);
4686
4687	error = validate_message(lkb, ms);
4688	if (error)
4689		goto out;
4690
4691	/* stub reply can happen with waiters_mutex held */
4692	error = remove_from_waiters_ms(lkb, ms);
4693	if (error)
4694		goto out;
4695
4696	/* this is the value returned from do_unlock() on the master */
4697
4698	switch (ms->m_result) {
4699	case -DLM_EUNLOCK:
4700		receive_flags_reply(lkb, ms);
4701		remove_lock_pc(r, lkb);
4702		queue_cast(r, lkb, -DLM_EUNLOCK);
4703		break;
4704	case -ENOENT:
4705		break;
4706	default:
4707		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4708			  lkb->lkb_id, ms->m_result);
4709	}
4710 out:
4711	unlock_rsb(r);
4712	put_rsb(r);
4713}
4714
4715static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4716{
4717	struct dlm_lkb *lkb;
4718	int error;
4719
4720	error = find_lkb(ls, ms->m_remid, &lkb);
4721	if (error)
4722		return error;
4723
4724	_receive_unlock_reply(lkb, ms);
4725	dlm_put_lkb(lkb);
4726	return 0;
4727}
4728
4729static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4730{
4731	struct dlm_rsb *r = lkb->lkb_resource;
4732	int error;
4733
4734	hold_rsb(r);
4735	lock_rsb(r);
4736
4737	error = validate_message(lkb, ms);
4738	if (error)
4739		goto out;
4740
4741	/* stub reply can happen with waiters_mutex held */
4742	error = remove_from_waiters_ms(lkb, ms);
4743	if (error)
4744		goto out;
4745
4746	/* this is the value returned from do_cancel() on the master */
4747
4748	switch (ms->m_result) {
4749	case -DLM_ECANCEL:
4750		receive_flags_reply(lkb, ms);
4751		revert_lock_pc(r, lkb);
4752		queue_cast(r, lkb, -DLM_ECANCEL);
4753		break;
4754	case 0:
4755		break;
4756	default:
4757		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4758			  lkb->lkb_id, ms->m_result);
4759	}
4760 out:
4761	unlock_rsb(r);
4762	put_rsb(r);
4763}
4764
4765static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4766{
4767	struct dlm_lkb *lkb;
4768	int error;
4769
4770	error = find_lkb(ls, ms->m_remid, &lkb);
4771	if (error)
4772		return error;
4773
4774	_receive_cancel_reply(lkb, ms);
4775	dlm_put_lkb(lkb);
4776	return 0;
4777}
4778
4779static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4780{
4781	struct dlm_lkb *lkb;
4782	struct dlm_rsb *r;
4783	int error, ret_nodeid;
4784	int do_lookup_list = 0;
4785
4786	error = find_lkb(ls, ms->m_lkid, &lkb);
4787	if (error) {
4788		log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4789		return;
4790	}
4791
4792	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4793	   FIXME: will a non-zero error ever be returned? */
4794
4795	r = lkb->lkb_resource;
4796	hold_rsb(r);
4797	lock_rsb(r);
4798
4799	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4800	if (error)
4801		goto out;
4802
4803	ret_nodeid = ms->m_nodeid;
4804
4805	/* We sometimes receive a request from the dir node for this
4806	   rsb before we've received the dir node's lookup_reply for it.
4807	   The request from the dir node implies we're the master, so we set
4808	   ourselves as master in receive_request_reply, and verify here that
4809	   we are indeed the master. */
4810
4811	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4812		/* This should never happen */
4813		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4814			  "master %d dir %d our %d first %x %s",
4815			  lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4816			  r->res_master_nodeid, r->res_dir_nodeid,
4817			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4818	}
4819
4820	if (ret_nodeid == dlm_our_nodeid()) {
4821		r->res_master_nodeid = ret_nodeid;
4822		r->res_nodeid = 0;
4823		do_lookup_list = 1;
4824		r->res_first_lkid = 0;
4825	} else if (ret_nodeid == -1) {
4826		/* the remote node doesn't believe it's the dir node */
4827		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4828			  lkb->lkb_id, ms->m_header.h_nodeid);
4829		r->res_master_nodeid = 0;
4830		r->res_nodeid = -1;
4831		lkb->lkb_nodeid = -1;
4832	} else {
4833		/* set_master() will set lkb_nodeid from r */
4834		r->res_master_nodeid = ret_nodeid;
4835		r->res_nodeid = ret_nodeid;
4836	}
4837
4838	if (is_overlap(lkb)) {
4839		log_debug(ls, "receive_lookup_reply %x unlock %x",
4840			  lkb->lkb_id, lkb->lkb_flags);
4841		queue_cast_overlap(r, lkb);
4842		unhold_lkb(lkb); /* undoes create_lkb() */
4843		goto out_list;
4844	}
4845
4846	_request_lock(r, lkb);
4847
4848 out_list:
4849	if (do_lookup_list)
4850		process_lookup_list(r);
4851 out:
4852	unlock_rsb(r);
4853	put_rsb(r);
4854	dlm_put_lkb(lkb);
4855}
4856
4857static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4858			     uint32_t saved_seq)
4859{
4860	int error = 0, noent = 0;
4861
4862	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4863		log_limit(ls, "receive %d from non-member %d %x %x %d",
4864			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4865			  ms->m_remid, ms->m_result);
4866		return;
4867	}
4868
4869	switch (ms->m_type) {
4870
4871	/* messages sent to a master node */
4872
4873	case DLM_MSG_REQUEST:
4874		error = receive_request(ls, ms);
4875		break;
4876
4877	case DLM_MSG_CONVERT:
4878		error = receive_convert(ls, ms);
4879		break;
4880
4881	case DLM_MSG_UNLOCK:
4882		error = receive_unlock(ls, ms);
4883		break;
4884
4885	case DLM_MSG_CANCEL:
4886		noent = 1;
4887		error = receive_cancel(ls, ms);
4888		break;
4889
4890	/* messages sent from a master node (replies to above) */
4891
4892	case DLM_MSG_REQUEST_REPLY:
4893		error = receive_request_reply(ls, ms);
4894		break;
4895
4896	case DLM_MSG_CONVERT_REPLY:
4897		error = receive_convert_reply(ls, ms);
4898		break;
4899
4900	case DLM_MSG_UNLOCK_REPLY:
4901		error = receive_unlock_reply(ls, ms);
4902		break;
4903
4904	case DLM_MSG_CANCEL_REPLY:
4905		error = receive_cancel_reply(ls, ms);
4906		break;
4907
4908	/* messages sent from a master node (only two types of async msg) */
4909
4910	case DLM_MSG_GRANT:
4911		noent = 1;
4912		error = receive_grant(ls, ms);
4913		break;
4914
4915	case DLM_MSG_BAST:
4916		noent = 1;
4917		error = receive_bast(ls, ms);
4918		break;
4919
4920	/* messages sent to a dir node */
4921
4922	case DLM_MSG_LOOKUP:
4923		receive_lookup(ls, ms);
4924		break;
4925
4926	case DLM_MSG_REMOVE:
4927		receive_remove(ls, ms);
4928		break;
4929
4930	/* messages sent from a dir node (remove has no reply) */
4931
4932	case DLM_MSG_LOOKUP_REPLY:
4933		receive_lookup_reply(ls, ms);
4934		break;
4935
4936	/* other messages */
4937
4938	case DLM_MSG_PURGE:
4939		receive_purge(ls, ms);
4940		break;
4941
4942	default:
4943		log_error(ls, "unknown message type %d", ms->m_type);
4944	}
4945
4946	/*
4947	 * When checking for ENOENT, we're checking the result of
4948	 * find_lkb(m_remid):
4949	 *
4950	 * The lock id referenced in the message wasn't found.  This may
4951	 * happen in normal usage for the async messages and cancel, so
4952	 * only use log_debug for them.
4953	 *
4954	 * Some errors are expected and normal.
4955	 */
4956
4957	if (error == -ENOENT && noent) {
4958		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4959			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4960			  ms->m_lkid, saved_seq);
4961	} else if (error == -ENOENT) {
4962		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4963			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4964			  ms->m_lkid, saved_seq);
4965
4966		if (ms->m_type == DLM_MSG_CONVERT)
4967			dlm_dump_rsb_hash(ls, ms->m_hash);
4968	}
4969
4970	if (error == -EINVAL) {
4971		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4972			  "saved_seq %u",
4973			  ms->m_type, ms->m_header.h_nodeid,
4974			  ms->m_lkid, ms->m_remid, saved_seq);
4975	}
4976}
4977
4978/* If the lockspace is in recovery mode (locking stopped), then normal
4979   messages are saved on the requestqueue for processing after recovery is
4980   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4981   messages off the requestqueue before we process new ones. This occurs right
4982   after recovery completes when we transition from saving all messages on
4983   requestqueue, to processing all the saved messages, to processing new
4984   messages as they arrive. */
4985
4986static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4987				int nodeid)
4988{
4989	if (dlm_locking_stopped(ls)) {
4990		/* If we were a member of this lockspace, left, and rejoined,
4991		   other nodes may still be sending us messages from the
4992		   lockspace generation before we left. */
4993		if (!ls->ls_generation) {
4994			log_limit(ls, "receive %d from %d ignore old gen",
4995				  ms->m_type, nodeid);
4996			return;
4997		}
4998
4999		dlm_add_requestqueue(ls, nodeid, ms);
5000	} else {
5001		dlm_wait_requestqueue(ls);
5002		_receive_message(ls, ms, 0);
5003	}
5004}
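
/* Illustrative sketch (standalone, not kernel code) of the requestqueue
 * state machine described above: while locking is stopped, messages are
 * appended to a saved list; after recovery the saved list is drained in
 * order before any newly arriving message is handled directly.  All
 * toy_* names are invented for this example. */
#include <stdio.h>

#define MAXQ 16

struct toy_ls {
	int locking_stopped;
	int saved[MAXQ];
	int nsaved;
};

static void handle(int msg) { printf("process msg %d\n", msg); }

static void toy_receive(struct toy_ls *ls, int msg)
{
	if (ls->locking_stopped) {
		if (ls->nsaved < MAXQ)
			ls->saved[ls->nsaved++] = msg;	/* like dlm_add_requestqueue */
	} else {
		handle(msg);				/* like _receive_message */
	}
}

static void toy_drain(struct toy_ls *ls)		/* dlm_recoverd's role */
{
	for (int i = 0; i < ls->nsaved; i++)
		handle(ls->saved[i]);
	ls->nsaved = 0;
	ls->locking_stopped = 0;
}

int main(void)
{
	struct toy_ls ls = { .locking_stopped = 1 };

	toy_receive(&ls, 1);	/* saved */
	toy_receive(&ls, 2);	/* saved */
	toy_drain(&ls);		/* recovery done: 1 then 2 processed */
	toy_receive(&ls, 3);	/* processed immediately */
	return 0;
}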
5005
5006/* This is called by dlm_recoverd to process messages that were saved on
5007   the requestqueue. */
5008
5009void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5010			       uint32_t saved_seq)
5011{
5012	_receive_message(ls, ms, saved_seq);
5013}
5014
5015/* This is called by the midcomms layer when something is received for
5016   the lockspace.  It could be either a MSG (normal message sent as part of
5017   standard locking activity) or an RCOM (recovery message sent as part of
5018   lockspace recovery). */
5019
5020void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5021{
5022	struct dlm_header *hd = &p->header;
5023	struct dlm_ls *ls;
5024	int type = 0;
5025
5026	switch (hd->h_cmd) {
5027	case DLM_MSG:
5028		dlm_message_in(&p->message);
5029		type = p->message.m_type;
5030		break;
5031	case DLM_RCOM:
5032		dlm_rcom_in(&p->rcom);
5033		type = p->rcom.rc_type;
5034		break;
5035	default:
5036		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5037		return;
5038	}
5039
5040	if (hd->h_nodeid != nodeid) {
5041		log_print("invalid h_nodeid %d from %d lockspace %x",
5042			  hd->h_nodeid, nodeid, hd->h_lockspace);
5043		return;
5044	}
5045
5046	ls = dlm_find_lockspace_global(hd->h_lockspace);
5047	if (!ls) {
5048		if (dlm_config.ci_log_debug) {
5049			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5050				"%u from %d cmd %d type %d\n",
5051				hd->h_lockspace, nodeid, hd->h_cmd, type);
5052		}
5053
5054		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5055			dlm_send_ls_not_ready(nodeid, &p->rcom);
5056		return;
5057	}
5058
5059	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5060	   be inactive (in this ls) before transitioning to recovery mode */
5061
5062	down_read(&ls->ls_recv_active);
5063	if (hd->h_cmd == DLM_MSG)
5064		dlm_receive_message(ls, &p->message, nodeid);
5065	else
5066		dlm_receive_rcom(ls, &p->rcom, nodeid);
5067	up_read(&ls->ls_recv_active);
5068
5069	dlm_put_lockspace(ls);
5070}
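
/* Illustrative sketch (standalone, not kernel code) of the ls_recv_active
 * pattern used by dlm_receive_buffer() above: receive paths take the lock
 * shared, and the stop/recovery path takes it exclusive, so recovery
 * cannot begin until every in-flight receive has finished.  A POSIX
 * pthread_rwlock_t stands in for the kernel rwsem; toy_* names are
 * invented here.  Compile with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t recv_active = PTHREAD_RWLOCK_INITIALIZER;

static void toy_receive_buffer(int msg)
{
	pthread_rwlock_rdlock(&recv_active);	/* like down_read() */
	printf("deliver msg %d\n", msg);
	pthread_rwlock_unlock(&recv_active);	/* like up_read() */
}

static void toy_ls_stop(void)
{
	/* like down_write(): blocks until all readers have finished */
	pthread_rwlock_wrlock(&recv_active);
	printf("all receives drained; safe to enter recovery\n");
	pthread_rwlock_unlock(&recv_active);
}

int main(void)
{
	toy_receive_buffer(1);
	toy_ls_stop();
	return 0;
}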
5071
5072static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5073				   struct dlm_message *ms_stub)
5074{
5075	if (middle_conversion(lkb)) {
5076		hold_lkb(lkb);
5077		memset(ms_stub, 0, sizeof(struct dlm_message));
5078		ms_stub->m_flags = DLM_IFL_STUB_MS;
5079		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5080		ms_stub->m_result = -EINPROGRESS;
5081		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5082		_receive_convert_reply(lkb, ms_stub);
5083
5084		/* Same special case as in receive_rcom_lock_args() */
5085		lkb->lkb_grmode = DLM_LOCK_IV;
5086		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5087		unhold_lkb(lkb);
5088
5089	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5090		lkb->lkb_flags |= DLM_IFL_RESEND;
5091	}
5092
5093	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5094	   conversions are async; there's no reply from the remote master */
5095}
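
/* Illustrative sketch (standalone, not kernel code) of the stub-reply
 * trick used by recover_convert_waiter() above and dlm_recover_waiters_pre()
 * below: rather than special-casing a dead master, recovery builds a fake
 * reply, marks it as a stub, and feeds it through the same reply path a
 * real network message would take.  toy_* names are invented; -115 is
 * assumed to be Linux's -EINPROGRESS. */
#include <stdio.h>
#include <string.h>

#define TOY_STUB		0x1	/* stands in for DLM_IFL_STUB_MS */
#define TOY_CONVERT_REPLY	2

struct toy_msg {
	int flags;		/* TOY_STUB marks a locally faked reply */
	int type;
	int result;
	int from_nodeid;
};

static void toy_receive_reply(struct toy_msg *ms)
{
	printf("reply type %d result %d%s\n", ms->type, ms->result,
	       (ms->flags & TOY_STUB) ? " (stub)" : "");
}

int main(void)
{
	struct toy_msg ms_stub;

	memset(&ms_stub, 0, sizeof(ms_stub));
	ms_stub.flags = TOY_STUB;
	ms_stub.type = TOY_CONVERT_REPLY;
	ms_stub.result = -115;		/* fake -EINPROGRESS, as above */
	ms_stub.from_nodeid = 3;	/* the failed master */
	toy_receive_reply(&ms_stub);
	return 0;
}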
5096
5097/* A waiting lkb needs recovery if the master node has failed, or
5098   the master node is changing (only when no directory is used) */
5099
5100static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5101				 int dir_nodeid)
5102{
5103	if (dlm_no_directory(ls))
5104		return 1;
5105
5106	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5107		return 1;
5108
5109	return 0;
5110}
5111
5112/* Recovery for locks that are waiting for replies from nodes that are now
5113   gone.  We can just complete unlocks and cancels by faking a reply from the
5114   dead node.  Requests and up-conversions we flag to be resent after
5115   recovery.  Down-conversions can just be completed with a fake reply like
5116   unlocks.  Conversions between PR and CW need special attention. */
5117
5118void dlm_recover_waiters_pre(struct dlm_ls *ls)
5119{
5120	struct dlm_lkb *lkb, *safe;
5121	struct dlm_message *ms_stub;
5122	int wait_type, stub_unlock_result, stub_cancel_result;
5123	int dir_nodeid;
5124
5125	ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5126	if (!ms_stub)
5127		return;
5128
5129	mutex_lock(&ls->ls_waiters_mutex);
5130
5131	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5132
5133		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5134
5135		/* exclude debug messages about unlocks because there can be so
5136		   many and they aren't very interesting */
5137
5138		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5139			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5140				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5141				  lkb->lkb_id,
5142				  lkb->lkb_remid,
5143				  lkb->lkb_wait_type,
5144				  lkb->lkb_resource->res_nodeid,
5145				  lkb->lkb_nodeid,
5146				  lkb->lkb_wait_nodeid,
5147				  dir_nodeid);
5148		}
5149
5150		/* all outstanding lookups, regardless of destination, will be
5151		   resent after recovery is done */
5152
5153		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5154			lkb->lkb_flags |= DLM_IFL_RESEND;
5155			continue;
5156		}
5157
5158		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5159			continue;
5160
5161		wait_type = lkb->lkb_wait_type;
5162		stub_unlock_result = -DLM_EUNLOCK;
5163		stub_cancel_result = -DLM_ECANCEL;
5164
5165		/* Main reply may have been received leaving a zero wait_type,
5166		   but a reply for the overlapping op may not have been
5167		   received.  In that case we need to fake the appropriate
5168		   reply for the overlap op. */
5169
5170		if (!wait_type) {
5171			if (is_overlap_cancel(lkb)) {
5172				wait_type = DLM_MSG_CANCEL;
5173				if (lkb->lkb_grmode == DLM_LOCK_IV)
5174					stub_cancel_result = 0;
5175			}
5176			if (is_overlap_unlock(lkb)) {
5177				wait_type = DLM_MSG_UNLOCK;
5178				if (lkb->lkb_grmode == DLM_LOCK_IV)
5179					stub_unlock_result = -ENOENT;
5180			}
5181
5182			log_debug(ls, "rwpre overlap %x %x %d %d %d",
5183				  lkb->lkb_id, lkb->lkb_flags, wait_type,
5184				  stub_cancel_result, stub_unlock_result);
5185		}
5186
5187		switch (wait_type) {
5188
5189		case DLM_MSG_REQUEST:
5190			lkb->lkb_flags |= DLM_IFL_RESEND;
5191			break;
5192
5193		case DLM_MSG_CONVERT:
5194			recover_convert_waiter(ls, lkb, ms_stub);
5195			break;
5196
5197		case DLM_MSG_UNLOCK:
5198			hold_lkb(lkb);
5199			memset(ms_stub, 0, sizeof(struct dlm_message));
5200			ms_stub->m_flags = DLM_IFL_STUB_MS;
5201			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5202			ms_stub->m_result = stub_unlock_result;
5203			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5204			_receive_unlock_reply(lkb, ms_stub);
5205			dlm_put_lkb(lkb);
5206			break;
5207
5208		case DLM_MSG_CANCEL:
5209			hold_lkb(lkb);
5210			memset(ms_stub, 0, sizeof(struct dlm_message));
5211			ms_stub->m_flags = DLM_IFL_STUB_MS;
5212			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5213			ms_stub->m_result = stub_cancel_result;
5214			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5215			_receive_cancel_reply(lkb, ms_stub);
5216			dlm_put_lkb(lkb);
5217			break;
5218
5219		default:
5220			log_error(ls, "invalid lkb wait_type %d %d",
5221				  lkb->lkb_wait_type, wait_type);
5222		}
5223		schedule();
5224	}
5225	mutex_unlock(&ls->ls_waiters_mutex);
5226	kfree(ms_stub);
5227}
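
/* Illustrative sketch (standalone, not kernel code) of the per-waiter
 * decision table dlm_recover_waiters_pre() applies, per the comment above
 * it: lookups and requests are flagged for resend, unlocks and cancels
 * are completed with a faked reply, and conversions get the special
 * handling in recover_convert_waiter().  toy_* names are invented. */
#include <stdio.h>

enum toy_wait_type { TOY_LOOKUP, TOY_REQUEST, TOY_CONVERT, TOY_UNLOCK,
		     TOY_CANCEL };
enum toy_action { TOY_RESEND, TOY_FAKE_REPLY, TOY_SPECIAL };

static enum toy_action toy_waiter_action(enum toy_wait_type w)
{
	switch (w) {
	case TOY_LOOKUP:
	case TOY_REQUEST:
		return TOY_RESEND;	/* like setting DLM_IFL_RESEND */
	case TOY_UNLOCK:
	case TOY_CANCEL:
		return TOY_FAKE_REPLY;	/* stub -DLM_EUNLOCK / -DLM_ECANCEL */
	default:
		return TOY_SPECIAL;	/* conversions: recover_convert_waiter */
	}
}

int main(void)
{
	printf("request -> %d, unlock -> %d, convert -> %d\n",
	       toy_waiter_action(TOY_REQUEST),
	       toy_waiter_action(TOY_UNLOCK),
	       toy_waiter_action(TOY_CONVERT));
	return 0;
}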
5228
5229static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5230{
5231	struct dlm_lkb *lkb;
5232	int found = 0;
5233
5234	mutex_lock(&ls->ls_waiters_mutex);
5235	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5236		if (lkb->lkb_flags & DLM_IFL_RESEND) {
5237			hold_lkb(lkb);
5238			found = 1;
5239			break;
5240		}
5241	}
5242	mutex_unlock(&ls->ls_waiters_mutex);
5243
5244	if (!found)
5245		lkb = NULL;
5246	return lkb;
5247}
5248
5249/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5250   master or dir-node for r.  Processing the lkb may result in it being placed
5251   back on waiters. */
5252
5253/* We do this after normal locking has been enabled and any saved messages
5254   (in requestqueue) have been processed.  We should be confident that at
5255   this point we won't get or process a reply to any of these waiting
5256   operations.  But, new ops may be coming in on the rsbs/locks here from
5257   userspace or remotely. */
5258
5259/* there may have been an overlap unlock/cancel prior to recovery or after
5260   recovery.  if before, the lkb may still have a positive wait_count; if
5261   after, the overlap flag would just have been set and nothing new sent.
5262   we can be confident here that any replies to either the initial op or
5263   overlap ops prior to recovery have been received. */
5264
5265int dlm_recover_waiters_post(struct dlm_ls *ls)
5266{
5267	struct dlm_lkb *lkb;
5268	struct dlm_rsb *r;
5269	int error = 0, mstype, err, oc, ou;
5270
5271	while (1) {
5272		if (dlm_locking_stopped(ls)) {
5273			log_debug(ls, "recover_waiters_post aborted");
5274			error = -EINTR;
5275			break;
5276		}
5277
5278		lkb = find_resend_waiter(ls);
5279		if (!lkb)
5280			break;
5281
5282		r = lkb->lkb_resource;
5283		hold_rsb(r);
5284		lock_rsb(r);
5285
5286		mstype = lkb->lkb_wait_type;
5287		oc = is_overlap_cancel(lkb);
5288		ou = is_overlap_unlock(lkb);
5289		err = 0;
5290
5291		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5292			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5293			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5294			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5295			  dlm_dir_nodeid(r), oc, ou);
5296
5297		/* At this point we assume that we won't get a reply to any
5298		   previous op or overlap op on this lock.  First, do a big
5299		   remove_from_waiters() for all previous ops. */
5300
5301		lkb->lkb_flags &= ~DLM_IFL_RESEND;
5302		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5303		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5304		lkb->lkb_wait_type = 0;
5305		lkb->lkb_wait_count = 0;
5306		mutex_lock(&ls->ls_waiters_mutex);
5307		list_del_init(&lkb->lkb_wait_reply);
5308		mutex_unlock(&ls->ls_waiters_mutex);
5309		unhold_lkb(lkb); /* for waiters list */
5310
5311		if (oc || ou) {
5312			/* do an unlock or cancel instead of resending */
5313			switch (mstype) {
5314			case DLM_MSG_LOOKUP:
5315			case DLM_MSG_REQUEST:
5316				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5317							-DLM_ECANCEL);
5318				unhold_lkb(lkb); /* undoes create_lkb() */
5319				break;
5320			case DLM_MSG_CONVERT:
5321				if (oc) {
5322					queue_cast(r, lkb, -DLM_ECANCEL);
5323				} else {
5324					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5325					_unlock_lock(r, lkb);
5326				}
5327				break;
5328			default:
5329				err = 1;
5330			}
5331		} else {
5332			switch (mstype) {
5333			case DLM_MSG_LOOKUP:
5334			case DLM_MSG_REQUEST:
5335				_request_lock(r, lkb);
5336				if (is_master(r))
5337					confirm_master(r, 0);
5338				break;
5339			case DLM_MSG_CONVERT:
5340				_convert_lock(r, lkb);
5341				break;
5342			default:
5343				err = 1;
5344			}
5345		}
5346
5347		if (err) {
5348			log_error(ls, "waiter %x msg %d r_nodeid %d "
5349				  "dir_nodeid %d overlap %d %d",
5350				  lkb->lkb_id, mstype, r->res_nodeid,
5351				  dlm_dir_nodeid(r), oc, ou);
5352		}
5353		unlock_rsb(r);
5354		put_rsb(r);
5355		dlm_put_lkb(lkb);
5356	}
5357
5358	return error;
5359}
5360
5361static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5362			      struct list_head *list)
5363{
5364	struct dlm_lkb *lkb, *safe;
5365
5366	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5367		if (!is_master_copy(lkb))
5368			continue;
5369
5370		/* don't purge lkbs we've added in recover_master_copy for
5371		   the current recovery seq */
5372
5373		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5374			continue;
5375
5376		del_lkb(r, lkb);
5377
5378		/* this put should free the lkb */
5379		if (!dlm_put_lkb(lkb))
5380			log_error(ls, "purged mstcpy lkb not released");
5381	}
5382}
5383
5384void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5385{
5386	struct dlm_ls *ls = r->res_ls;
5387
5388	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5389	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5390	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5391}
5392
5393static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5394			    struct list_head *list,
5395			    int nodeid_gone, unsigned int *count)
5396{
5397	struct dlm_lkb *lkb, *safe;
5398
5399	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5400		if (!is_master_copy(lkb))
5401			continue;
5402
5403		if ((lkb->lkb_nodeid == nodeid_gone) ||
5404		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5405
5406			/* tell recover_lvb to invalidate the lvb
5407			   because a node holding EX/PW failed */
5408			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5409			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5410				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5411			}
5412
5413			del_lkb(r, lkb);
5414
5415			/* this put should free the lkb */
5416			if (!dlm_put_lkb(lkb))
5417				log_error(ls, "purged dead lkb not released");
5418
5419			rsb_set_flag(r, RSB_RECOVER_GRANT);
5420
5421			(*count)++;
5422		}
5423	}
5424}
5425
5426/* Get rid of locks held by nodes that are gone. */
5427
5428void dlm_recover_purge(struct dlm_ls *ls)
5429{
5430	struct dlm_rsb *r;
5431	struct dlm_member *memb;
5432	int nodes_count = 0;
5433	int nodeid_gone = 0;
5434	unsigned int lkb_count = 0;
5435
5436	/* cache one removed nodeid to optimize the common
5437	   case of a single node removed */
5438
5439	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5440		nodes_count++;
5441		nodeid_gone = memb->nodeid;
5442	}
5443
5444	if (!nodes_count)
5445		return;
5446
5447	down_write(&ls->ls_root_sem);
5448	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5449		hold_rsb(r);
5450		lock_rsb(r);
5451		if (is_master(r)) {
5452			purge_dead_list(ls, r, &r->res_grantqueue,
5453					nodeid_gone, &lkb_count);
5454			purge_dead_list(ls, r, &r->res_convertqueue,
5455					nodeid_gone, &lkb_count);
5456			purge_dead_list(ls, r, &r->res_waitqueue,
5457					nodeid_gone, &lkb_count);
5458		}
5459		unlock_rsb(r);
5460		unhold_rsb(r);
5461		cond_resched();
5462	}
5463	up_write(&ls->ls_root_sem);
5464
5465	if (lkb_count)
5466		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5467			  lkb_count, nodes_count);
5468}
5469
5470static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5471{
5472	struct rb_node *n;
5473	struct dlm_rsb *r;
5474
5475	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5476	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5477		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5478
5479		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5480			continue;
5481		if (!is_master(r)) {
5482			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5483			continue;
5484		}
5485		hold_rsb(r);
5486		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5487		return r;
5488	}
5489	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5490	return NULL;
5491}
5492
5493/*
5494 * Attempt to grant locks on resources that we are the master of.
5495 * Locks may have become grantable during recovery because locks
5496 * from departed nodes have been purged (or not rebuilt), allowing
5497 * previously blocked locks to now be granted.  The subset of rsb's
5498 * we are interested in are those with lkb's on either the convert or
5499 * waiting queues.
5500 *
5501 * Simplest would be to go through each master rsb and check for non-empty
5502 * convert or waiting queues, and attempt to grant on those rsbs.
5503 * Checking the queues requires lock_rsb, though, for which we'd need
5504 * to release the rsbtbl lock.  This would make iterating through all
5505 * rsb's very inefficient.  So, we rely on earlier recovery routines
5506 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5507 * locks for.
5508 */
5509
5510void dlm_recover_grant(struct dlm_ls *ls)
5511{
5512	struct dlm_rsb *r;
5513	int bucket = 0;
5514	unsigned int count = 0;
5515	unsigned int rsb_count = 0;
5516	unsigned int lkb_count = 0;
5517
5518	while (1) {
5519		r = find_grant_rsb(ls, bucket);
5520		if (!r) {
5521			if (bucket == ls->ls_rsbtbl_size - 1)
5522				break;
5523			bucket++;
5524			continue;
5525		}
5526		rsb_count++;
5527		count = 0;
5528		lock_rsb(r);
5529		/* the RECOVER_GRANT flag is checked in the grant path */
5530		grant_pending_locks(r, &count);
5531		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5532		lkb_count += count;
5533		confirm_master(r, 0);
5534		unlock_rsb(r);
5535		put_rsb(r);
5536		cond_resched();
5537	}
5538
5539	if (lkb_count)
5540		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5541			  lkb_count, rsb_count);
5542}
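
/* Illustrative sketch (standalone, not kernel code) of the flag-driven
 * scan described in the comment above dlm_recover_grant(): only entries
 * pre-marked as possibly grantable are visited, and the mark is cleared
 * once processed.  The real code additionally drops and re-takes the
 * table lock around each rsb; that is omitted here.  toy_* names are
 * invented. */
#include <stdio.h>

struct toy_rsb {
	int recover_grant;	/* set earlier in recovery when work remains */
	int id;
};

static void toy_grant_pending(struct toy_rsb *r)
{
	printf("try to grant pending locks on rsb %d\n", r->id);
}

int main(void)
{
	struct toy_rsb tbl[4] = { {0, 0}, {1, 1}, {0, 2}, {1, 3} };

	for (int i = 0; i < 4; i++) {
		if (!tbl[i].recover_grant)
			continue;	/* nothing became grantable here */
		toy_grant_pending(&tbl[i]);
		tbl[i].recover_grant = 0;
	}
	return 0;
}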
5543
5544static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5545					 uint32_t remid)
5546{
5547	struct dlm_lkb *lkb;
5548
5549	list_for_each_entry(lkb, head, lkb_statequeue) {
5550		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5551			return lkb;
5552	}
5553	return NULL;
5554}
5555
5556static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5557				    uint32_t remid)
5558{
5559	struct dlm_lkb *lkb;
5560
5561	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5562	if (lkb)
5563		return lkb;
5564	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5565	if (lkb)
5566		return lkb;
5567	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5568	if (lkb)
5569		return lkb;
5570	return NULL;
5571}
5572
5573/* needs at least dlm_rcom + rcom_lock */
5574static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5575				  struct dlm_rsb *r, struct dlm_rcom *rc)
5576{
5577	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5578
5579	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5580	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5581	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5582	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5583	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5584	lkb->lkb_flags |= DLM_IFL_MSTCPY;
5585	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5586	lkb->lkb_rqmode = rl->rl_rqmode;
5587	lkb->lkb_grmode = rl->rl_grmode;
5588	/* don't set lkb_status because add_lkb wants to set it itself */
5589
5590	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5591	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5592
5593	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5594		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5595			 sizeof(struct rcom_lock);
5596		if (lvblen > ls->ls_lvblen)
5597			return -EINVAL;
5598		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5599		if (!lkb->lkb_lvbptr)
5600			return -ENOMEM;
5601		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5602	}
5603
5604	/* Conversions between PR and CW (middle modes) need special handling.
5605	   The real granted mode of these converting locks cannot be determined
5606	   until all locks have been rebuilt on the rsb (recover_conversion) */
5607
5608	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5609	    middle_conversion(lkb)) {
5610		rl->rl_status = DLM_LKSTS_CONVERT;
5611		lkb->lkb_grmode = DLM_LOCK_IV;
5612		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5613	}
5614
5615	return 0;
5616}
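
/* Illustrative sketch (standalone, not kernel code) of the trailing-LVB
 * length check in receive_rcom_lock_args() above: the LVB is whatever
 * bytes follow the two fixed headers, so its length is derived from the
 * header's total length and must be validated against the lockspace
 * limit before copying.  toy_* structs are invented stand-ins. */
#include <stdio.h>

struct toy_hdr  { int h_length; };
struct toy_rcom { struct toy_hdr h; char rc_buf[]; };
struct toy_lock { int rl_lkid; char rl_lvb[]; };

static int toy_lvblen(const struct toy_rcom *rc, int ls_lvblen)
{
	int lvblen = rc->h.h_length - (int)sizeof(struct toy_rcom)
				    - (int)sizeof(struct toy_lock);

	if (lvblen < 0 || lvblen > ls_lvblen)
		return -1;	/* like returning -EINVAL above */
	return lvblen;
}

int main(void)
{
	struct toy_rcom rc = { .h = { .h_length = sizeof(struct toy_rcom) +
					      sizeof(struct toy_lock) + 32 } };

	printf("lvblen %d\n", toy_lvblen(&rc, 64));	/* prints 32 */
	return 0;
}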
5617
5618/* This lkb may have been recovered in a previous aborted recovery so we need
5619   to check if the rsb already has an lkb with the given remote nodeid/lkid.
5620   If so we just send back a standard reply.  If not, we create a new lkb with
5621   the given values and send back our lkid.  We send back our lkid by sending
5622   back the rcom_lock struct we got but with the remid field filled in. */
5623
5624/* needs at least dlm_rcom + rcom_lock */
5625int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5626{
5627	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5628	struct dlm_rsb *r;
5629	struct dlm_lkb *lkb;
5630	uint32_t remid = 0;
5631	int from_nodeid = rc->rc_header.h_nodeid;
5632	int error;
5633
5634	if (rl->rl_parent_lkid) {
5635		error = -EOPNOTSUPP;
5636		goto out;
5637	}
5638
5639	remid = le32_to_cpu(rl->rl_lkid);
5640
5641	/* In general we expect the rsb returned to be R_MASTER, but we don't
5642	   have to require it.  Recovery of masters on one node can overlap
5643	   recovery of locks on another node, so one node can send us MSTCPY
5644	   locks before we've made ourselves master of this rsb.  We can still
5645	   add new MSTCPY locks that we receive here without any harm; when
5646	   we make ourselves master, dlm_recover_masters() won't touch the
5647	   MSTCPY locks we've received early. */
5648
5649	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5650			 from_nodeid, R_RECEIVE_RECOVER, &r);
5651	if (error)
5652		goto out;
5653
5654	lock_rsb(r);
5655
5656	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5657		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5658			  from_nodeid, remid);
5659		error = -EBADR;
5660		goto out_unlock;
5661	}
5662
5663	lkb = search_remid(r, from_nodeid, remid);
5664	if (lkb) {
5665		error = -EEXIST;
5666		goto out_remid;
5667	}
5668
5669	error = create_lkb(ls, &lkb);
5670	if (error)
5671		goto out_unlock;
5672
5673	error = receive_rcom_lock_args(ls, lkb, r, rc);
5674	if (error) {
5675		__put_lkb(ls, lkb);
5676		goto out_unlock;
5677	}
5678
5679	attach_lkb(r, lkb);
5680	add_lkb(r, lkb, rl->rl_status);
5681	error = 0;
5682	ls->ls_recover_locks_in++;
5683
5684	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5685		rsb_set_flag(r, RSB_RECOVER_GRANT);
5686
5687 out_remid:
5688	/* this is the new value returned to the lock holder for
5689	   saving in its process-copy lkb */
5690	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5691
5692	lkb->lkb_recover_seq = ls->ls_recover_seq;
5693
5694 out_unlock:
5695	unlock_rsb(r);
5696	put_rsb(r);
5697 out:
5698	if (error && error != -EEXIST)
5699		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5700			  from_nodeid, remid, error);
5701	rl->rl_result = cpu_to_le32(error);
5702	return error;
5703}
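
/* Illustrative sketch (standalone, not kernel code) of the idempotency
 * rule in dlm_recover_master_copy() above: a retransmitted MSTCPY lock is
 * recognised by its (nodeid, remid) pair, and the existing local id is
 * returned instead of creating a duplicate.  toy_* names are invented. */
#include <stdio.h>

struct toy_lkb { int nodeid, remid, local_id; };

static struct toy_lkb *toy_search_remid(struct toy_lkb *v, int n,
					int nodeid, int remid)
{
	for (int i = 0; i < n; i++)
		if (v[i].nodeid == nodeid && v[i].remid == remid)
			return &v[i];
	return NULL;	/* caller creates a new lkb and replies with its id */
}

int main(void)
{
	struct toy_lkb locks[] = { {2, 0x10, 100}, {3, 0x11, 101} };
	struct toy_lkb *lkb = toy_search_remid(locks, 2, 3, 0x11);

	printf("remid 0x11 -> local id %d\n", lkb ? lkb->local_id : -1);
	return 0;
}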
5704
5705/* needs at least dlm_rcom + rcom_lock */
5706int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5707{
5708	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5709	struct dlm_rsb *r;
5710	struct dlm_lkb *lkb;
5711	uint32_t lkid, remid;
5712	int error, result;
5713
5714	lkid = le32_to_cpu(rl->rl_lkid);
5715	remid = le32_to_cpu(rl->rl_remid);
5716	result = le32_to_cpu(rl->rl_result);
5717
5718	error = find_lkb(ls, lkid, &lkb);
5719	if (error) {
5720		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5721			  lkid, rc->rc_header.h_nodeid, remid, result);
5722		return error;
5723	}
5724
5725	r = lkb->lkb_resource;
5726	hold_rsb(r);
5727	lock_rsb(r);
5728
5729	if (!is_process_copy(lkb)) {
5730		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5731			  lkid, rc->rc_header.h_nodeid, remid, result);
5732		dlm_dump_rsb(r);
5733		unlock_rsb(r);
5734		put_rsb(r);
5735		dlm_put_lkb(lkb);
5736		return -EINVAL;
5737	}
5738
5739	switch (result) {
5740	case -EBADR:
5741		/* There's a chance the new master received our lock before
5742		   dlm_recover_master_reply(); this wouldn't happen if we did
5743		   a barrier between recover_masters and recover_locks. */
5744
5745		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5746			  lkid, rc->rc_header.h_nodeid, remid, result);
5747	
5748		dlm_send_rcom_lock(r, lkb);
5749		goto out;
5750	case -EEXIST:
5751	case 0:
5752		lkb->lkb_remid = remid;
5753		break;
5754	default:
5755		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5756			  lkid, rc->rc_header.h_nodeid, remid, result);
5757	}
5758
5759	/* an ack for dlm_recover_locks() which waits for replies from
5760	   all the locks it sends to new masters */
5761	dlm_recovered_lock(r);
5762 out:
5763	unlock_rsb(r);
5764	put_rsb(r);
5765	dlm_put_lkb(lkb);
5766
5767	return 0;
5768}
5769
5770int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5771		     int mode, uint32_t flags, void *name, unsigned int namelen,
5772		     unsigned long timeout_cs)
5773{
5774	struct dlm_lkb *lkb;
5775	struct dlm_args args;
5776	int error;
5777
5778	dlm_lock_recovery(ls);
5779
5780	error = create_lkb(ls, &lkb);
5781	if (error) {
5782		kfree(ua);
5783		goto out;
5784	}
5785
5786	if (flags & DLM_LKF_VALBLK) {
5787		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5788		if (!ua->lksb.sb_lvbptr) {
5789			kfree(ua);
5790			__put_lkb(ls, lkb);
5791			error = -ENOMEM;
5792			goto out;
5793		}
5794	}
5795
5796	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5797	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
5798	   lock and that lkb_astparam is the dlm_user_args structure. */
5799
5800	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5801			      fake_astfn, ua, fake_bastfn, &args);
5802	lkb->lkb_flags |= DLM_IFL_USER;
5803
5804	if (error) {
5805		__put_lkb(ls, lkb);
5806		goto out;
5807	}
5808
5809	error = request_lock(ls, lkb, name, namelen, &args);
5810
5811	switch (error) {
5812	case 0:
5813		break;
5814	case -EINPROGRESS:
5815		error = 0;
5816		break;
5817	case -EAGAIN:
5818		error = 0;
5819		/* fall through */
5820	default:
5821		__put_lkb(ls, lkb);
5822		goto out;
5823	}
5824
5825	/* add this new lkb to the per-process list of locks */
5826	spin_lock(&ua->proc->locks_spin);
5827	hold_lkb(lkb);
5828	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5829	spin_unlock(&ua->proc->locks_spin);
5830 out:
5831	dlm_unlock_recovery(ls);
5832	return error;
5833}
5834
5835int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5836		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5837		     unsigned long timeout_cs)
5838{
5839	struct dlm_lkb *lkb;
5840	struct dlm_args args;
5841	struct dlm_user_args *ua;
5842	int error;
5843
5844	dlm_lock_recovery(ls);
5845
5846	error = find_lkb(ls, lkid, &lkb);
5847	if (error)
5848		goto out;
5849
5850	/* user can change the params on its lock when it converts it, or
5851	   add an lvb that didn't exist before */
5852
5853	ua = lkb->lkb_ua;
5854
5855	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5856		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5857		if (!ua->lksb.sb_lvbptr) {
5858			error = -ENOMEM;
5859			goto out_put;
5860		}
5861	}
5862	if (lvb_in && ua->lksb.sb_lvbptr)
5863		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5864
5865	ua->xid = ua_tmp->xid;
5866	ua->castparam = ua_tmp->castparam;
5867	ua->castaddr = ua_tmp->castaddr;
5868	ua->bastparam = ua_tmp->bastparam;
5869	ua->bastaddr = ua_tmp->bastaddr;
5870	ua->user_lksb = ua_tmp->user_lksb;
5871
5872	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5873			      fake_astfn, ua, fake_bastfn, &args);
5874	if (error)
5875		goto out_put;
5876
5877	error = convert_lock(ls, lkb, &args);
5878
5879	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5880		error = 0;
5881 out_put:
5882	dlm_put_lkb(lkb);
5883 out:
5884	dlm_unlock_recovery(ls);
5885	kfree(ua_tmp);
5886	return error;
5887}
5888
5889/*
5890 * The caller asks for an orphan lock on a given resource with a given mode.
5891 * If a matching lock exists, it's moved to the owner's list of locks and
5892 * the lkid is returned.
5893 */
5894
5895int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5896		     int mode, uint32_t flags, void *name, unsigned int namelen,
5897		     unsigned long timeout_cs, uint32_t *lkid)
5898{
5899	struct dlm_lkb *lkb;
5900	struct dlm_user_args *ua;
5901	int found_other_mode = 0;
5902	int found = 0;
5903	int rv = 0;
5904
5905	mutex_lock(&ls->ls_orphans_mutex);
5906	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5907		if (lkb->lkb_resource->res_length != namelen)
5908			continue;
5909		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5910			continue;
5911		if (lkb->lkb_grmode != mode) {
5912			found_other_mode = 1;
5913			continue;
5914		}
5915
5916		found = 1;
5917		list_del_init(&lkb->lkb_ownqueue);
5918		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5919		*lkid = lkb->lkb_id;
5920		break;
5921	}
5922	mutex_unlock(&ls->ls_orphans_mutex);
5923
5924	if (!found && found_other_mode) {
5925		rv = -EAGAIN;
5926		goto out;
5927	}
5928
5929	if (!found) {
5930		rv = -ENOENT;
5931		goto out;
5932	}
5933
5934	lkb->lkb_exflags = flags;
5935	lkb->lkb_ownpid = (int) current->pid;
5936
5937	ua = lkb->lkb_ua;
5938
5939	ua->proc = ua_tmp->proc;
5940	ua->xid = ua_tmp->xid;
5941	ua->castparam = ua_tmp->castparam;
5942	ua->castaddr = ua_tmp->castaddr;
5943	ua->bastparam = ua_tmp->bastparam;
5944	ua->bastaddr = ua_tmp->bastaddr;
5945	ua->user_lksb = ua_tmp->user_lksb;
5946
5947	/*
5948	 * The lkb reference from the ls_orphans list was not
5949	 * removed above, and is now considered the reference
5950	 * for the proc locks list.
5951	 */
5952
5953	spin_lock(&ua->proc->locks_spin);
5954	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5955	spin_unlock(&ua->proc->locks_spin);
5956 out:
5957	kfree(ua_tmp);
5958	return rv;
5959}
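
/* Illustrative sketch (standalone, not kernel code) of the matching rule
 * in dlm_user_adopt_orphan() above: a same-name, same-mode orphan is
 * adopted; a same-name orphan in a different mode yields -EAGAIN; no name
 * match at all yields -ENOENT.  toy_* names are invented, and -11/-2 are
 * assumed to be Linux's -EAGAIN/-ENOENT. */
#include <stdio.h>
#include <string.h>

struct toy_orphan { const char *name; int mode; };

static int toy_adopt(struct toy_orphan *v, int n, const char *name, int mode)
{
	int found_other_mode = 0;

	for (int i = 0; i < n; i++) {
		if (strcmp(v[i].name, name))
			continue;
		if (v[i].mode != mode) {
			found_other_mode = 1;
			continue;
		}
		return i;	/* adopt this orphan */
	}
	return found_other_mode ? -11 /* -EAGAIN */ : -2 /* -ENOENT */;
}

int main(void)
{
	struct toy_orphan o[] = { {"a", 3}, {"b", 5} };

	printf("%d %d %d\n", toy_adopt(o, 2, "a", 3),
	       toy_adopt(o, 2, "a", 5), toy_adopt(o, 2, "c", 3));
	return 0;
}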
5960
5961int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5962		    uint32_t flags, uint32_t lkid, char *lvb_in)
5963{
5964	struct dlm_lkb *lkb;
5965	struct dlm_args args;
5966	struct dlm_user_args *ua;
5967	int error;
5968
5969	dlm_lock_recovery(ls);
5970
5971	error = find_lkb(ls, lkid, &lkb);
5972	if (error)
5973		goto out;
5974
5975	ua = lkb->lkb_ua;
5976
5977	if (lvb_in && ua->lksb.sb_lvbptr)
5978		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5979	if (ua_tmp->castparam)
5980		ua->castparam = ua_tmp->castparam;
5981	ua->user_lksb = ua_tmp->user_lksb;
5982
5983	error = set_unlock_args(flags, ua, &args);
5984	if (error)
5985		goto out_put;
5986
5987	error = unlock_lock(ls, lkb, &args);
5988
5989	if (error == -DLM_EUNLOCK)
5990		error = 0;
5991	/* from validate_unlock_args() */
5992	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
5993		error = 0;
5994	if (error)
5995		goto out_put;
5996
5997	spin_lock(&ua->proc->locks_spin);
5998	/* dlm_user_add_cb() may have already taken lkb off the proc list */
5999	if (!list_empty(&lkb->lkb_ownqueue))
6000		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6001	spin_unlock(&ua->proc->locks_spin);
6002 out_put:
6003	dlm_put_lkb(lkb);
6004 out:
6005	dlm_unlock_recovery(ls);
6006	kfree(ua_tmp);
6007	return error;
6008}
6009
6010int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6011		    uint32_t flags, uint32_t lkid)
6012{
6013	struct dlm_lkb *lkb;
6014	struct dlm_args args;
6015	struct dlm_user_args *ua;
6016	int error;
6017
6018	dlm_lock_recovery(ls);
6019
6020	error = find_lkb(ls, lkid, &lkb);
6021	if (error)
6022		goto out;
6023
6024	ua = lkb->lkb_ua;
6025	if (ua_tmp->castparam)
6026		ua->castparam = ua_tmp->castparam;
6027	ua->user_lksb = ua_tmp->user_lksb;
6028
6029	error = set_unlock_args(flags, ua, &args);
6030	if (error)
6031		goto out_put;
6032
6033	error = cancel_lock(ls, lkb, &args);
6034
6035	if (error == -DLM_ECANCEL)
6036		error = 0;
6037	/* from validate_unlock_args() */
6038	if (error == -EBUSY)
6039		error = 0;
6040 out_put:
6041	dlm_put_lkb(lkb);
6042 out:
6043	dlm_unlock_recovery(ls);
6044	kfree(ua_tmp);
6045	return error;
6046}
6047
6048int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6049{
6050	struct dlm_lkb *lkb;
6051	struct dlm_args args;
6052	struct dlm_user_args *ua;
6053	struct dlm_rsb *r;
6054	int error;
6055
6056	dlm_lock_recovery(ls);
6057
6058	error = find_lkb(ls, lkid, &lkb);
6059	if (error)
6060		goto out;
6061
6062	ua = lkb->lkb_ua;
6063
6064	error = set_unlock_args(flags, ua, &args);
6065	if (error)
6066		goto out_put;
6067
6068	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6069
6070	r = lkb->lkb_resource;
6071	hold_rsb(r);
6072	lock_rsb(r);
6073
6074	error = validate_unlock_args(lkb, &args);
6075	if (error)
6076		goto out_r;
6077	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6078
6079	error = _cancel_lock(r, lkb);
6080 out_r:
6081	unlock_rsb(r);
6082	put_rsb(r);
6083
6084	if (error == -DLM_ECANCEL)
6085		error = 0;
6086	/* from validate_unlock_args() */
6087	if (error == -EBUSY)
6088		error = 0;
6089 out_put:
6090	dlm_put_lkb(lkb);
6091 out:
6092	dlm_unlock_recovery(ls);
6093	return error;
6094}
6095
6096/* lkb's that are removed from the waiters list by revert are just left on the
6097   orphans list with the granted orphan locks, to be freed by purge */
6098
6099static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6100{
6101	struct dlm_args args;
6102	int error;
6103
6104	hold_lkb(lkb); /* reference for the ls_orphans list */
6105	mutex_lock(&ls->ls_orphans_mutex);
6106	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6107	mutex_unlock(&ls->ls_orphans_mutex);
6108
6109	set_unlock_args(0, lkb->lkb_ua, &args);
6110
6111	error = cancel_lock(ls, lkb, &args);
6112	if (error == -DLM_ECANCEL)
6113		error = 0;
6114	return error;
6115}
6116
6117/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6118   granted.  Regardless of what rsb queue the lock is on, it's removed and
6119   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
6120   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
6121
6122static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6123{
6124	struct dlm_args args;
6125	int error;
6126
6127	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6128			lkb->lkb_ua, &args);
6129
6130	error = unlock_lock(ls, lkb, &args);
6131	if (error == -DLM_EUNLOCK)
6132		error = 0;
6133	return error;
6134}
6135
6136/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6137   (which does lock_rsb) due to deadlock with receiving a message that does
6138   lock_rsb followed by dlm_user_add_cb() */
6139
6140static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6141				     struct dlm_user_proc *proc)
6142{
6143	struct dlm_lkb *lkb = NULL;
6144
6145	mutex_lock(&ls->ls_clear_proc_locks);
6146	if (list_empty(&proc->locks))
6147		goto out;
6148
6149	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6150	list_del_init(&lkb->lkb_ownqueue);
6151
6152	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6153		lkb->lkb_flags |= DLM_IFL_ORPHAN;
6154	else
6155		lkb->lkb_flags |= DLM_IFL_DEAD;
6156 out:
6157	mutex_unlock(&ls->ls_clear_proc_locks);
6158	return lkb;
6159}
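
/* Illustrative sketch (standalone, not kernel code) of the pattern that
 * del_proc_lock() above enables: detach one entry while holding the
 * mutex, then operate on it after dropping the mutex, avoiding the
 * lock-ordering deadlock described in the comment before it.  toy_*
 * names are invented.  Compile with -lpthread. */
#include <pthread.h>
#include <stdio.h>

#define NLOCKS 3

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;
static int locks[NLOCKS] = { 1, 2, 3 };
static int nlocks = NLOCKS;

static int toy_del_one(void)		/* like del_proc_lock() */
{
	int id = -1;

	pthread_mutex_lock(&list_mutex);
	if (nlocks > 0)
		id = locks[--nlocks];
	pthread_mutex_unlock(&list_mutex);
	return id;
}

int main(void)
{
	int id;

	/* like unlock_proc_lock(): runs with the mutex not held */
	while ((id = toy_del_one()) != -1)
		printf("unlock %d outside the mutex\n", id);
	return 0;
}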
6160
6161/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6162   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6163   which we clear here. */
6164
6165/* proc CLOSING flag is set so no more device_reads should look at proc->asts
6166   list, and no more device_writes should add lkb's to proc->locks list; so we
6167   shouldn't need to take asts_spin or locks_spin here.  this assumes that
6168   device reads/writes/closes are serialized -- FIXME: we may need to serialize
6169   them ourselves. */
6170
6171void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6172{
6173	struct dlm_lkb *lkb, *safe;
6174
6175	dlm_lock_recovery(ls);
6176
6177	while (1) {
6178		lkb = del_proc_lock(ls, proc);
6179		if (!lkb)
6180			break;
6181		del_timeout(lkb);
6182		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6183			orphan_proc_lock(ls, lkb);
6184		else
6185			unlock_proc_lock(ls, lkb);
6186
6187		/* this removes the reference for the proc->locks list
6188		   added by dlm_user_request, it may result in the lkb
6189		   being freed */
6190
6191		dlm_put_lkb(lkb);
6192	}
6193
6194	mutex_lock(&ls->ls_clear_proc_locks);
6195
6196	/* in-progress unlocks */
6197	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6198		list_del_init(&lkb->lkb_ownqueue);
6199		lkb->lkb_flags |= DLM_IFL_DEAD;
6200		dlm_put_lkb(lkb);
6201	}
6202
6203	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6204		memset(&lkb->lkb_callbacks, 0,
6205		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6206		list_del_init(&lkb->lkb_cb_list);
6207		dlm_put_lkb(lkb);
6208	}
6209
6210	mutex_unlock(&ls->ls_clear_proc_locks);
6211	dlm_unlock_recovery(ls);
6212}
6213
6214static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6215{
6216	struct dlm_lkb *lkb, *safe;
6217
6218	while (1) {
6219		lkb = NULL;
6220		spin_lock(&proc->locks_spin);
6221		if (!list_empty(&proc->locks)) {
6222			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6223					 lkb_ownqueue);
6224			list_del_init(&lkb->lkb_ownqueue);
6225		}
6226		spin_unlock(&proc->locks_spin);
6227
6228		if (!lkb)
6229			break;
6230
6231		lkb->lkb_flags |= DLM_IFL_DEAD;
6232		unlock_proc_lock(ls, lkb);
6233		dlm_put_lkb(lkb); /* ref from proc->locks list */
6234	}
6235
6236	spin_lock(&proc->locks_spin);
6237	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6238		list_del_init(&lkb->lkb_ownqueue);
6239		lkb->lkb_flags |= DLM_IFL_DEAD;
6240		dlm_put_lkb(lkb);
6241	}
6242	spin_unlock(&proc->locks_spin);
6243
6244	spin_lock(&proc->asts_spin);
6245	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6246		memset(&lkb->lkb_callbacks, 0,
6247		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6248		list_del_init(&lkb->lkb_cb_list);
6249		dlm_put_lkb(lkb);
6250	}
6251	spin_unlock(&proc->asts_spin);
6252}
6253
6254/* pid of 0 means purge all orphans */
6255
6256static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6257{
6258	struct dlm_lkb *lkb, *safe;
6259
6260	mutex_lock(&ls->ls_orphans_mutex);
6261	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6262		if (pid && lkb->lkb_ownpid != pid)
6263			continue;
6264		unlock_proc_lock(ls, lkb);
6265		list_del_init(&lkb->lkb_ownqueue);
6266		dlm_put_lkb(lkb);
6267	}
6268	mutex_unlock(&ls->ls_orphans_mutex);
6269}
6270
6271static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6272{
6273	struct dlm_message *ms;
6274	struct dlm_mhandle *mh;
6275	int error;
6276
6277	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6278				DLM_MSG_PURGE, &ms, &mh);
6279	if (error)
6280		return error;
6281	ms->m_nodeid = nodeid;
6282	ms->m_pid = pid;
6283
6284	return send_message(mh, ms);
6285}
6286
6287int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6288		   int nodeid, int pid)
6289{
6290	int error = 0;
6291
6292	if (nodeid && (nodeid != dlm_our_nodeid())) {
6293		error = send_purge(ls, nodeid, pid);
6294	} else {
6295		dlm_lock_recovery(ls);
6296		if (pid == current->pid)
6297			purge_proc_locks(ls, proc);
6298		else
6299			do_purge(ls, nodeid, pid);
6300		dlm_unlock_recovery(ls);
6301	}
6302	return error;
6303}
6304