fs/ocfs2/dlm/dlmthread.c (Linux v6.8)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "../cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
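
/*
 * Illustrative caller pattern for __dlm_wait_on_lockres_flags() above
 * (editor's sketch, modelled on the call in dlm_purge_lockres() below):
 * the caller enters and leaves with res->spinlock held, and the helper
 * may drop and retake it while waiting:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
 *	spin_unlock(&res->spinlock);
 */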

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_has_locks(res))
		return 0;

	/* Locks are in the process of being created */
	if (res->inflight_locks)
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		return 0;

	/* Another node has this resource with this node as the master */
	bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
	if (bit < O2NM_MAX_NODES)
		return 0;

	return 1;
}


/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether the lockres belongs
 * on the purge list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
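
/*
 * Locking-order note (editor's note, inferred from the call sites in
 * this file): where multiple locks are held, dlm->spinlock is taken
 * before res->spinlock, and dlm->ast_lock before res->spinlock.
 */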

/*
 * Do the real purge work:
 *     unhash the lockres, and
 *     clear flag DLM_LOCK_RES_DROPPING_REF.
 * It requires dlm and lockres spinlock to be taken.
 */
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/*
	 * lockres is not in the hash now. drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource.
	 */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}

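/*
 * Drop a single unused lockres (editor's summary of the code below).
 * A non-master node first drops both spinlocks to send a deref message
 * to the master, clearing this node's bit in the master's refmap,
 * before unhashing; the master unhashes directly. Called with
 * dlm->spinlock and res->spinlock held; returns with res->spinlock
 * released and dlm->spinlock still held.
 */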
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
				dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			return;
		}

		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
		mlog(0, "%s: deref %.*s in progress\n",
			dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
		return;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
				res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}

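/*
 * Walk the purge list oldest-first and free lockreses that have been
 * unused for at least DLM_PURGE_INTERVAL_MS, or everything that is
 * freeable when purge_now is set (shutdown). Entries that turn out to
 * be busy are moved to the tail and retried on a later pass.
 */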
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d, assert master workers %u\n",
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}

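/*
 * Core grant logic (editor's summary): first try to grant the lock at
 * the head of the converting queue, then the head of the blocked queue
 * (which can only be granted once the converting queue is empty). Any
 * granted or converting lock incompatible with the target gets a BAST
 * queued and its highest_blocked level raised; a grantable target is
 * moved to the granted queue and an AST is queued for it.
 */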
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

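/*
 * Mark a locally mastered lockres dirty: take a reference and put it on
 * dlm->dirty_list for dlm_thread to shuffle, unless it is migrating or
 * has dirtying blocked. Secondary (non-master) copies are never queued.
 */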
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}


/* Launch the DLM thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
			dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

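/*
 * Deliver all queued ASTs, then all queued BASTs. dlm->ast_lock is
 * dropped around each delivery, so a lock can be re-queued while its
 * callback runs; the *_pending flag is only cleared if it was not.
 */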
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}


#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100

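/*
 * Main worker loop: purge unused lockreses, shuffle up to
 * DLM_THREAD_MAX_DIRTY dirty lockreses per pass, flush ASTs/BASTs, then
 * sleep for up to DLM_THREAD_TIMEOUT_MS or until kicked via
 * dlm->dlm_thread_wq.
 */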
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING |
					  DLM_LOCK_RES_RECOVERY_WAITING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}