v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"

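/* Collect every active rsb that this node masters (res_nodeid == 0) onto
 * ls_masters_list, taking a reference on each; other nodes read this
 * snapshot while rebuilding their directories during recovery.
 */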
static int dlm_create_masters_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error = 0;

	write_lock_bh(&ls->ls_masters_lock);
	if (!list_empty(&ls->ls_masters_list)) {
		log_error(ls, "masters list not empty");
		error = -EINVAL;
		goto out;
	}

	read_lock_bh(&ls->ls_rsbtbl_lock);
	list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
		if (r->res_nodeid)
			continue;

		list_add(&r->res_masters_list, &ls->ls_masters_list);
		dlm_hold_rsb(r);
	}
	read_unlock_bh(&ls->ls_rsbtbl_lock);
 out:
	write_unlock_bh(&ls->ls_masters_lock);
	return error;
}

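/* Undo dlm_create_masters_list(): drop each rsb's reference and empty
 * ls_masters_list.
 */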
static void dlm_release_masters_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	write_lock_bh(&ls->ls_masters_lock);
	list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
		list_del_init(&r->res_masters_list);
		dlm_put_rsb(r);
	}
	write_unlock_bh(&ls->ls_masters_lock);
}

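/* Snapshot every active rsb onto the caller's root_list, taking a
 * reference on each.  dlm_clear_inactive() must already have emptied
 * ls_slow_inactive, hence the WARN_ON_ONCE below.
 */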
static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
{
	struct dlm_rsb *r;

	read_lock_bh(&ls->ls_rsbtbl_lock);
	list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
		list_add(&r->res_root_list, root_list);
		dlm_hold_rsb(r);
	}

	WARN_ON_ONCE(!list_empty(&ls->ls_slow_inactive));
	read_unlock_bh(&ls->ls_rsbtbl_lock);
}

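/* Drop the references taken by dlm_create_root_list() and empty the list. */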
static void dlm_release_root_list(struct list_head *root_list)
{
	struct dlm_rsb *r, *safe;

	list_for_each_entry_safe(r, safe, root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
}

/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	write_lock_bh(&ls->ls_recv_active);

	spin_lock_bh(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* Schedule the next scan timer if recovery put something on
		 * the inactive list.
		 *
		 * The rsbs queued to the inactive list during recovery have
		 * not been scanned yet, because LSFL_RUNNING was not set;
		 * everything else has not started either, because
		 * ls_in_recovery is still held.  So resume_scan_timer()
		 * cannot queue a timer that would fire as a no-op.
		 */
		resume_scan_timer(ls);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
		error = 0;
	}
	spin_unlock_bh(&ls->ls_recover_lock);

	write_unlock_bh(&ls->ls_recv_active);
	return error;
}

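/* Run one full recovery pass for sequence rv->seq: rebuild lockspace
 * membership, the resource directory and lock state, then re-enable
 * locking and replay requests that queued up while locking was stopped.
 */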
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	LIST_HEAD(root_list);
	unsigned long start;
	int error, neg = 0;

	log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

	dlm_callback_suspend(ls);

	dlm_clear_inactive(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls, &root_list);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 *
	 * Because all membership changes must be reported to the lsops or
	 * midcomms layer, ls_recover() must not be aborted until this is
	 * done.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_rinfo(ls, "dlm_recover_members error %d", error);
		goto fail_root_list;
	}

	dlm_recover_dir_nodeid(ls, &root_list);

	/* Create a snapshot of all active rsbs of which we are the master.
	 * During the barrier between dlm_recover_members_wait() and
	 * dlm_recover_directory(), other nodes can dump their necessary
	 * directory dlm_rsb (r->res_dir_nodeid == nodeid) in the rcom
	 * communication handled by dlm_copy_master_names().
	 *
	 * TODO: keep a per-lockspace list of the rsbs we master, maintained
	 * during normal lock handling, so that recovery can use it directly
	 * instead of building this list each time.
	 */
	error = dlm_create_masters_list(ls);
	if (error) {
		log_rinfo(ls, "dlm_create_masters_list error %d", error);
		goto fail_root_list;
	}

	ls->ls_recover_locks_in = 0;

	dlm_set_recover_status(ls, DLM_RS_NODES);

	error = dlm_recover_members_wait(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "dlm_recover_members_wait error %d", error);
		dlm_release_masters_list(ls);
		goto fail_root_list;
	}

	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory error %d", error);
		dlm_release_masters_list(ls);
		goto fail_root_list;
	}

	dlm_set_recover_status(ls, DLM_RS_DIR);

	error = dlm_recover_directory_wait(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
		dlm_release_masters_list(ls);
		goto fail_root_list;
	}

	dlm_release_masters_list(ls);

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);

	if (dlm_recovery_stopped(ls)) {
		error = -EINTR;
		goto fail_root_list;
	}

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_recover_purge(ls, &root_list);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls, rv->seq, &root_list);
		if (error) {
			log_rinfo(ls, "dlm_recover_masters error %d", error);
			goto fail_root_list;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls, rv->seq, &root_list);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks error %d", error);
			goto fail_root_list;
		}

		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls, rv->seq);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail_root_list;
		}

		log_rinfo(ls, "dlm_recover_locks %u in",
			  ls->ls_recover_locks_in);

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls, &root_list);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls, rv->seq);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail_root_list;
		}
	}

	dlm_release_root_list(&root_list);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);

	error = dlm_recover_done_wait(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "dlm_recover_done_wait error %d", error);
		goto fail;
	}

	dlm_clear_members_gone(ls);

	dlm_callback_resume(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "enable_locking error %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_rinfo(ls, "dlm_process_requestqueue error %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_waiters_post error %d", error);
		goto fail;
	}

	dlm_recover_grant(ls);

	log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms",
		  (unsigned long long)rv->seq, ls->ls_generation,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	return 0;

 fail_root_list:
	dlm_release_root_list(&root_list);
 fail:
	mutex_unlock(&ls->ls_recoverd_active);

	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;
	int error;

	spin_lock_bh(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
	spin_unlock_bh(&ls->ls_recover_lock);

	if (rv) {
		error = ls_recover(ls, rv);
		switch (error) {
		case 0:
			ls->ls_recovery_result = 0;
			complete(&ls->ls_recovery_done);

			dlm_lsop_recover_done(ls);
			break;
		case -EINTR:
			/* Recovery was interrupted; wait for the next
			 * ls_recover() iteration, which will hopefully
			 * succeed.
			 */
			log_rinfo(ls, "%s %llu interrupted and should be queued to run again",
				  __func__, (unsigned long long)rv->seq);
			break;
		default:
			log_rinfo(ls, "%s %llu error %d", __func__,
				  (unsigned long long)rv->seq, error);

			/* make new_lockspace() aware of the critical error */
			ls->ls_recovery_result = error;
			complete(&ls->ls_recovery_done);
			break;
		}

		kfree(rv->nodes);
		kfree(rv);
	}
}

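/* Per-lockspace kernel thread: holds ls_in_recovery while recovery is
 * pending and runs do_ls_recovery() whenever LSFL_RECOVER_WORK is set.
 */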
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	down_write(&ls->ls_in_recovery);
	set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
	wake_up(&ls->ls_recover_lock_wait);

	while (1) {
		/*
		 * We call kthread_should_stop() after set_current_state().
		 * This is because it works correctly if kthread_stop() is
		 * called just before set_current_state().
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			break;
		}
		if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
		    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			if (kthread_should_stop())
				break;
			schedule();
		}
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			down_write(&ls->ls_in_recovery);
			set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
			wake_up(&ls->ls_recover_lock_wait);
		}

		if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
		up_write(&ls->ls_in_recovery);

	dlm_put_lockspace(ls);
	return 0;
}

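/* Spawn the dlm_recoverd thread for this lockspace. */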
int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}

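/* Block recovery across a caller's critical section: wake anything
 * sleeping on ls_wait_general, then hold ls_recoverd_active until
 * dlm_recoverd_resume() releases it.
 */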
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}

v4.10.11
 
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"

/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

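/* Note: in this version ls_recv_active is a rw_semaphore and
 * ls_recover_lock is taken without _bh; the v6.13 code above uses
 * write_lock_bh()/spin_lock_bh() instead, and additionally restarts the
 * inactive-rsb scan timer here via resume_scan_timer().
 */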
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}

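/* The same recovery sequence as above, in its older form: the root list
 * lives in the lockspace itself (dlm_create_root_list(ls)), unused rsbs
 * sit on the "toss" list (dlm_clear_toss()), and the _wait functions take
 * no recovery sequence number.
 */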
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

	dlm_callback_suspend(ls);

	dlm_clear_toss(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_rinfo(ls, "dlm_recover_members error %d", error);
		goto fail;
	}

	dlm_recover_dir_nodeid(ls);

	ls->ls_recover_dir_sent_res = 0;
	ls->ls_recover_dir_sent_msg = 0;
	ls->ls_recover_locks_in = 0;

	dlm_set_recover_status(ls, DLM_RS_NODES);

	error = dlm_recover_members_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_members_wait error %d", error);
		goto fail;
	}

	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory error %d", error);
		goto fail;
	}

	dlm_set_recover_status(ls, DLM_RS_DIR);

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
		goto fail;
	}

	log_rinfo(ls, "dlm_recover_directory %u out %u messages",
		  ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);

	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_recover_purge(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_masters error %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks error %d", error);
			goto fail;
		}

		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}

		log_rinfo(ls, "dlm_recover_locks %u in",
			  ls->ls_recover_locks_in);

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);

	error = dlm_recover_done_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_done_wait error %d", error);
		goto fail;
	}

	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	dlm_callback_resume(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "enable_locking error %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_rinfo(ls, "dlm_process_requestqueue error %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_waiters_post error %d", error);
		goto fail;
	}

	dlm_recover_grant(ls);

	log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms",
		  (unsigned long long)rv->seq, ls->ls_generation,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	dlm_lsop_recover_done(ls);
	return 0;

 fail:
	dlm_release_root_list(ls);
	log_rinfo(ls, "dlm_recover %llu error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodes);
		kfree(rv);
	}
}

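/* Note: unlike the v6.13 code above, ls_recover()'s return value is
 * ignored here; there is no ls_recovery_result/ls_recovery_done
 * handshake, and dlm_lsop_recover_done() is called from ls_recover()
 * itself.
 */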
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	down_write(&ls->ls_in_recovery);
	set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
	wake_up(&ls->ls_recover_lock_wait);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
		    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			down_write(&ls->ls_in_recovery);
			set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
			wake_up(&ls->ls_recover_lock_wait);
		}

		if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
		up_write(&ls->ls_in_recovery);

	dlm_put_lockspace(ls);
	return 0;
}

int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}

void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}