/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

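/*
 * The hash key is the leading part of the embedded lm_lockname, up to and
 * including ln_type, so a lookup matches on superblock, lock number and
 * lock type (the same three fields glock_wake_function() compares below).
 */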
static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

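/*
 * Waiters on dying glocks park on one of GLOCK_WAIT_TABLE_SIZE shared wait
 * queues, chosen by hashing the lock name. Since buckets are shared,
 * glock_wake_function() above filters out wakeups that were meant for a
 * different lock name hashing to the same bucket.
 */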
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

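/*
 * gfs2_glock_free() below removes the glock from the hash table before
 * freeing it: the smp_mb() orders the removal against the waitqueue_active()
 * check in wake_up_glock(), so a waiter in find_insert_glock() either sees
 * the entry gone on its retry or receives the wakeup. The memory itself is
 * only reclaimed after an RCU grace period, since lookups run under
 * rcu_read_lock().
 */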
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


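/*
 * Add a glock to the tail (most recently used end) of the LRU list;
 * lru_count only grows when the glock was not already on the list, so
 * re-adding an entry just moves it to the tail.
 */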
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

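/*
 * Grant rules, in the order checked below: an exclusive request (or any
 * request while the head of the queue wants exclusive) is only granted to
 * the queue head; a request for the currently held state is granted;
 * GL_EXACT requests are otherwise refused; under a cached EX state, SH and
 * DF requests are granted when the queue head asked for the same mode; and
 * LM_FLAG_ANY matches any state except unlocked.
 */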
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The DLM status; 0 fails only queued "try" requests, LM_OUT_ERROR
 *       fails all waiters with -EIO
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

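/*
 * gl->gl_lockref.lock is dropped and retaken around go_lock(), so the
 * holders list is rescanned from the start after every holder that is
 * granted the lock via that path.
 */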
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

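/*
 * A glock pins one lockref reference for as long as it is held in any
 * locked state, hence the reference count adjustment on transitions to and
 * from LM_ST_UNLOCKED below.
 */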
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

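/*
 * The low bits of the DLM status (LM_OUT_ST_MASK) carry the lock state that
 * was actually granted; the remaining bits are flags such as
 * LM_OUT_CANCELED and LM_OUT_ERROR.
 */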
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

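/*
 * GLF_BLOCKING below is a hint for the lock-time statistics: a request is
 * treated as non-blocking when it is an unlock, a conversion away from EX,
 * or a "try" lock, since those are not expected to wait on another node.
 */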
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
						   &sdp->sd_flags));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

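/*
 * The work item owns at least one glock reference on entry. drop_refs
 * counts how many references this invocation must release: one for the
 * work itself, one more if a pending reply was consumed, and one less if
 * the work is requeued with a delay (that reference moves to the new work).
 */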
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

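/*
 * Look up a glock by name (inserting @new if non-NULL and no glock with
 * that name exists) and take a reference. A dead glock remains visible in
 * the hash table until gfs2_glock_free() removes it; if we find one, sleep
 * on the per-name wait queue and retry once wake_up_glock() reports that
 * the old entry is gone.
 */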
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, pend the demote rather than applying it immediately
 * @remote: true if this request came from another cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

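/*
 * Holders are normally queued in arrival order. LM_FLAG_PRIORITY requests
 * are instead inserted ahead of the first waiter, and if the request at the
 * head of the queue is not itself a priority request, the operation the
 * lock manager is currently working on is cancelled so the priority request
 * can be serviced sooner.
 */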
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

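/*
 * Each sbstats seq position encodes a (glock type, counter) pair: there are
 * eight counters per type (ARRAY_SIZE(gfs2_stype) == 8), so pos >> 3 selects
 * the glock type and pos & 0x07 selects the counter. Row zero is a header
 * listing the CPU numbers.
 */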
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

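/*
 * Advance the iterator by @n entries (n == 0 means keep the current glock),
 * skipping glocks of other superblocks and dead glocks. Only the glock we
 * stop at gets a reference; intermediate entries are just counted so that a
 * seq_file restart can skip forward to the saved position.
 */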
1924static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
1925{
1926 struct gfs2_glock *gl = gi->gl;
1927
1928 if (gl) {
1929 if (n == 0)
1930 return;
1931 if (!lockref_put_not_zero(&gl->gl_lockref))
1932 gfs2_glock_queue_put(gl);
1933 }
1934 for (;;) {
1935 gl = rhashtable_walk_next(&gi->hti);
1936 if (IS_ERR_OR_NULL(gl)) {
1937 if (gl == ERR_PTR(-EAGAIN)) {
1938 n = 1;
1939 continue;
1940 }
1941 gl = NULL;
1942 break;
1943 }
1944 if (gl->gl_name.ln_sbd != gi->sdp)
1945 continue;
1946 if (n <= 1) {
1947 if (!lockref_get_not_dead(&gl->gl_lockref))
1948 continue;
1949 break;
1950 } else {
1951 if (__lockref_is_dead(&gl->gl_lockref))
1952 continue;
1953 n--;
1954 }
1955 }
1956 gi->gl = gl;
1957}
1958
1959static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1960 __acquires(RCU)
1961{
1962 struct gfs2_glock_iter *gi = seq->private;
1963 loff_t n;
1964
1965 /*
1966 * We can either stay where we are, skip to the next hash table
1967 * entry, or start from the beginning.
1968 */
1969 if (*pos < gi->last_pos) {
1970 rhashtable_walk_exit(&gi->hti);
1971 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
1972 n = *pos + 1;
1973 } else {
1974 n = *pos - gi->last_pos;
1975 }
1976
1977 rhashtable_walk_start(&gi->hti);
1978
1979 gfs2_glock_iter_next(gi, n);
1980 gi->last_pos = *pos;
1981 return gi->gl;
1982}
1983
1984static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1985 loff_t *pos)
1986{
1987 struct gfs2_glock_iter *gi = seq->private;
1988
1989 (*pos)++;
1990 gi->last_pos = *pos;
1991 gfs2_glock_iter_next(gi, 1);
1992 return gi->gl;
1993}
1994
1995static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1996 __releases(RCU)
1997{
1998 struct gfs2_glock_iter *gi = seq->private;
1999
2000 rhashtable_walk_stop(&gi->hti);
2001}
2002
2003static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2004{
2005 dump_glock(seq, iter_ptr);
2006 return 0;
2007}
2008
2009static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2010{
2011 preempt_disable();
2012 if (*pos >= GFS2_NR_SBSTATS)
2013 return NULL;
2014 return pos;
2015}
2016
2017static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2018 loff_t *pos)
2019{
2020 (*pos)++;
2021 if (*pos >= GFS2_NR_SBSTATS)
2022 return NULL;
2023 return pos;
2024}
2025
2026static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2027{
2028 preempt_enable();
2029}
2030
2031static const struct seq_operations gfs2_glock_seq_ops = {
2032 .start = gfs2_glock_seq_start,
2033 .next = gfs2_glock_seq_next,
2034 .stop = gfs2_glock_seq_stop,
2035 .show = gfs2_glock_seq_show,
2036};
2037
2038static const struct seq_operations gfs2_glstats_seq_ops = {
2039 .start = gfs2_glock_seq_start,
2040 .next = gfs2_glock_seq_next,
2041 .stop = gfs2_glock_seq_stop,
2042 .show = gfs2_glstats_seq_show,
2043};
2044
2045static const struct seq_operations gfs2_sbstats_seq_ops = {
2046 .start = gfs2_sbstats_seq_start,
2047 .next = gfs2_sbstats_seq_next,
2048 .stop = gfs2_sbstats_seq_stop,
2049 .show = gfs2_sbstats_seq_show,
2050};
2051
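/*
 * Preferred allocation size for the seq_file output buffer: as large as
 * possible without exceeding 64k or the costly page allocation order.
 */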
2052#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2053
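/*
 * Common open routine for the "glocks" and "glstats" debugfs files.  Sets
 * up a gfs2_glock_iter, tries to allocate a large output buffer up front
 * (falling back to the seq_file default on failure), and primes the
 * rhashtable walk.
 */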
2054static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2055 const struct seq_operations *ops)
2056{
2057 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
2058 if (ret == 0) {
2059 struct seq_file *seq = file->private_data;
2060 struct gfs2_glock_iter *gi = seq->private;
2061
2062 gi->sdp = inode->i_private;
2063 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2064 if (seq->buf)
2065 seq->size = GFS2_SEQ_GOODSIZE;
2066 /*
2067 * Initially, we are "before" the first hash table entry; the
2068 * first call to rhashtable_walk_next gets us the first entry.
2069 */
2070 gi->last_pos = -1;
2071 gi->gl = NULL;
2072 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2073 }
2074 return ret;
2075}
2076
2077static int gfs2_glocks_open(struct inode *inode, struct file *file)
2078{
2079 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2080}
2081
2082static int gfs2_glocks_release(struct inode *inode, struct file *file)
2083{
2084 struct seq_file *seq = file->private_data;
2085 struct gfs2_glock_iter *gi = seq->private;
2086
2087 if (gi->gl)
2088 gfs2_glock_put(gi->gl);
2089 rhashtable_walk_exit(&gi->hti);
2090 return seq_release_private(inode, file);
2091}
2092
2093static int gfs2_glstats_open(struct inode *inode, struct file *file)
2094{
2095 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2096}
2097
2098static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2099{
2100 int ret = seq_open(file, &gfs2_sbstats_seq_ops);
2101 if (ret == 0) {
2102 struct seq_file *seq = file->private_data;
2103 seq->private = inode->i_private; /* sdp */
2104 }
2105 return ret;
2106}
2107
2108static const struct file_operations gfs2_glocks_fops = {
2109 .owner = THIS_MODULE,
2110 .open = gfs2_glocks_open,
2111 .read = seq_read,
2112 .llseek = seq_lseek,
2113 .release = gfs2_glocks_release,
2114};
2115
2116static const struct file_operations gfs2_glstats_fops = {
2117 .owner = THIS_MODULE,
2118 .open = gfs2_glstats_open,
2119 .read = seq_read,
2120 .llseek = seq_lseek,
2121 .release = gfs2_glocks_release,
2122};
2123
2124static const struct file_operations gfs2_sbstats_fops = {
2125 .owner = THIS_MODULE,
2126 .open = gfs2_sbstats_open,
2127 .read = seq_read,
2128 .llseek = seq_lseek,
2129 .release = seq_release,
2130};
2131
2132int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2133{
2134 struct dentry *dent;
2135
2136 dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2137 if (IS_ERR_OR_NULL(dent))
2138 goto fail;
2139 sdp->debugfs_dir = dent;
2140
2141 dent = debugfs_create_file("glocks",
2142 S_IFREG | S_IRUGO,
2143 sdp->debugfs_dir, sdp,
2144 &gfs2_glocks_fops);
2145 if (IS_ERR_OR_NULL(dent))
2146 goto fail;
2147 sdp->debugfs_dentry_glocks = dent;
2148
2149 dent = debugfs_create_file("glstats",
2150 S_IFREG | S_IRUGO,
2151 sdp->debugfs_dir, sdp,
2152 &gfs2_glstats_fops);
2153 if (IS_ERR_OR_NULL(dent))
2154 goto fail;
2155 sdp->debugfs_dentry_glstats = dent;
2156
2157 dent = debugfs_create_file("sbstats",
2158 S_IFREG | S_IRUGO,
2159 sdp->debugfs_dir, sdp,
2160 &gfs2_sbstats_fops);
2161 if (IS_ERR_OR_NULL(dent))
2162 goto fail;
2163 sdp->debugfs_dentry_sbstats = dent;
2164
2165 return 0;
2166fail:
2167 gfs2_delete_debugfs_file(sdp);
2168 return dent ? PTR_ERR(dent) : -ENOMEM;
2169}
2170
2171void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2172{
2173 if (sdp->debugfs_dir) {
2174 if (sdp->debugfs_dentry_glocks) {
2175 debugfs_remove(sdp->debugfs_dentry_glocks);
2176 sdp->debugfs_dentry_glocks = NULL;
2177 }
2178 if (sdp->debugfs_dentry_glstats) {
2179 debugfs_remove(sdp->debugfs_dentry_glstats);
2180 sdp->debugfs_dentry_glstats = NULL;
2181 }
2182 if (sdp->debugfs_dentry_sbstats) {
2183 debugfs_remove(sdp->debugfs_dentry_sbstats);
2184 sdp->debugfs_dentry_sbstats = NULL;
2185 }
2186 debugfs_remove(sdp->debugfs_dir);
2187 sdp->debugfs_dir = NULL;
2188 }
2189}
2190
2191int gfs2_register_debugfs(void)
2192{
2193 gfs2_root = debugfs_create_dir("gfs2", NULL);
2194 if (IS_ERR(gfs2_root))
2195 return PTR_ERR(gfs2_root);
2196 return gfs2_root ? 0 : -ENOMEM;
2197}
2198
2199void gfs2_unregister_debugfs(void)
2200{
2201 debugfs_remove(gfs2_root);
2202 gfs2_root = NULL;
2203}

static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote);
114
115/**
116 * wake_up_glock - Wake up waiters on a glock
117 * @gl: the glock
118 */
119static void wake_up_glock(struct gfs2_glock *gl)
120{
121 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
122
123 if (waitqueue_active(wq))
124 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
125}
126
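/*
 * gfs2_glock_dealloc - free a glock after the RCU grace period has expired
 * @rcu: the rcu_head embedded in the glock
 */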
127static void gfs2_glock_dealloc(struct rcu_head *rcu)
128{
129 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
130
131 kfree(gl->gl_lksb.sb_lvbptr);
132 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
133 struct gfs2_glock_aspace *gla =
134 container_of(gl, struct gfs2_glock_aspace, glock);
135 kmem_cache_free(gfs2_glock_aspace_cachep, gla);
136 } else
137 kmem_cache_free(gfs2_glock_cachep, gl);
138}
139
140/**
141 * glock_blocked_by_withdraw - determine if we can still use a glock
142 * @gl: the glock
143 *
144 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
145 * when we're withdrawn. For example, to maintain metadata integrity, we should
146 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
147 * the iopen or freeze glock may be safely used because none of their
148 * metadata goes through the journal. So in general, we should disallow all
149 * glocks that are journaled, and allow all the others. One exception is:
150 * we need to allow our active journal to be promoted and demoted so others
151 * may recover it and we can reacquire it when they're done.
152 */
153static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
154{
155 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
156
157 if (!gfs2_withdrawing_or_withdrawn(sdp))
158 return false;
159 if (gl->gl_ops->go_flags & GLOF_NONDISK)
160 return false;
161 if (!sdp->sd_jdesc ||
162 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
163 return false;
164 return true;
165}
166
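/*
 * __gfs2_glock_free - remove a glock from the hash table and free it
 * @gl: the dead glock
 *
 * Wakes up any process waiting in find_insert_glock() for the glock to
 * disappear, then defers the actual freeing until after the RCU grace
 * period.
 */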
167static void __gfs2_glock_free(struct gfs2_glock *gl)
168{
169 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
170 smp_mb();
171 wake_up_glock(gl);
172 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
173}
174
175void gfs2_glock_free(struct gfs2_glock *gl) {
176 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
177
178 __gfs2_glock_free(gl);
179 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
180 wake_up(&sdp->sd_kill_wait);
181}
182
183void gfs2_glock_free_later(struct gfs2_glock *gl) {
184 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
185
186 spin_lock(&lru_lock);
187 list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
188 spin_unlock(&lru_lock);
189 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
190 wake_up(&sdp->sd_kill_wait);
191}
192
193static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
194{
195 struct list_head *list = &sdp->sd_dead_glocks;
196
197 while(!list_empty(list)) {
198 struct gfs2_glock *gl;
199
200 gl = list_first_entry(list, struct gfs2_glock, gl_lru);
201 list_del_init(&gl->gl_lru);
202 __gfs2_glock_free(gl);
203 }
204}
205
206/**
207 * gfs2_glock_hold() - increment reference count on glock
208 * @gl: The glock to hold
209 *
210 */
211
212struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
213{
214 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
215 lockref_get(&gl->gl_lockref);
216 return gl;
217}
218
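/*
 * Cached glocks with no holders are kept on the global lru_list; the glock
 * shrinker evicts them from there under memory pressure.  The GLF_LRU flag
 * tracks list membership, and lru_count tracks the list length.
 */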
219static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
220{
221 spin_lock(&lru_lock);
222 list_move_tail(&gl->gl_lru, &lru_list);
223
224 if (!test_bit(GLF_LRU, &gl->gl_flags)) {
225 set_bit(GLF_LRU, &gl->gl_flags);
226 atomic_inc(&lru_count);
227 }
228
229 spin_unlock(&lru_lock);
230}
231
232static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
233{
234 spin_lock(&lru_lock);
235 if (test_bit(GLF_LRU, &gl->gl_flags)) {
236 list_del_init(&gl->gl_lru);
237 atomic_dec(&lru_count);
238 clear_bit(GLF_LRU, &gl->gl_flags);
239 }
240 spin_unlock(&lru_lock);
241}
242
243/*
244 * Enqueue the glock on the work queue. Passes one glock reference on to the
245 * work queue.
246 */
247static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
248 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
249
250 if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
251 /*
252 * We are holding the lockref spinlock, and the work was still
253 * queued above. The queued work (glock_work_func) takes that
254 * spinlock before dropping its glock reference(s), so it
255 * cannot have dropped them in the meantime.
256 */
257 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
258 gl->gl_lockref.count--;
259 }
260}
261
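/*
 * __gfs2_glock_put - dispose of a glock whose last reference was dropped
 * @gl: the glock, with gl_lockref.lock held
 *
 * Marks the lockref dead, truncates any associated address space, and
 * hands the lock back to the lock module.  Drops gl_lockref.lock.
 */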
262static void __gfs2_glock_put(struct gfs2_glock *gl)
263{
264 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
265 struct address_space *mapping = gfs2_glock2aspace(gl);
266
267 lockref_mark_dead(&gl->gl_lockref);
268 spin_unlock(&gl->gl_lockref.lock);
269 gfs2_glock_remove_from_lru(gl);
270 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
271 if (mapping) {
272 truncate_inode_pages_final(mapping);
273 if (!gfs2_withdrawing_or_withdrawn(sdp))
274 GLOCK_BUG_ON(gl, !mapping_empty(mapping));
275 }
276 trace_gfs2_glock_put(gl);
277 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
278}
279
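/*
 * __gfs2_glock_put_or_lock - drop a reference unless it requires disposal
 * @gl: the glock
 *
 * Drops a reference on @gl and returns true.  When the last reference is
 * dropped and the glock is still locked, the glock is moved to the LRU
 * instead of being disposed of.  Otherwise, returns false with
 * gl_lockref.lock held; the caller must then dispose of the glock.
 */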
280static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
281{
282 if (lockref_put_or_lock(&gl->gl_lockref))
283 return true;
284 GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
285 if (gl->gl_state != LM_ST_UNLOCKED) {
286 gl->gl_lockref.count--;
287 gfs2_glock_add_to_lru(gl);
288 spin_unlock(&gl->gl_lockref.lock);
289 return true;
290 }
291 return false;
292}
293
294/**
295 * gfs2_glock_put() - Decrement reference count on glock
296 * @gl: The glock to put
297 *
298 */
299
300void gfs2_glock_put(struct gfs2_glock *gl)
301{
302 if (__gfs2_glock_put_or_lock(gl))
303 return;
304
305 __gfs2_glock_put(gl);
306}
307
308/*
309 * gfs2_glock_put_async - Decrement reference count without sleeping
310 * @gl: The glock to put
311 *
312 * Decrement the reference count on glock immediately unless it is the last
313 * reference. Defer putting the last reference to work queue context.
314 */
315void gfs2_glock_put_async(struct gfs2_glock *gl)
316{
317 if (__gfs2_glock_put_or_lock(gl))
318 return;
319
320 gfs2_glock_queue_work(gl, 0);
321 spin_unlock(&gl->gl_lockref.lock);
322}
323
324/**
325 * may_grant - check if it's ok to grant a new lock
326 * @gl: The glock
327 * @current_gh: One of the current holders of @gl
328 * @gh: The lock request which we wish to grant
329 *
330 * With our current compatibility rules, if a glock has one or more active
331 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
332 * @current_gh; they are all the same as far as compatibility with the new @gh
333 * goes.
334 *
335 * Returns true if it's ok to grant the lock.
336 */
337
338static inline bool may_grant(struct gfs2_glock *gl,
339 struct gfs2_holder *current_gh,
340 struct gfs2_holder *gh)
341{
342 if (current_gh) {
343		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
344
345 switch(current_gh->gh_state) {
346 case LM_ST_EXCLUSIVE:
347 /*
348 * Here we make a special exception to grant holders
349 * who agree to share the EX lock with other holders
350 * who also have the bit set. If the original holder
351 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
352 * holders with the bit set.
353 */
354 return gh->gh_state == LM_ST_EXCLUSIVE &&
355 (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
356 (gh->gh_flags & LM_FLAG_NODE_SCOPE);
357
358 case LM_ST_SHARED:
359 case LM_ST_DEFERRED:
360 return gh->gh_state == current_gh->gh_state;
361
362 default:
363 return false;
364 }
365 }
366
367 if (gl->gl_state == gh->gh_state)
368 return true;
369 if (gh->gh_flags & GL_EXACT)
370 return false;
371 if (gl->gl_state == LM_ST_EXCLUSIVE) {
372 return gh->gh_state == LM_ST_SHARED ||
373 gh->gh_state == LM_ST_DEFERRED;
374 }
375 if (gh->gh_flags & LM_FLAG_ANY)
376 return gl->gl_state != LM_ST_UNLOCKED;
377 return false;
378}
379
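/*
 * gfs2_holder_wake - clear HIF_WAIT and wake up the waiting process
 * @gh: the glock holder
 *
 * For GL_ASYNC requests, also wakes up sd_async_glock_wait, which
 * gfs2_glock_async_wait() sleeps on.
 */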
380static void gfs2_holder_wake(struct gfs2_holder *gh)
381{
382 clear_bit(HIF_WAIT, &gh->gh_iflags);
383 smp_mb__after_atomic();
384 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
385 if (gh->gh_flags & GL_ASYNC) {
386 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
387
388 wake_up(&sdp->sd_async_glock_wait);
389 }
390}
391
392/**
393 * do_error - Something unexpected has happened during a lock request
394 * @gl: The glock
395 * @ret: The status from the DLM
396 */
397
398static void do_error(struct gfs2_glock *gl, const int ret)
399{
400 struct gfs2_holder *gh, *tmp;
401
402 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
403 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
404 continue;
405 if (ret & LM_OUT_ERROR)
406 gh->gh_error = -EIO;
407 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
408 gh->gh_error = GLR_TRYFAILED;
409 else
410 continue;
411 list_del_init(&gh->gh_list);
412 trace_gfs2_glock_queue(gh, 0);
413 gfs2_holder_wake(gh);
414 }
415}
416
417/**
418 * find_first_holder - find the first "holder" gh
419 * @gl: the glock
420 */
421
422static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
423{
424 struct gfs2_holder *gh;
425
426 if (!list_empty(&gl->gl_holders)) {
427 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
428 gh_list);
429 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
430 return gh;
431 }
432 return NULL;
433}
434
435/*
436 * gfs2_instantiate - Call the glops instantiate function
437 * @gh: The glock holder
438 *
439 * Returns: 0 if instantiate was successful, or error.
440 */
441int gfs2_instantiate(struct gfs2_holder *gh)
442{
443 struct gfs2_glock *gl = gh->gh_gl;
444 const struct gfs2_glock_operations *glops = gl->gl_ops;
445 int ret;
446
447again:
448 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
449 goto done;
450
451 /*
452 * Since we unlock the lockref lock, we set a flag to indicate
453 * instantiate is in progress.
454 */
455 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
456 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
457 TASK_UNINTERRUPTIBLE);
458 /*
459		 * But that may not have been successful, for example when a
460		 * process locks an inode glock _before_ it has an actual inode
461		 * to instantiate into. So we check again; this process might
462		 * have an inode to instantiate, and so might succeed.
463 * have an inode to instantiate, so might be successful.
464 */
465 goto again;
466 }
467
468 ret = glops->go_instantiate(gl);
469 if (!ret)
470 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
471 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
472 if (ret)
473 return ret;
474
475done:
476 if (glops->go_held)
477 return glops->go_held(gh);
478 return 0;
479}
480
481/**
482 * do_promote - promote as many requests as possible on the current queue
483 * @gl: The glock
484 *
485 * Returns true on success (i.e., progress was made or there are no waiters).
486 */
487
488static bool do_promote(struct gfs2_glock *gl)
489{
490 struct gfs2_holder *gh, *current_gh;
491
492 current_gh = find_first_holder(gl);
493 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
494 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
495 continue;
496 if (!may_grant(gl, current_gh, gh)) {
497 /*
498 * If we get here, it means we may not grant this
499 * holder for some reason. If this holder is at the
500 * head of the list, it means we have a blocked holder
501 * at the head, so return false.
502 */
503 if (list_is_first(&gh->gh_list, &gl->gl_holders))
504 return false;
505 do_error(gl, 0);
506 break;
507 }
508 set_bit(HIF_HOLDER, &gh->gh_iflags);
509 trace_gfs2_promote(gh);
510 gfs2_holder_wake(gh);
511 if (!current_gh)
512 current_gh = gh;
513 }
514 return true;
515}
516
517/**
518 * find_first_waiter - find the first gh that's waiting for the glock
519 * @gl: the glock
520 */
521
522static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
523{
524 struct gfs2_holder *gh;
525
526 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
527 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
528 return gh;
529 }
530 return NULL;
531}
532
533/**
534 * find_last_waiter - find the last gh that's waiting for the glock
535 * @gl: the glock
536 *
537 * This also is a fast way of finding out if there are any waiters.
538 */
539
540static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
541{
542 struct gfs2_holder *gh;
543
544 if (list_empty(&gl->gl_holders))
545 return NULL;
546 gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
547 return test_bit(HIF_HOLDER, &gh->gh_iflags) ? NULL : gh;
548}
549
550/**
551 * state_change - record that the glock is now in a different state
552 * @gl: the glock
553 * @new_state: the new state
554 */
555
556static void state_change(struct gfs2_glock *gl, unsigned int new_state)
557{
558 if (new_state != gl->gl_target)
559 /* shorten our minimum hold time */
560 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
561 GL_GLOCK_MIN_HOLD);
562 gl->gl_state = new_state;
563 gl->gl_tchange = jiffies;
564}
565
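/*
 * gfs2_set_demote - set a demote flag and notify waiters
 * @nr: GLF_DEMOTE or GLF_PENDING_DEMOTE
 * @gl: the glock
 */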
566static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
567{
568 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
569
570 set_bit(nr, &gl->gl_flags);
571 smp_mb();
572 wake_up(&sdp->sd_async_glock_wait);
573}
574
575static void gfs2_demote_wake(struct gfs2_glock *gl)
576{
577 gl->gl_demote_state = LM_ST_EXCLUSIVE;
578 clear_bit(GLF_DEMOTE, &gl->gl_flags);
579 smp_mb__after_atomic();
580 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
581}
582
583/**
584 * finish_xmote - The DLM has replied to one of our lock requests
585 * @gl: The glock
586 * @ret: The status from the DLM
587 *
588 */
589
590static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
591{
592 const struct gfs2_glock_operations *glops = gl->gl_ops;
593 struct gfs2_holder *gh;
594 unsigned state = ret & LM_OUT_ST_MASK;
595
596 trace_gfs2_glock_state_change(gl, state);
597 state_change(gl, state);
598 gh = find_first_waiter(gl);
599
600 /* Demote to UN request arrived during demote to SH or DF */
601 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
602 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
603 gl->gl_target = LM_ST_UNLOCKED;
604
605 /* Check for state != intended state */
606 if (unlikely(state != gl->gl_target)) {
607 if (gh && (ret & LM_OUT_CANCELED))
608 gfs2_holder_wake(gh);
609 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
610 /* move to back of queue and try next entry */
611 if (ret & LM_OUT_CANCELED) {
612 list_move_tail(&gh->gh_list, &gl->gl_holders);
613 gh = find_first_waiter(gl);
614 gl->gl_target = gh->gh_state;
615 if (do_promote(gl))
616 goto out;
617 goto retry;
618 }
619 /* Some error or failed "try lock" - report it */
620 if ((ret & LM_OUT_ERROR) ||
621 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
622 gl->gl_target = gl->gl_state;
623 do_error(gl, ret);
624 goto out;
625 }
626 }
627 switch(state) {
628 /* Unlocked due to conversion deadlock, try again */
629 case LM_ST_UNLOCKED:
630retry:
631 do_xmote(gl, gh, gl->gl_target);
632 break;
633 /* Conversion fails, unlock and try again */
634 case LM_ST_SHARED:
635 case LM_ST_DEFERRED:
636 do_xmote(gl, gh, LM_ST_UNLOCKED);
637 break;
638 default: /* Everything else */
639 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
640 gl->gl_target, state);
641 GLOCK_BUG_ON(gl, 1);
642 }
643 return;
644 }
645
646 /* Fast path - we got what we asked for */
647 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
648 gfs2_demote_wake(gl);
649 if (state != LM_ST_UNLOCKED) {
650 if (glops->go_xmote_bh) {
651 int rv;
652
653 spin_unlock(&gl->gl_lockref.lock);
654 rv = glops->go_xmote_bh(gl);
655 spin_lock(&gl->gl_lockref.lock);
656 if (rv) {
657 do_error(gl, rv);
658 goto out;
659 }
660 }
661 do_promote(gl);
662 }
663out:
664 clear_bit(GLF_LOCK, &gl->gl_flags);
665}
666
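/*
 * is_system_glock - check whether a glock protects a system file
 * @gl: the glock
 *
 * System glocks such as the statfs inode glock must remain usable during
 * a withdraw (see the comment in do_xmote()).
 */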
667static bool is_system_glock(struct gfs2_glock *gl)
668{
669 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
670 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
671
672 if (gl == m_ip->i_gl)
673 return true;
674 return false;
675}
676
677/**
678 * do_xmote - Calls the DLM to change the state of a lock
679 * @gl: The lock state
680 * @gh: The holder (only for promotes)
681 * @target: The target lock state
682 *
683 */
684
685static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
686 unsigned int target)
687__releases(&gl->gl_lockref.lock)
688__acquires(&gl->gl_lockref.lock)
689{
690 const struct gfs2_glock_operations *glops = gl->gl_ops;
691 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
692 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
693 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
694 int ret;
695
696 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
697 gh && !(gh->gh_flags & LM_FLAG_NOEXP))
698 goto skip_inval;
699
700 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
701 GLOCK_BUG_ON(gl, gl->gl_state == target);
702 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
703 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
704 glops->go_inval) {
705 /*
706 * If another process is already doing the invalidate, let that
707 * finish first. The glock state machine will get back to this
708 * holder again later.
709 */
710 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
711 &gl->gl_flags))
712 return;
713 do_error(gl, 0); /* Fail queued try locks */
714 }
715 gl->gl_req = target;
716 set_bit(GLF_BLOCKING, &gl->gl_flags);
717 if ((gl->gl_req == LM_ST_UNLOCKED) ||
718 (gl->gl_state == LM_ST_EXCLUSIVE) ||
719 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
720 clear_bit(GLF_BLOCKING, &gl->gl_flags);
721 if (!glops->go_inval && !glops->go_sync)
722 goto skip_inval;
723
724 spin_unlock(&gl->gl_lockref.lock);
725 if (glops->go_sync) {
726 ret = glops->go_sync(gl);
727		/* If we had a problem syncing (due to io errors or whatever),
728		 * we should not invalidate the metadata or tell dlm to
729		 * release the glock to other nodes.
730		 */
731		if (ret) {
732			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
733				fs_err(sdp, "Error %d syncing glock\n", ret);
734 gfs2_dump_glock(NULL, gl, true);
735 }
736 spin_lock(&gl->gl_lockref.lock);
737 goto skip_inval;
738 }
739 }
740 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
741 /*
742 * The call to go_sync should have cleared out the ail list.
743 * If there are still items, we have a problem. We ought to
744 * withdraw, but we can't because the withdraw code also uses
745 * glocks. Warn about the error, dump the glock, then fall
746 * through and wait for logd to do the withdraw for us.
747 */
748 if ((atomic_read(&gl->gl_ail_count) != 0) &&
749 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
750 gfs2_glock_assert_warn(gl,
751 !atomic_read(&gl->gl_ail_count));
752 gfs2_dump_glock(NULL, gl, true);
753 }
754 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
755 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
756 }
757 spin_lock(&gl->gl_lockref.lock);
758
759skip_inval:
760 gl->gl_lockref.count++;
761 /*
762 * Check for an error encountered since we called go_sync and go_inval.
763 * If so, we can't withdraw from the glock code because the withdraw
764 * code itself uses glocks (see function signal_our_withdraw) to
765 * change the mount to read-only. Most importantly, we must not call
766 * dlm to unlock the glock until the journal is in a known good state
767 * (after journal replay) otherwise other nodes may use the object
768 * (rgrp or dinode) and then later, journal replay will corrupt the
769 * file system. The best we can do here is wait for the logd daemon
770 * to see sd_log_error and withdraw, and in the meantime, requeue the
771 * work for later.
772 *
773 * We make a special exception for some system glocks, such as the
774 * system statfs inode glock, which needs to be granted before the
775 * gfs2_quotad daemon can exit, and that exit needs to finish before
776 * we can unmount the withdrawn file system.
777 *
778 * However, if we're just unlocking the lock (say, for unmount, when
779 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
780 * then it's okay to tell dlm to unlock it.
781 */
782 if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
783 gfs2_withdraw_delayed(sdp);
784 if (glock_blocked_by_withdraw(gl) &&
785 (target != LM_ST_UNLOCKED ||
786 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
787 if (!is_system_glock(gl)) {
788 request_demote(gl, LM_ST_UNLOCKED, 0, false);
789 /*
790 * Ordinarily, we would call dlm and its callback would call
791 * finish_xmote, which would call state_change() to the new state.
792 * Since we withdrew, we won't call dlm, so call state_change
793 * manually, but to the UNLOCKED state we desire.
794 */
795 state_change(gl, LM_ST_UNLOCKED);
796 /*
797 * We skip telling dlm to do the locking, so we won't get a
798 * reply that would otherwise clear GLF_LOCK. So we clear it here.
799 */
800 clear_bit(GLF_LOCK, &gl->gl_flags);
801 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
802 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
803 return;
804 } else {
805 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
806 }
807 }
808
809 if (ls->ls_ops->lm_lock) {
810 spin_unlock(&gl->gl_lockref.lock);
811 ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
812 spin_lock(&gl->gl_lockref.lock);
813
814 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
815 target == LM_ST_UNLOCKED &&
816 test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
817 /*
818 * The lockspace has been released and the lock has
819 * been unlocked implicitly.
820 */
821 } else if (ret) {
822 fs_err(sdp, "lm_lock ret %d\n", ret);
823 target = gl->gl_state | LM_OUT_ERROR;
824 } else {
825 /* The operation will be completed asynchronously. */
826 return;
827 }
828 }
829
830 /* Complete the operation now. */
831 finish_xmote(gl, target);
832 gfs2_glock_queue_work(gl, 0);
833}
834
835/**
836 * run_queue - do all outstanding tasks related to a glock
837 * @gl: The glock in question
838 * @nonblock: True if we must not block in run_queue
839 *
840 */
841
842static void run_queue(struct gfs2_glock *gl, const int nonblock)
843__releases(&gl->gl_lockref.lock)
844__acquires(&gl->gl_lockref.lock)
845{
846 struct gfs2_holder *gh = NULL;
847
848 if (test_bit(GLF_LOCK, &gl->gl_flags))
849 return;
850 set_bit(GLF_LOCK, &gl->gl_flags);
851
852 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
853
854 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
855 gl->gl_demote_state != gl->gl_state) {
856 if (find_first_holder(gl))
857 goto out_unlock;
858 if (nonblock)
859 goto out_sched;
860 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
861 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
862 gl->gl_target = gl->gl_demote_state;
863 } else {
864 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
865 gfs2_demote_wake(gl);
866 if (do_promote(gl))
867 goto out_unlock;
868 gh = find_first_waiter(gl);
869 gl->gl_target = gh->gh_state;
870 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
871 do_error(gl, 0); /* Fail queued try locks */
872 }
873 do_xmote(gl, gh, gl->gl_target);
874 return;
875
876out_sched:
877 clear_bit(GLF_LOCK, &gl->gl_flags);
878 smp_mb__after_atomic();
879 gl->gl_lockref.count++;
880 gfs2_glock_queue_work(gl, 0);
881 return;
882
883out_unlock:
884 clear_bit(GLF_LOCK, &gl->gl_flags);
885 smp_mb__after_atomic();
886}
887
888/**
889 * glock_set_object - set the gl_object field of a glock
890 * @gl: the glock
891 * @object: the object
892 */
893void glock_set_object(struct gfs2_glock *gl, void *object)
894{
895 void *prev_object;
896
897 spin_lock(&gl->gl_lockref.lock);
898 prev_object = gl->gl_object;
899 gl->gl_object = object;
900 spin_unlock(&gl->gl_lockref.lock);
901 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
902 pr_warn("glock=%u/%llx\n",
903 gl->gl_name.ln_type,
904 (unsigned long long)gl->gl_name.ln_number);
905 gfs2_dump_glock(NULL, gl, true);
906 }
907}
908
909/**
910 * glock_clear_object - clear the gl_object field of a glock
911 * @gl: the glock
912 * @object: object the glock currently points at
913 */
914void glock_clear_object(struct gfs2_glock *gl, void *object)
915{
916 void *prev_object;
917
918 spin_lock(&gl->gl_lockref.lock);
919 prev_object = gl->gl_object;
920 gl->gl_object = NULL;
921 spin_unlock(&gl->gl_lockref.lock);
922 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
923 pr_warn("glock=%u/%llx\n",
924 gl->gl_name.ln_type,
925 (unsigned long long)gl->gl_name.ln_number);
926 gfs2_dump_glock(NULL, gl, true);
927 }
928}
929
930void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
931{
932 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
933
934 if (ri->ri_magic == 0)
935 ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
936 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
937 ri->ri_generation_deleted = cpu_to_be64(generation);
938}
939
940bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
941{
942 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
943
944 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
945 return false;
946 return generation <= be64_to_cpu(ri->ri_generation_deleted);
947}
948
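/*
 * gfs2_glock_poke - briefly acquire and release a glock
 * @gl: the glock
 *
 * Makes a "try 1CB" locking request, which may cause a contention
 * callback to be sent to a remote node still caching the lock.
 */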
949static void gfs2_glock_poke(struct gfs2_glock *gl)
950{
951 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
952 struct gfs2_holder gh;
953 int error;
954
955 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
956 error = gfs2_glock_nq(&gh);
957 if (!error)
958 gfs2_glock_dq(&gh);
959 gfs2_holder_uninit(&gh);
960}
961
962static void gfs2_try_evict(struct gfs2_glock *gl)
963{
964 struct gfs2_inode *ip;
965
966 /*
967 * If there is contention on the iopen glock and we have an inode, try
968 * to grab and release the inode so that it can be evicted. The
969 * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
970 * should not be deleted locally. This will allow the remote node to
971 * go ahead and delete the inode without us having to do it, which will
972 * avoid rgrp glock thrashing.
973 *
974 * The remote node is likely still holding the corresponding inode
975 * glock, so it will run before we get to verify that the delete has
976 * happened below. (Verification is triggered by the call to
977 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
978 */
979 spin_lock(&gl->gl_lockref.lock);
980 ip = gl->gl_object;
981 if (ip && !igrab(&ip->i_inode))
982 ip = NULL;
983 spin_unlock(&gl->gl_lockref.lock);
984 if (ip) {
985 wait_on_inode(&ip->i_inode);
986 if (is_bad_inode(&ip->i_inode)) {
987 iput(&ip->i_inode);
988 ip = NULL;
989 }
990 }
991 if (ip) {
992 set_bit(GIF_DEFER_DELETE, &ip->i_flags);
993 d_prune_aliases(&ip->i_inode);
994 iput(&ip->i_inode);
995
996 /* If the inode was evicted, gl->gl_object will now be NULL. */
997 spin_lock(&gl->gl_lockref.lock);
998 ip = gl->gl_object;
999 if (ip) {
1000 clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
1001 if (!igrab(&ip->i_inode))
1002 ip = NULL;
1003 }
1004 spin_unlock(&gl->gl_lockref.lock);
1005 if (ip) {
1006 gfs2_glock_poke(ip->i_gl);
1007 iput(&ip->i_inode);
1008 }
1009 }
1010}
1011
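/*
 * gfs2_queue_try_to_evict - queue delete work to try to evict an inode
 * @gl: the iopen glock
 *
 * Returns true if the work wasn't already queued.
 */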
1012bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
1013{
1014 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1015
1016 if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
1017 return false;
1018 return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
1019}
1020
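/*
 * gfs2_queue_verify_delete - queue work to verify that an inode was deleted
 * @gl: the iopen glock
 * @later: delay the work by a randomized interval of 1 to 10 seconds
 *
 * Returns true if the work was queued.
 */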
1021bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
1022{
1023 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1024 unsigned long delay;
1025
1026 if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
1027 return false;
1028 delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
1029 return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
1030}
1031
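/*
 * delete_work_func - work function for iopen glock deletes
 * @work: the gl_delete work
 *
 * Tries to evict the inode if GLF_TRY_TO_EVICT is set; if GLF_VERIFY_DELETE
 * is set, verifies that a remotely deleted inode is gone, requeueing itself
 * when the inode is still in use elsewhere.
 */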
1032static void delete_work_func(struct work_struct *work)
1033{
1034 struct delayed_work *dwork = to_delayed_work(work);
1035 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
1036 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1037 bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
1038
1039 if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
1040 gfs2_try_evict(gl);
1041
1042 if (verify_delete) {
1043 u64 no_addr = gl->gl_name.ln_number;
1044 struct inode *inode;
1045
1046 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
1047 GFS2_BLKST_UNLINKED);
1048 if (IS_ERR(inode)) {
1049 if (PTR_ERR(inode) == -EAGAIN &&
1050 !test_bit(SDF_KILL, &sdp->sd_flags) &&
1051 gfs2_queue_verify_delete(gl, true))
1052 return;
1053 } else {
1054 d_prune_aliases(inode);
1055 iput(inode);
1056 }
1057 }
1058
1059 gfs2_glock_put(gl);
1060}
1061
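/*
 * glock_work_func - the main glock state machine work function
 * @work: the gl_work work
 *
 * Processes a pending lock module reply, turns an expired pending demote
 * into an actual demote request, and runs the queue of waiting holders.
 * The glock references held by the work item are dropped at the end.
 */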
1062static void glock_work_func(struct work_struct *work)
1063{
1064 unsigned long delay = 0;
1065 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
1066 unsigned int drop_refs = 1;
1067
1068 spin_lock(&gl->gl_lockref.lock);
1069 if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
1070 clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
1071 finish_xmote(gl, gl->gl_reply);
1072 drop_refs++;
1073 }
1074 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1075 gl->gl_state != LM_ST_UNLOCKED &&
1076 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
1077 if (gl->gl_name.ln_type == LM_TYPE_INODE) {
1078 unsigned long holdtime, now = jiffies;
1079
1080 holdtime = gl->gl_tchange + gl->gl_hold_time;
1081 if (time_before(now, holdtime))
1082 delay = holdtime - now;
1083 }
1084
1085 if (!delay) {
1086 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1087 gfs2_set_demote(GLF_DEMOTE, gl);
1088 }
1089 }
1090 run_queue(gl, 0);
1091 if (delay) {
1092 /* Keep one glock reference for the work we requeue. */
1093 drop_refs--;
1094 gfs2_glock_queue_work(gl, delay);
1095 }
1096
1097 /* Drop the remaining glock references manually. */
1098 GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
1099 gl->gl_lockref.count -= drop_refs;
1100 if (!gl->gl_lockref.count) {
1101 if (gl->gl_state == LM_ST_UNLOCKED) {
1102 __gfs2_glock_put(gl);
1103 return;
1104 }
1105 gfs2_glock_add_to_lru(gl);
1106 }
1107 spin_unlock(&gl->gl_lockref.lock);
1108}
1109
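/*
 * find_insert_glock - look up a glock, or insert a new one
 * @name: the lock name
 * @new: the glock to insert, or NULL for a plain lookup
 *
 * If a matching glock exists but is dead, waits for it to be freed and
 * retries.  Returns the existing glock with a reference taken, NULL if
 * @new was inserted (or nothing was found), or an error pointer.
 */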
1110static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
1111 struct gfs2_glock *new)
1112{
1113 struct wait_glock_queue wait;
1114 wait_queue_head_t *wq = glock_waitqueue(name);
1115 struct gfs2_glock *gl;
1116
1117 wait.name = name;
1118 init_wait(&wait.wait);
1119 wait.wait.func = glock_wake_function;
1120
1121again:
1122 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1123 rcu_read_lock();
1124 if (new) {
1125 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
1126 &new->gl_node, ht_parms);
1127 if (IS_ERR(gl))
1128 goto out;
1129 } else {
1130 gl = rhashtable_lookup_fast(&gl_hash_table,
1131 name, ht_parms);
1132 }
1133 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
1134 rcu_read_unlock();
1135 schedule();
1136 goto again;
1137 }
1138out:
1139 rcu_read_unlock();
1140 finish_wait(wq, &wait.wait);
1141 if (gl)
1142 gfs2_glock_remove_from_lru(gl);
1143 return gl;
1144}
1145
1146/**
1147 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
1148 * @sdp: The GFS2 superblock
1149 * @number: the lock number
1150 * @glops: The glock_operations to use
1151 * @create: If 0, don't create the glock if it doesn't exist
1152 * @glp: the glock is returned here
1153 *
1154 * This does not lock a glock, just finds/creates structures for one.
1155 *
1156 * Returns: errno
1157 */
1158
1159int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
1160 const struct gfs2_glock_operations *glops, int create,
1161 struct gfs2_glock **glp)
1162{
1163 struct super_block *s = sdp->sd_vfs;
1164 struct lm_lockname name = { .ln_number = number,
1165 .ln_type = glops->go_type,
1166 .ln_sbd = sdp };
1167 struct gfs2_glock *gl, *tmp;
1168 struct address_space *mapping;
1169
1170 gl = find_insert_glock(&name, NULL);
1171 if (gl)
1172 goto found;
1173 if (!create)
1174 return -ENOENT;
1175
1176 if (glops->go_flags & GLOF_ASPACE) {
1177 struct gfs2_glock_aspace *gla =
1178 kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
1179 if (!gla)
1180 return -ENOMEM;
1181 gl = &gla->glock;
1182 } else {
1183 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
1184 if (!gl)
1185 return -ENOMEM;
1186 }
1187 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
1188 gl->gl_ops = glops;
1189
1190 if (glops->go_flags & GLOF_LVB) {
1191 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
1192 if (!gl->gl_lksb.sb_lvbptr) {
1193 gfs2_glock_dealloc(&gl->gl_rcu);
1194 return -ENOMEM;
1195 }
1196 }
1197
1198 atomic_inc(&sdp->sd_glock_disposal);
1199 gl->gl_node.next = NULL;
1200 gl->gl_flags = BIT(GLF_INITIAL);
1201 if (glops->go_instantiate)
1202 gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
1203 gl->gl_name = name;
1204 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
1205 gl->gl_lockref.count = 1;
1206 gl->gl_state = LM_ST_UNLOCKED;
1207 gl->gl_target = LM_ST_UNLOCKED;
1208 gl->gl_demote_state = LM_ST_EXCLUSIVE;
1209 gl->gl_dstamp = 0;
1210 preempt_disable();
1211 /* We use the global stats to estimate the initial per-glock stats */
1212 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
1213 preempt_enable();
1214 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
1215 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
1216 gl->gl_tchange = jiffies;
1217 gl->gl_object = NULL;
1218 gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
1219 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
1220 if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
1221 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
1222
1223 mapping = gfs2_glock2aspace(gl);
1224 if (mapping) {
1225 mapping->a_ops = &gfs2_meta_aops;
1226 mapping->host = s->s_bdev->bd_mapping->host;
1227 mapping->flags = 0;
1228 mapping_set_gfp_mask(mapping, GFP_NOFS);
1229 mapping->i_private_data = NULL;
1230 mapping->writeback_index = 0;
1231 }
1232
1233 tmp = find_insert_glock(&name, gl);
1234 if (tmp) {
1235 gfs2_glock_dealloc(&gl->gl_rcu);
1236 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
1237 wake_up(&sdp->sd_kill_wait);
1238
1239 if (IS_ERR(tmp))
1240 return PTR_ERR(tmp);
1241 gl = tmp;
1242 }
1243
1244found:
1245 *glp = gl;
1246 return 0;
1247}
1248
1249/**
1250 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
1251 * @gl: the glock
1252 * @state: the state we're requesting
1253 * @flags: the modifier flags
1254 * @gh: the holder structure
1255 *
1256 */
1257
1258void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
1259 struct gfs2_holder *gh, unsigned long ip)
1260{
1261 INIT_LIST_HEAD(&gh->gh_list);
1262 gh->gh_gl = gfs2_glock_hold(gl);
1263 gh->gh_ip = ip;
1264 gh->gh_owner_pid = get_pid(task_pid(current));
1265 gh->gh_state = state;
1266 gh->gh_flags = flags;
1267 gh->gh_iflags = 0;
1268}
1269
1270/**
1271 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
1272 * @state: the state we're requesting
1273 * @flags: the modifier flags
1274 * @gh: the holder structure
1275 *
1276 * Don't mess with the glock.
1277 *
1278 */
1279
1280void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
1281{
1282 gh->gh_state = state;
1283 gh->gh_flags = flags;
1284 gh->gh_iflags = 0;
1285 gh->gh_ip = _RET_IP_;
1286 put_pid(gh->gh_owner_pid);
1287 gh->gh_owner_pid = get_pid(task_pid(current));
1288}
1289
1290/**
1291 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
1292 * @gh: the holder structure
1293 *
1294 */
1295
1296void gfs2_holder_uninit(struct gfs2_holder *gh)
1297{
1298 put_pid(gh->gh_owner_pid);
1299 gfs2_glock_put(gh->gh_gl);
1300 gfs2_holder_mark_uninitialized(gh);
1301 gh->gh_ip = 0;
1302}
1303
1304static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
1305 unsigned long start_time)
1306{
1307	/* Have we waited longer than a second? */
1308 if (time_after(jiffies, start_time + HZ)) {
1309 /* Lengthen the minimum hold time. */
1310 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
1311 GL_GLOCK_MAX_HOLD);
1312 }
1313}
1314
1315/**
1316 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
1317 * @gh: the glock holder
1318 *
1319 * Called when a glock holder no longer needs to be waited for because it is
1320 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
1321 * failed (gh_error != 0).
1322 */
1323
1324int gfs2_glock_holder_ready(struct gfs2_holder *gh)
1325{
1326 if (gh->gh_error || (gh->gh_flags & GL_SKIP))
1327 return gh->gh_error;
1328 gh->gh_error = gfs2_instantiate(gh);
1329 if (gh->gh_error)
1330 gfs2_glock_dq(gh);
1331 return gh->gh_error;
1332}
1333
1334/**
1335 * gfs2_glock_wait - wait on a glock acquisition
1336 * @gh: the glock holder
1337 *
1338 * Returns: 0 on success
1339 */
1340
1341int gfs2_glock_wait(struct gfs2_holder *gh)
1342{
1343 unsigned long start_time = jiffies;
1344
1345 might_sleep();
1346 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1347 gfs2_glock_update_hold_time(gh->gh_gl, start_time);
1348 return gfs2_glock_holder_ready(gh);
1349}
1350
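/*
 * glocks_pending - check if any of the holders in @ghs are still waiting
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 */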
1351static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
1352{
1353 int i;
1354
1355 for (i = 0; i < num_gh; i++)
1356 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
1357 return 1;
1358 return 0;
1359}
1360
1361/**
1362 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
1363 * @num_gh: the number of holders in the array
1364 * @ghs: the glock holder array
1365 *
1366 * Returns: 0 on success, meaning all glocks have been granted and are held.
1367 * -ESTALE if the request timed out, meaning all glocks were released,
1368 * and the caller should retry the operation.
1369 */
1370
1371int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
1372{
1373 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
1374 int i, ret = 0, timeout = 0;
1375 unsigned long start_time = jiffies;
1376
1377 might_sleep();
1378 /*
1379 * Total up the (minimum hold time * 2) of all glocks and use that to
1380 * determine the max amount of time we should wait.
1381 */
1382 for (i = 0; i < num_gh; i++)
1383 timeout += ghs[i].gh_gl->gl_hold_time << 1;
1384
1385 if (!wait_event_timeout(sdp->sd_async_glock_wait,
1386 !glocks_pending(num_gh, ghs), timeout)) {
1387 ret = -ESTALE; /* request timed out. */
1388 goto out;
1389 }
1390
1391 for (i = 0; i < num_gh; i++) {
1392 struct gfs2_holder *gh = &ghs[i];
1393 int ret2;
1394
1395 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1396 gfs2_glock_update_hold_time(gh->gh_gl,
1397 start_time);
1398 }
1399 ret2 = gfs2_glock_holder_ready(gh);
1400 if (!ret)
1401 ret = ret2;
1402 }
1403
1404out:
1405 if (ret) {
1406 for (i = 0; i < num_gh; i++) {
1407 struct gfs2_holder *gh = &ghs[i];
1408
1409 gfs2_glock_dq(gh);
1410 }
1411 }
1412 return ret;
1413}
1414
1415/**
1416 * request_demote - process a demote request
1417 * @gl: the glock
1418 * @state: the state the caller wants us to change to
1419 * @delay: zero to demote immediately; otherwise pending demote
1420 * @remote: true if this came from a different cluster node
1421 *
1422 * There are only two requests that we are going to see in actual
1423 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
1424 */
1425
1426static void request_demote(struct gfs2_glock *gl, unsigned int state,
1427 unsigned long delay, bool remote)
1428{
1429 gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
1430 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
1431 gl->gl_demote_state = state;
1432 gl->gl_demote_time = jiffies;
1433 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
1434 gl->gl_demote_state != state) {
1435 gl->gl_demote_state = LM_ST_UNLOCKED;
1436 }
1437 if (gl->gl_ops->go_callback)
1438 gl->gl_ops->go_callback(gl, remote);
1439 trace_gfs2_demote_rq(gl, remote);
1440}
1441
1442void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
1443{
1444 struct va_format vaf;
1445 va_list args;
1446
1447 va_start(args, fmt);
1448
1449 if (seq) {
1450 seq_vprintf(seq, fmt, args);
1451 } else {
1452 vaf.fmt = fmt;
1453 vaf.va = &args;
1454
1455 pr_err("%pV", &vaf);
1456 }
1457
1458 va_end(args);
1459}
1460
1461static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
1462{
1463 if (!(gh->gh_flags & GL_NOPID))
1464 return true;
1465 if (gh->gh_state == LM_ST_UNLOCKED)
1466 return true;
1467 return false;
1468}
1469
1470/**
1471 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1472 * @gh: the holder structure to add
1473 *
1474 * Eventually we should move the recursive locking trap to a
1475 * debugging option or something like that. This is the fast
1476 * path and needs to have the minimum number of distractions.
1477 *
1478 */
1479
1480static inline void add_to_queue(struct gfs2_holder *gh)
1481__releases(&gl->gl_lockref.lock)
1482__acquires(&gl->gl_lockref.lock)
1483{
1484 struct gfs2_glock *gl = gh->gh_gl;
1485 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1486 struct list_head *insert_pt = NULL;
1487 struct gfs2_holder *gh2;
1488 int try_futile = 0;
1489
1490 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1491 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1492 GLOCK_BUG_ON(gl, true);
1493
1494 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1495 if (test_bit(GLF_LOCK, &gl->gl_flags)) {
1496 struct gfs2_holder *current_gh;
1497
1498 current_gh = find_first_holder(gl);
1499 try_futile = !may_grant(gl, current_gh, gh);
1500 }
1501 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1502 goto fail;
1503 }
1504
1505 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1506 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
1507 continue;
1508 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
1509 continue;
1510 if (!pid_is_meaningful(gh2))
1511 continue;
1512 goto trap_recursive;
1513 }
1514 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1515 if (try_futile &&
1516 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1517fail:
1518 gh->gh_error = GLR_TRYFAILED;
1519 gfs2_holder_wake(gh);
1520 return;
1521 }
1522 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1523 continue;
1524 }
1525 trace_gfs2_glock_queue(gh, 1);
1526 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1527 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1528 if (likely(insert_pt == NULL)) {
1529 list_add_tail(&gh->gh_list, &gl->gl_holders);
1530 return;
1531 }
1532 list_add_tail(&gh->gh_list, insert_pt);
1533 spin_unlock(&gl->gl_lockref.lock);
1534 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1535 sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1536 spin_lock(&gl->gl_lockref.lock);
1537 return;
1538
1539trap_recursive:
1540 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
1541 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1542 fs_err(sdp, "lock type: %d req lock state : %d\n",
1543 gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1544 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1545 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
1546 fs_err(sdp, "lock type: %d req lock state : %d\n",
1547 gh->gh_gl->gl_name.ln_type, gh->gh_state);
1548 gfs2_dump_glock(NULL, gl, true);
1549 BUG();
1550}
1551
1552/**
1553 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1554 * @gh: the holder structure
1555 *
1556 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1557 *
1558 * Returns: 0, GLR_TRYFAILED, or errno on failure
1559 */
1560
1561int gfs2_glock_nq(struct gfs2_holder *gh)
1562{
1563 struct gfs2_glock *gl = gh->gh_gl;
1564 int error;
1565
1566 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1567 return -EIO;
1568
1569 if (gh->gh_flags & GL_NOBLOCK) {
1570 struct gfs2_holder *current_gh;
1571
1572 error = -ECHILD;
1573 spin_lock(&gl->gl_lockref.lock);
1574 if (find_last_waiter(gl))
1575 goto unlock;
1576 current_gh = find_first_holder(gl);
1577 if (!may_grant(gl, current_gh, gh))
1578 goto unlock;
1579 set_bit(HIF_HOLDER, &gh->gh_iflags);
1580 list_add_tail(&gh->gh_list, &gl->gl_holders);
1581 trace_gfs2_promote(gh);
1582 error = 0;
1583unlock:
1584 spin_unlock(&gl->gl_lockref.lock);
1585 return error;
1586 }
1587
1588 gh->gh_error = 0;
1589 spin_lock(&gl->gl_lockref.lock);
1590 add_to_queue(gh);
1591 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1592 test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
1593 set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
1594 gl->gl_lockref.count++;
1595 gfs2_glock_queue_work(gl, 0);
1596 }
1597 run_queue(gl, 1);
1598 spin_unlock(&gl->gl_lockref.lock);
1599
1600 error = 0;
1601 if (!(gh->gh_flags & GL_ASYNC))
1602 error = gfs2_glock_wait(gh);
1603
1604 return error;
1605}
1606
1607/**
1608 * gfs2_glock_poll - poll to see if an async request has been completed
1609 * @gh: the holder
1610 *
1611 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1612 */
1613
1614int gfs2_glock_poll(struct gfs2_holder *gh)
1615{
1616 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1617}
1618
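/*
 * __gfs2_glock_dq - dequeue a holder, with gl_lockref.lock held
 * @gh: the glock holder
 */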
1619static void __gfs2_glock_dq(struct gfs2_holder *gh)
1620{
1621 struct gfs2_glock *gl = gh->gh_gl;
1622 unsigned delay = 0;
1623 int fast_path = 0;
1624
1625 /*
1626 * This holder should not be cached, so mark it for demote.
1627 * Note: this should be done before the glock_needs_demote
1628 * check below.
1629 */
1630 if (gh->gh_flags & GL_NOCACHE)
1631 request_demote(gl, LM_ST_UNLOCKED, 0, false);
1632
1633 list_del_init(&gh->gh_list);
1634 clear_bit(HIF_HOLDER, &gh->gh_iflags);
1635 trace_gfs2_glock_queue(gh, 0);
1636
1637 /*
1638 * If there hasn't been a demote request we are done.
1639 * (Let the remaining holders, if any, keep holding it.)
1640 */
1641 if (!glock_needs_demote(gl)) {
1642 if (list_empty(&gl->gl_holders))
1643 fast_path = 1;
1644 }
1645
1646 if (unlikely(!fast_path)) {
1647 gl->gl_lockref.count++;
1648 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1649 !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1650 gl->gl_name.ln_type == LM_TYPE_INODE)
1651 delay = gl->gl_hold_time;
1652 gfs2_glock_queue_work(gl, delay);
1653 }
1654}
1655
1656/**
1657 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1658 * @gh: the glock holder
1659 *
1660 */
1661void gfs2_glock_dq(struct gfs2_holder *gh)
1662{
1663 struct gfs2_glock *gl = gh->gh_gl;
1664 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1665
1666 spin_lock(&gl->gl_lockref.lock);
1667 if (!gfs2_holder_queued(gh)) {
1668 /*
1669 * May have already been dequeued because the locking request
1670 * was GL_ASYNC and it has failed in the meantime.
1671 */
1672 goto out;
1673 }
1674
1675 if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
1676 !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1677 spin_unlock(&gl->gl_lockref.lock);
1678 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
1679 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1680 spin_lock(&gl->gl_lockref.lock);
1681 }
1682
1683 /*
1684 * If we're in the process of file system withdraw, we cannot just
1685 * dequeue any glocks until our journal is recovered, lest we introduce
1686 * file system corruption. We need two exceptions to this rule: We need
1687 * to allow unlocking of nondisk glocks and the glock for our own
1688 * journal that needs recovery.
1689 */
1690 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
1691 glock_blocked_by_withdraw(gl) &&
1692 gh->gh_gl != sdp->sd_jinode_gl) {
1693 sdp->sd_glock_dqs_held++;
1694 spin_unlock(&gl->gl_lockref.lock);
1695 might_sleep();
1696 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
1697 TASK_UNINTERRUPTIBLE);
1698 spin_lock(&gl->gl_lockref.lock);
1699 }
1700
1701 __gfs2_glock_dq(gh);
1702out:
1703 spin_unlock(&gl->gl_lockref.lock);
1704}
1705
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

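/*
 * Usage sketch (illustrative, not part of the source): locking a glock by
 * its on-disk block number; "sdp" and "no_addr" are assumed to exist in
 * the caller:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
 *				  LM_ST_SHARED, 0, &gh);
 *	if (!error)
 *		gfs2_glock_dq_uninit(&gh);
 */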
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

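/*
 * Usage sketch (illustrative, not part of the source): acquiring two glocks
 * deadlock-free and releasing them with gfs2_glock_dq_m() below; "gl_a" and
 * "gl_b" are assumed to be existing glocks:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_SHARED, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_SHARED, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */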
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

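/**
 * gfs2_glock_cb - handle a demote request from the lock module
 * @gl: the glock being demoted
 * @state: the state the remote node wants us to drop to
 *
 * For held inode glocks, the demote is delayed by up to gl_hold_time so
 * that the local node can make some progress before giving up the lock.
 */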
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;

	gfs2_glock_hold(gl);
	spin_lock(&gl->gl_lockref.lock);
	if (!list_empty(&gl->gl_holders) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		unsigned long now = jiffies;
		unsigned long holdtime;

		holdtime = gl->gl_tchange + gl->gl_hold_time;

		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}
	request_demote(gl, state, delay, true);
	gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation, or
 * (c) there is a "noexp" flagged request anywhere in the queue.
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by the locking module
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is protected by gl_lockref.lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

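/*
 * glock_cmp - list_sort() callback ordering glocks by lock number, so that
 * any disk accesses during disposal are issued in (mostly) disk order.
 */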
static int glock_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

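/*
 * can_free_glock - a glock on the LRU may only be disposed of if it is not
 * locked, holds no references, and is not waiting on a log flush (unless
 * the filesystem is being killed).
 */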
static bool can_free_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
	       !gl->gl_lockref.count &&
	       (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
		test_bit(SDF_KILL, &sdp->sd_flags));
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private).
 */

static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;
	unsigned long freed = 0;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_move(&gl->gl_lru, &lru_list);
			continue;
		}
		if (!can_free_glock(gl)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
		freed++;
		gl->gl_lockref.count++;
		if (gl->gl_state != LM_ST_UNLOCKED)
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
		gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
	return freed;
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{
	struct gfs2_glock *gl, *next;
	LIST_HEAD(dispose);
	unsigned long freed = 0;

	spin_lock(&lru_lock);
	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
		if (!nr--)
			break;
		if (can_free_glock(gl))
			list_move(&gl->gl_lru, &dispose);
	}
	if (!list_empty(&dispose))
		freed = gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker *glock_shrinker;

/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
			if (gl->gl_name.ln_sbd == sdp)
				examiner(gl);
		}

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

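/**
 * gfs2_cancel_delete_work - cancel pending delete work on a glock
 * @gl: the glock
 *
 * Clears the eviction and delete-verification flags, and drops the
 * reference held by the delayed work if that work was still queued.
 */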
void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
	clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
	clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
	if (cancel_delayed_work(&gl->gl_delete))
		gfs2_glock_put(gl);
}

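/*
 * flush_delete_work - reschedule any pending delete work on an iopen glock
 * to run immediately, so that a subsequent flush of sd_delete_wq will wait
 * for it to complete.
 */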
static void flush_delete_work(struct gfs2_glock *gl)
{
	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
		struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

		if (cancel_delayed_work(&gl->gl_delete)) {
			queue_delayed_work(sdp->sd_delete_wq,
					   &gl->gl_delete, 0);
		}
	}
}

void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
	glock_hash_walk(flush_delete_work, sdp);
	flush_workqueue(sdp->sd_delete_wq);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
		return;
	if (!lockref_get_not_dead(&gl->gl_lockref))
		return;

	gfs2_glock_remove_from_lru(gl);
	spin_lock(&gl->gl_lockref.lock);
	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (!__lockref_is_dead(&gl->gl_lockref)) {
		gl->gl_lockref.count++;
		if (gl->gl_state != LM_ST_UNLOCKED)
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
		gfs2_glock_queue_work(gl, 0);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl, fsid);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl, true);
}

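/*
 * withdraw_dq - during a withdraw, fail the pending waiters of a glock
 * that is blocked by the withdraw, so that they do not sleep forever.
 */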
static void withdraw_dq(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	if (!__lockref_is_dead(&gl->gl_lockref) &&
	    glock_blocked_by_withdraw(gl))
		do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
{
	glock_hash_walk(withdraw_dq, sdp);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;
	bool timed_out = false;

	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(sdp->sd_glock_wq);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(sdp->sd_glock_wq);

	while (!timed_out) {
		wait_event_timeout(sdp->sd_kill_wait,
				   !atomic_read(&sdp->sd_glock_disposal),
				   HZ * 60);
		if (!atomic_read(&sdp->sd_glock_disposal))
			break;
		timed_out = time_after(jiffies, start + (HZ * 600));
		fs_warn(sdp, "%u glocks left after %u seconds%s\n",
			atomic_read(&sdp->sd_glock_disposal),
			jiffies_to_msecs(jiffies - start) / 1000,
			timed_out ? ":" : "; still waiting");
	}
	gfs2_lm_unmount(sdp);
	gfs2_free_dead_glocks(sdp);
	glock_hash_walk(dump_glock_func, sdp);
	destroy_workqueue(sdp->sd_glock_wq);
	sdp->sd_glock_wq = NULL;
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

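/*
 * hflags2str - encode the holder flags as a string of single characters
 * for the debugfs glock dump (see dump_holder() below).
 */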
static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;

	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_NODE_SCOPE)
		*p++ = 'n';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (flags & GL_SKIP)
		*p++ = 's';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{
	const char *comm = "(none)";
	pid_t owner_pid = 0;
	char flags_buf[32];

	rcu_read_lock();
	if (pid_is_meaningful(gh)) {
		struct task_struct *gh_owner;

		comm = "(ended)";
		owner_pid = pid_nr(gh->gh_owner_pid);
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
		if (gh_owner)
			comm = gh_owner->comm;
	}
	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       fs_id_buf, state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
	rcu_read_unlock();
}

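/*
 * gflags2str - encode the glock flags as a string of single characters
 * for the debugfs glock dump (see gfs2_dump_glock() below).
 */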
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_HAVE_REPLY, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'a';
	if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
		*p++ = 'F';
	if (!list_empty(&gl->gl_holders))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	if (test_bit(GLF_UNLOCKED, gflags))
		*p++ = 'x';
	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
		*p++ = 'n';
	if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
		*p++ = 'N';
	if (test_bit(GLF_TRY_TO_EVICT, gflags))
		*p++ = 'e';
	if (test_bit(GLF_VERIFY_DELETE, gflags))
		*p++ = 'E';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces, for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
	unsigned long nrpages = 0;

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct address_space *mapping = gfs2_glock2aspace(gl);

		nrpages = mapping->nrpages;
	}
	memset(fs_id_buf, 0, sizeof(fs_id_buf));
	if (fsid && sdp) /* safety precaution */
		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
		       "v:%d r:%d m:%ld p:%lu\n",
		       fs_id_buf, state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh, fs_id_buf);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl, fs_id_buf);
}

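/*
 * gfs2_glstats_seq_show - print one line of per-glock statistics: the
 * smoothed lock round trip times, the smoothed inter-request time, and
 * the dlm request and queue counts.
 */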
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT] = "srtt",
	[GFS2_LKS_SRTTVAR] = "srttvar",
	[GFS2_LKS_SRTTB] = "srttb",
	[GFS2_LKS_SRTTVARB] = "srttvarb",
	[GFS2_LKS_SIRT] = "sirt",
	[GFS2_LKS_SIRTVAR] = "sirtvar",
	[GFS2_LKS_DCOUNT] = "dlm",
	[GFS2_LKS_QCOUNT] = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

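/*
 * gfs2_sbstats_seq_show - print one row of the per-superblock stats table.
 * Each seq position encodes a (glock type, stat type) pair: the glock type
 * in pos >> 3 and the stat index in the low three bits.
 */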
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_shrinker = shrinker_alloc(0, "gfs2-glock");
	if (!glock_shrinker) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	glock_shrinker->count_objects = gfs2_glock_shrink_count;
	glock_shrinker->scan_objects = gfs2_glock_shrink_scan;

	shrinker_register(glock_shrinker);

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	shrinker_free(glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
}

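/*
 * gfs2_glock_iter_next - advance the debugfs iterator by n entries,
 * skipping glocks of other superblocks and dead glocks. When the
 * rhashtable walk restarts (-EAGAIN), entries may be seen twice.
 */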
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
	struct gfs2_glock *gl = gi->gl;

	if (gl) {
		if (n == 0)
			return;
		gfs2_glock_put_async(gl);
	}
	for (;;) {
		gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR_OR_NULL(gl)) {
			if (gl == ERR_PTR(-EAGAIN)) {
				n = 1;
				continue;
			}
			gl = NULL;
			break;
		}
		if (gl->gl_name.ln_sbd != gi->sdp)
			continue;
		if (n <= 1) {
			if (!lockref_get_not_dead(&gl->gl_lockref))
				continue;
			break;
		} else {
			if (__lockref_is_dead(&gl->gl_lockref))
				continue;
			n--;
		}
	}
	gi->gl = gl;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr, false);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_sops = {
	.start = gfs2_sbstats_seq_start,
	.next = gfs2_sbstats_seq_next,
	.stop = gfs2_sbstats_seq_stop,
	.show = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));

	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry; the
		 * first call to rhashtable_walk_next gets us the first entry.
		 */
		gi->last_pos = -1;
		gi->gl = NULL;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static const struct file_operations gfs2_glocks_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

struct gfs2_glockfd_iter {
	struct super_block *sb;
	unsigned int tgid;
	struct task_struct *task;
	unsigned int fd;
	struct file *file;
};

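/*
 * gfs2_glockfd_next_task - find the next task with tgid >= i->tgid in the
 * current pid namespace and take a reference on its task_struct.
 */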
static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
{
	struct pid_namespace *ns = task_active_pid_ns(current);
	struct pid *pid;

	if (i->task)
		put_task_struct(i->task);

	rcu_read_lock();
retry:
	i->task = NULL;
	pid = find_ge_pid(i->tgid, ns);
	if (pid) {
		i->tgid = pid_nr_ns(pid, ns);
		i->task = pid_task(pid, PIDTYPE_TGID);
		if (!i->task) {
			i->tgid++;
			goto retry;
		}
		get_task_struct(i->task);
	}
	rcu_read_unlock();
	return i->task;
}

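/*
 * gfs2_glockfd_next_file - starting at i->fd, find the next open file of
 * i->task that belongs to this super block.
 */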
static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
{
	if (i->file) {
		fput(i->file);
		i->file = NULL;
	}

	for (;; i->fd++) {
		i->file = fget_task_next(i->task, &i->fd);
		if (!i->file) {
			i->fd = 0;
			break;
		}

		if (file_inode(i->file)->i_sb == i->sb)
			break;

		fput(i->file);
	}
	return i->file;
}

static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glockfd_iter *i = seq->private;

	if (*pos)
		return NULL;
	while (gfs2_glockfd_next_task(i)) {
		if (gfs2_glockfd_next_file(i))
			return i;
		i->tgid++;
	}
	return NULL;
}

static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glockfd_iter *i = seq->private;

	(*pos)++;
	i->fd++;
	do {
		if (gfs2_glockfd_next_file(i))
			return i;
		i->tgid++;
	} while (gfs2_glockfd_next_task(i));
	return NULL;
}

static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glockfd_iter *i = seq->private;

	if (i->file)
		fput(i->file);
	if (i->task)
		put_task_struct(i->task);
}

static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
					struct gfs2_glockfd_iter *i)
{
	struct gfs2_file *fp = i->file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };

	if (!READ_ONCE(fl_gh->gh_gl))
		return;

	spin_lock(&i->file->f_lock);
	if (gfs2_holder_initialized(fl_gh))
		gl_name = fl_gh->gh_gl->gl_name;
	spin_unlock(&i->file->f_lock);

	if (gl_name.ln_type != LM_TYPE_RESERVED) {
		seq_printf(seq, "%d %u %u/%llx\n",
			   i->tgid, i->fd, gl_name.ln_type,
			   (unsigned long long)gl_name.ln_number);
	}
}

static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glockfd_iter *i = seq->private;
	struct inode *inode = file_inode(i->file);
	struct gfs2_glock *gl;

	inode_lock_shared(inode);
	gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
	if (gl) {
		seq_printf(seq, "%d %u %u/%llx\n",
			   i->tgid, i->fd, gl->gl_name.ln_type,
			   (unsigned long long)gl->gl_name.ln_number);
	}
	gfs2_glockfd_seq_show_flock(seq, i);
	inode_unlock_shared(inode);
	return 0;
}

static const struct seq_operations gfs2_glockfd_seq_ops = {
	.start = gfs2_glockfd_seq_start,
	.next = gfs2_glockfd_seq_next,
	.stop = gfs2_glockfd_seq_stop,
	.show = gfs2_glockfd_seq_show,
};

static int gfs2_glockfd_open(struct inode *inode, struct file *file)
{
	struct gfs2_glockfd_iter *i;
	struct gfs2_sbd *sdp = inode->i_private;

	i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
			       sizeof(struct gfs2_glockfd_iter));
	if (!i)
		return -ENOMEM;
	i->sb = sdp->sd_vfs;
	return 0;
}

static const struct file_operations gfs2_glockfd_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glockfd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glocks_fops);

	debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glockfd_fops);

	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);

	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	debugfs_remove_recursive(sdp->debugfs_dir);
	sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}