/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;		/* hash bucket index */
	unsigned nhash;		/* Index within current bucket */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	loff_t last_pos;	/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT	15
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The incore superblock
 * @name: The lock name (lock number and lock type)
 *
 * Returns: The number of the corresponding hash bucket
 */
84
85static unsigned int gl_hash(const struct gfs2_sbd *sdp,
86 const struct lm_lockname *name)
87{
88 unsigned int h;
89
90 h = jhash(&name->ln_number, sizeof(u64), 0);
91 h = jhash(&name->ln_type, sizeof(unsigned int), h);
92 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
93 h &= GFS2_GL_HASH_MASK;
94
95 return h;
96}
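
/*
 * Illustration (added commentary, not in the original): the bucket is
 * derived from all three identity components, so two mounted
 * filesystems can each hold a lock with the same number and type
 * without sharing a hash chain — the final jhash() round folds in the
 * superblock pointer.  The mask keeps the result within the
 * 2^GFS2_GL_HASH_SHIFT (32768) bucket table.
 */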

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
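
/*
 * Note (added commentary, not in the original): gfs2_glock_free() must
 * not free the glock memory directly, because lockless readers may
 * still be walking the RCU-protected hash chain that contained it.
 * call_rcu() defers the actual kmem_cache_free() until a grace period
 * has elapsed, i.e. until every such reader has finished.  The
 * sd_glock_disposal counter lets unmount wait for all pending frees:
 *
 *	wait_event(sdp->sd_glock_wait,
 *		   atomic_read(&sdp->sd_glock_disposal) == 0);
 */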

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
	spin_unlock(&gl->gl_lockref.lock);
	spin_lock_bucket(gl->gl_hash);
	hlist_bl_del_rcu(&gl->gl_list);
	spin_unlock_bucket(gl->gl_hash);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the incore superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&gl->gl_lockref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
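
/*
 * Worked example (added commentary, not in the original): suppose the
 * glock is held SH and the head of gl_holders is an SH holder.
 *
 *	- another SH request: gl_state == gh_state, so it is granted and
 *	  the holders share the lock;
 *	- an EX request: refused here, since an exclusive request that is
 *	  not at the head of the queue must wait;
 *	- a DF request with LM_FLAG_ANY: granted by the final test,
 *	  because any locked state satisfies such a caller.
 */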

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
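
/*
 * Example flow (added commentary, not in the original): consider a
 * remote node requesting a glock we hold in EX.  run_queue() sets
 * gl->gl_target to the demote state (say LM_ST_SHARED) and calls
 * do_xmote(), which then:
 *
 *	1. calls go_sync() to flush dirty data and metadata to disk;
 *	2. calls go_inval() only when dropping to UN or DF, states in
 *	   which cached pages cannot be kept valid;
 *	3. submits the conversion via lm_lock() (the lock_dlm backend),
 *	   or completes immediately via finish_xmote() (lock_nolock);
 *	4. the asynchronous DLM answer later arrives through
 *	   gfs2_glock_complete(), which queues glock_work_func() to
 *	   apply it via finish_xmote().
 */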

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
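
/*
 * Typical usage (illustrative sketch, not from this file; callers such
 * as the gfs2_glock_nq_init() wrapper follow this pattern):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	// enqueue and wait for grant
 *	if (error == 0) {
 *		// ... access the object protected by the glock ...
 *		gfs2_glock_dq(&gh);	// release our hold
 *	}
 *	gfs2_holder_uninit(&gh);	// drop the glock reference
 */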

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, mark the demote as pending rather than immediate
 * @remote: true if this demote request came from another cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
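
/*
 * Asynchronous usage (illustrative sketch, not from this file): with
 * GL_ASYNC the enqueue returns without waiting, and the caller collects
 * the result later with gfs2_glock_poll() and gfs2_glock_wait():
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	// ... do other work while the DLM processes the request ...
 *	if (gfs2_glock_poll(&gh))		// request completed?
 *		error = gfs2_glock_wait(&gh);	// fetch gh_error
 */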

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
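
/*
 * Deadlock-avoidance note with example (added commentary, not in the
 * original): nq_m_sync() sorts the holders by lock number before
 * acquiring them, so two tasks that each need glocks 5 and 9 both take
 * 5 first, then 9; one simply waits on 5 and the classic AB-BA deadlock
 * cannot occur.  A sketch of a two-lock caller (gl_a and gl_b are
 * hypothetical glocks obtained earlier via gfs2_glock_get()):
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	// all or nothing
 *	// ...
 *	gfs2_glock_dq_m(2, ghs);
 */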

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay, true);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_spin)) {
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		spin_unlock(&lru_lock);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
		spin_unlock(&gl->gl_spin);
		spin_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};
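
/*
 * How the shrinker is driven (added commentary, not in the original):
 * under memory pressure the VM first calls count_objects() to learn
 * roughly how many glocks are reclaimable, then calls scan_objects()
 * one or more times with sc->nr_to_scan set to a batch size.
 * Returning SHRINK_STOP when __GFP_FS is not set avoids re-entering
 * the filesystem from an allocation that is already inside it.
 */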

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket number
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}


/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		goto out;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
		gfs2_glock_put(gl);
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
1723 * example. The field's are n = number (id of the object), f = flags,
1724 * t = type, s = state, r = refcount, e = error, p = pid.
1725 *
1726 */
1727
1728void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1729{
1730 const struct gfs2_glock_operations *glops = gl->gl_ops;
1731 unsigned long long dtime;
1732 const struct gfs2_holder *gh;
1733 char gflags_buf[32];
1734
1735 dtime = jiffies - gl->gl_demote_time;
1736 dtime *= 1000000/HZ; /* demote time in uSec */
1737 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1738 dtime = 0;
1739 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1740 state2str(gl->gl_state),
1741 gl->gl_name.ln_type,
1742 (unsigned long long)gl->gl_name.ln_number,
1743 gflags2str(gflags_buf, gl),
1744 state2str(gl->gl_target),
1745 state2str(gl->gl_demote_state), dtime,
1746 atomic_read(&gl->gl_ail_count),
1747 atomic_read(&gl->gl_revokes),
1748 (int)gl->gl_lockref.count, gl->gl_hold_time);
1749
1750 list_for_each_entry(gh, &gl->gl_holders, gh_list)
1751 dump_holder(seq, gh);
1752
1753 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1754 glops->go_dump(seq, gl);
1755}
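
/*
 * Example output (added commentary; the values below are made up purely
 * for illustration):
 *
 *	G:  s:SH n:2/27bc f:qL t:SH d:EX/0 a:0 v:0 r:3 m:10
 *	 H: s:SH f:H e:0 p:1247 [cat] <caller, printed via %pS>
 *
 * The "G:" line is produced by the gfs2_print_dbg() call above; each
 * following " H:" line comes from dump_holder() for one holder, using
 * the single-letter codes emitted by gflags2str() and hflags2str().
 * Note that d:EX means no demote is pending, since gl_demote_state is
 * reset to LM_ST_EXCLUSIVE when there is no outstanding request.
 */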

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd ||
		 __lockref_is_dead(&gi->gl->gl_lockref));

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;

	gi->nhash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
		return NULL;
	}
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
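
/*
 * Sizing note with arithmetic (added commentary, not in the original):
 * with 4KiB pages and the usual PAGE_ALLOC_COSTLY_ORDER of 3, the shift
 * gives 4096 << 3 = 32768 bytes, so GFS2_SEQ_GOODSIZE is 32KiB; the
 * min() caps it at 64KiB on configurations with larger pages.  A big
 * seq_file buffer keeps a dump of many glocks from repeatedly
 * restarting the iterator on busy filesystems.
 */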
2007
2008static int gfs2_glocks_open(struct inode *inode, struct file *file)
2009{
2010 int ret = seq_open_private(file, &gfs2_glock_seq_ops,
2011 sizeof(struct gfs2_glock_iter));
2012 if (ret == 0) {
2013 struct seq_file *seq = file->private_data;
2014 struct gfs2_glock_iter *gi = seq->private;
2015 gi->sdp = inode->i_private;
2016 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2017 if (seq->buf)
2018 seq->size = GFS2_SEQ_GOODSIZE;
2019 }
2020 return ret;
2021}
2022
2023static int gfs2_glstats_open(struct inode *inode, struct file *file)
2024{
2025 int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
2026 sizeof(struct gfs2_glock_iter));
2027 if (ret == 0) {
2028 struct seq_file *seq = file->private_data;
2029 struct gfs2_glock_iter *gi = seq->private;
2030 gi->sdp = inode->i_private;
2031 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2032 if (seq->buf)
2033 seq->size = GFS2_SEQ_GOODSIZE;
2034 }
2035 return ret;
2036}
2037
static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

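/**
 * gfs2_create_debugfs_file - create the per-filesystem debugfs files
 * @sdp: the incore superblock
 *
 * Creates <debugfs>/gfs2/<table_name>/{glocks,glstats,sbstats}. On
 * any failure, everything created so far is torn down again.
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */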
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

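/**
 * gfs2_delete_debugfs_file - remove the per-filesystem debugfs files
 * @sdp: the incore superblock
 *
 * Safe to call on a partially constructed tree; each dentry pointer
 * is checked and cleared as it is removed.
 */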
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

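/*
 * Module-level debugfs setup: create the top level "gfs2" directory
 * under which the per-filesystem directories are created.
 */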
int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

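/*
 * Module-level debugfs teardown. By this point the per-filesystem
 * directories should all have been removed already, since
 * debugfs_remove() does not recurse.
 */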
void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/buffer_head.h>
14#include <linux/delay.h>
15#include <linux/sort.h>
16#include <linux/jhash.h>
17#include <linux/kallsyms.h>
18#include <linux/gfs2_ondisk.h>
19#include <linux/list.h>
20#include <linux/wait.h>
21#include <linux/module.h>
22#include <asm/uaccess.h>
23#include <linux/seq_file.h>
24#include <linux/debugfs.h>
25#include <linux/kthread.h>
26#include <linux/freezer.h>
27#include <linux/workqueue.h>
28#include <linux/jiffies.h>
29#include <linux/rcupdate.h>
30#include <linux/rculist_bl.h>
31#include <linux/bit_spinlock.h>
32#include <linux/percpu.h>
33
34#include "gfs2.h"
35#include "incore.h"
36#include "glock.h"
37#include "glops.h"
38#include "inode.h"
39#include "lops.h"
40#include "meta_io.h"
41#include "quota.h"
42#include "super.h"
43#include "util.h"
44#include "bmap.h"
45#define CREATE_TRACE_POINTS
46#include "trace_gfs2.h"
47
48struct gfs2_glock_iter {
49 int hash; /* hash bucket index */
50 struct gfs2_sbd *sdp; /* incore superblock */
51 struct gfs2_glock *gl; /* current glock struct */
52 char string[512]; /* scratch space */
53};
54
55typedef void (*glock_examiner) (struct gfs2_glock * gl);
56
57static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
58#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
59static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
60
61static struct dentry *gfs2_root;
62static struct workqueue_struct *glock_workqueue;
63struct workqueue_struct *gfs2_delete_workqueue;
64static LIST_HEAD(lru_list);
65static atomic_t lru_count = ATOMIC_INIT(0);
66static DEFINE_SPINLOCK(lru_lock);
67
68#define GFS2_GL_HASH_SHIFT 15
69#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
70#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
71
72static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
73static struct dentry *gfs2_root;
74
75/**
76 * gl_hash() - Turn glock number into hash bucket number
77 * @lock: The glock number
78 *
79 * Returns: The number of the corresponding hash bucket
80 */
81
82static unsigned int gl_hash(const struct gfs2_sbd *sdp,
83 const struct lm_lockname *name)
84{
85 unsigned int h;
86
87 h = jhash(&name->ln_number, sizeof(u64), 0);
88 h = jhash(&name->ln_type, sizeof(unsigned int), h);
89 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
90 h &= GFS2_GL_HASH_MASK;
91
92 return h;
93}
94
95static inline void spin_lock_bucket(unsigned int hash)
96{
97 hlist_bl_lock(&gl_hash_table[hash]);
98}
99
100static inline void spin_unlock_bucket(unsigned int hash)
101{
102 hlist_bl_unlock(&gl_hash_table[hash]);
103}
104
105static void gfs2_glock_dealloc(struct rcu_head *rcu)
106{
107 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
108
109 if (gl->gl_ops->go_flags & GLOF_ASPACE)
110 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
111 else
112 kmem_cache_free(gfs2_glock_cachep, gl);
113}
114
115void gfs2_glock_free(struct gfs2_glock *gl)
116{
117 struct gfs2_sbd *sdp = gl->gl_sbd;
118
119 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
120 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
121 wake_up(&sdp->sd_glock_wait);
122}
123
124/**
125 * gfs2_glock_hold() - increment reference count on glock
126 * @gl: The glock to hold
127 *
128 */
129
130void gfs2_glock_hold(struct gfs2_glock *gl)
131{
132 GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
133 atomic_inc(&gl->gl_ref);
134}
135
136/**
137 * demote_ok - Check to see if it's ok to unlock a glock
138 * @gl: the glock
139 *
140 * Returns: 1 if it's ok
141 */
142
143static int demote_ok(const struct gfs2_glock *gl)
144{
145 const struct gfs2_glock_operations *glops = gl->gl_ops;
146
147 if (gl->gl_state == LM_ST_UNLOCKED)
148 return 0;
149 if (!list_empty(&gl->gl_holders))
150 return 0;
151 if (glops->go_demote_ok)
152 return glops->go_demote_ok(gl);
153 return 1;
154}
155
156
157void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
158{
159 spin_lock(&lru_lock);
160
161 if (!list_empty(&gl->gl_lru))
162 list_del_init(&gl->gl_lru);
163 else
164 atomic_inc(&lru_count);
165
166 list_add_tail(&gl->gl_lru, &lru_list);
167 set_bit(GLF_LRU, &gl->gl_flags);
168 spin_unlock(&lru_lock);
169}
170
171static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
172{
173 if (!list_empty(&gl->gl_lru)) {
174 list_del_init(&gl->gl_lru);
175 atomic_dec(&lru_count);
176 clear_bit(GLF_LRU, &gl->gl_flags);
177 }
178}
179
180static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
181{
182 spin_lock(&lru_lock);
183 __gfs2_glock_remove_from_lru(gl);
184 spin_unlock(&lru_lock);
185}
186
187/**
188 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
189 * @gl: the glock
190 *
191 * If the glock is demotable, then we add it (or move it) to the end
192 * of the glock LRU list.
193 */
194
195static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
196{
197 if (demote_ok(gl))
198 gfs2_glock_add_to_lru(gl);
199}
200
201/**
202 * gfs2_glock_put_nolock() - Decrement reference count on glock
203 * @gl: The glock to put
204 *
205 * This function should only be used if the caller has its own reference
206 * to the glock, in addition to the one it is dropping.
207 */
208
209void gfs2_glock_put_nolock(struct gfs2_glock *gl)
210{
211 if (atomic_dec_and_test(&gl->gl_ref))
212 GLOCK_BUG_ON(gl, 1);
213}
214
215/**
216 * gfs2_glock_put() - Decrement reference count on glock
217 * @gl: The glock to put
218 *
219 */
220
221void gfs2_glock_put(struct gfs2_glock *gl)
222{
223 struct gfs2_sbd *sdp = gl->gl_sbd;
224 struct address_space *mapping = gfs2_glock2aspace(gl);
225
226 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
227 __gfs2_glock_remove_from_lru(gl);
228 spin_unlock(&lru_lock);
229 spin_lock_bucket(gl->gl_hash);
230 hlist_bl_del_rcu(&gl->gl_list);
231 spin_unlock_bucket(gl->gl_hash);
232 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
233 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
234 trace_gfs2_glock_put(gl);
235 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
236 }
237}
238
239/**
240 * search_bucket() - Find struct gfs2_glock by lock number
241 * @bucket: the bucket to search
242 * @name: The lock name
243 *
244 * Returns: NULL, or the struct gfs2_glock with the requested number
245 */
246
247static struct gfs2_glock *search_bucket(unsigned int hash,
248 const struct gfs2_sbd *sdp,
249 const struct lm_lockname *name)
250{
251 struct gfs2_glock *gl;
252 struct hlist_bl_node *h;
253
254 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
255 if (!lm_name_equal(&gl->gl_name, name))
256 continue;
257 if (gl->gl_sbd != sdp)
258 continue;
259 if (atomic_inc_not_zero(&gl->gl_ref))
260 return gl;
261 }
262
263 return NULL;
264}
265
266/**
267 * may_grant - check if its ok to grant a new lock
268 * @gl: The glock
269 * @gh: The lock request which we wish to grant
270 *
271 * Returns: true if its ok to grant the lock
272 */
273
274static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
275{
276 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
277 if ((gh->gh_state == LM_ST_EXCLUSIVE ||
278 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
279 return 0;
280 if (gl->gl_state == gh->gh_state)
281 return 1;
282 if (gh->gh_flags & GL_EXACT)
283 return 0;
284 if (gl->gl_state == LM_ST_EXCLUSIVE) {
285 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
286 return 1;
287 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
288 return 1;
289 }
290 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
291 return 1;
292 return 0;
293}
294
295static void gfs2_holder_wake(struct gfs2_holder *gh)
296{
297 clear_bit(HIF_WAIT, &gh->gh_iflags);
298 smp_mb__after_clear_bit();
299 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
300}
301
302/**
303 * do_error - Something unexpected has happened during a lock request
304 *
305 */
306
307static inline void do_error(struct gfs2_glock *gl, const int ret)
308{
309 struct gfs2_holder *gh, *tmp;
310
311 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
312 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
313 continue;
314 if (ret & LM_OUT_ERROR)
315 gh->gh_error = -EIO;
316 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
317 gh->gh_error = GLR_TRYFAILED;
318 else
319 continue;
320 list_del_init(&gh->gh_list);
321 trace_gfs2_glock_queue(gh, 0);
322 gfs2_holder_wake(gh);
323 }
324}
325
326/**
327 * do_promote - promote as many requests as possible on the current queue
328 * @gl: The glock
329 *
330 * Returns: 1 if there is a blocked holder at the head of the list, or 2
331 * if a type specific operation is underway.
332 */
333
334static int do_promote(struct gfs2_glock *gl)
335__releases(&gl->gl_spin)
336__acquires(&gl->gl_spin)
337{
338 const struct gfs2_glock_operations *glops = gl->gl_ops;
339 struct gfs2_holder *gh, *tmp;
340 int ret;
341
342restart:
343 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
344 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
345 continue;
346 if (may_grant(gl, gh)) {
347 if (gh->gh_list.prev == &gl->gl_holders &&
348 glops->go_lock) {
349 spin_unlock(&gl->gl_spin);
350 /* FIXME: eliminate this eventually */
351 ret = glops->go_lock(gh);
352 spin_lock(&gl->gl_spin);
353 if (ret) {
354 if (ret == 1)
355 return 2;
356 gh->gh_error = ret;
357 list_del_init(&gh->gh_list);
358 trace_gfs2_glock_queue(gh, 0);
359 gfs2_holder_wake(gh);
360 goto restart;
361 }
362 set_bit(HIF_HOLDER, &gh->gh_iflags);
363 trace_gfs2_promote(gh, 1);
364 gfs2_holder_wake(gh);
365 goto restart;
366 }
367 set_bit(HIF_HOLDER, &gh->gh_iflags);
368 trace_gfs2_promote(gh, 0);
369 gfs2_holder_wake(gh);
370 continue;
371 }
372 if (gh->gh_list.prev == &gl->gl_holders)
373 return 1;
374 do_error(gl, 0);
375 break;
376 }
377 return 0;
378}
379
380/**
381 * find_first_waiter - find the first gh that's waiting for the glock
382 * @gl: the glock
383 */
384
385static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
386{
387 struct gfs2_holder *gh;
388
389 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
390 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
391 return gh;
392 }
393 return NULL;
394}
395
396/**
397 * state_change - record that the glock is now in a different state
398 * @gl: the glock
399 * @new_state the new state
400 *
401 */
402
403static void state_change(struct gfs2_glock *gl, unsigned int new_state)
404{
405 int held1, held2;
406
407 held1 = (gl->gl_state != LM_ST_UNLOCKED);
408 held2 = (new_state != LM_ST_UNLOCKED);
409
410 if (held1 != held2) {
411 if (held2)
412 gfs2_glock_hold(gl);
413 else
414 gfs2_glock_put_nolock(gl);
415 }
416 if (held1 && held2 && list_empty(&gl->gl_holders))
417 clear_bit(GLF_QUEUED, &gl->gl_flags);
418
419 if (new_state != gl->gl_target)
420 /* shorten our minimum hold time */
421 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
422 GL_GLOCK_MIN_HOLD);
423 gl->gl_state = new_state;
424 gl->gl_tchange = jiffies;
425}
426
427static void gfs2_demote_wake(struct gfs2_glock *gl)
428{
429 gl->gl_demote_state = LM_ST_EXCLUSIVE;
430 clear_bit(GLF_DEMOTE, &gl->gl_flags);
431 smp_mb__after_clear_bit();
432 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
433}
434
435/**
436 * finish_xmote - The DLM has replied to one of our lock requests
437 * @gl: The glock
438 * @ret: The status from the DLM
439 *
440 */
441
442static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
443{
444 const struct gfs2_glock_operations *glops = gl->gl_ops;
445 struct gfs2_holder *gh;
446 unsigned state = ret & LM_OUT_ST_MASK;
447 int rv;
448
449 spin_lock(&gl->gl_spin);
450 trace_gfs2_glock_state_change(gl, state);
451 state_change(gl, state);
452 gh = find_first_waiter(gl);
453
454 /* Demote to UN request arrived during demote to SH or DF */
455 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
456 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
457 gl->gl_target = LM_ST_UNLOCKED;
458
459 /* Check for state != intended state */
460 if (unlikely(state != gl->gl_target)) {
461 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
462 /* move to back of queue and try next entry */
463 if (ret & LM_OUT_CANCELED) {
464 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
465 list_move_tail(&gh->gh_list, &gl->gl_holders);
466 gh = find_first_waiter(gl);
467 gl->gl_target = gh->gh_state;
468 goto retry;
469 }
470 /* Some error or failed "try lock" - report it */
471 if ((ret & LM_OUT_ERROR) ||
472 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
473 gl->gl_target = gl->gl_state;
474 do_error(gl, ret);
475 goto out;
476 }
477 }
478 switch(state) {
479 /* Unlocked due to conversion deadlock, try again */
480 case LM_ST_UNLOCKED:
481retry:
482 do_xmote(gl, gh, gl->gl_target);
483 break;
484 /* Conversion fails, unlock and try again */
485 case LM_ST_SHARED:
486 case LM_ST_DEFERRED:
487 do_xmote(gl, gh, LM_ST_UNLOCKED);
488 break;
489 default: /* Everything else */
490 printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
491 GLOCK_BUG_ON(gl, 1);
492 }
493 spin_unlock(&gl->gl_spin);
494 return;
495 }
496
497 /* Fast path - we got what we asked for */
498 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
499 gfs2_demote_wake(gl);
500 if (state != LM_ST_UNLOCKED) {
501 if (glops->go_xmote_bh) {
502 spin_unlock(&gl->gl_spin);
503 rv = glops->go_xmote_bh(gl, gh);
504 spin_lock(&gl->gl_spin);
505 if (rv) {
506 do_error(gl, rv);
507 goto out;
508 }
509 }
510 rv = do_promote(gl);
511 if (rv == 2)
512 goto out_locked;
513 }
514out:
515 clear_bit(GLF_LOCK, &gl->gl_flags);
516out_locked:
517 spin_unlock(&gl->gl_spin);
518}
519
520/**
521 * do_xmote - Calls the DLM to change the state of a lock
522 * @gl: The lock state
523 * @gh: The holder (only for promotes)
524 * @target: The target lock state
525 *
526 */
527
528static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
529__releases(&gl->gl_spin)
530__acquires(&gl->gl_spin)
531{
532 const struct gfs2_glock_operations *glops = gl->gl_ops;
533 struct gfs2_sbd *sdp = gl->gl_sbd;
534 unsigned int lck_flags = gh ? gh->gh_flags : 0;
535 int ret;
536
537 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
538 LM_FLAG_PRIORITY);
539 GLOCK_BUG_ON(gl, gl->gl_state == target);
540 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
541 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
542 glops->go_inval) {
543 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
544 do_error(gl, 0); /* Fail queued try locks */
545 }
546 gl->gl_req = target;
547 set_bit(GLF_BLOCKING, &gl->gl_flags);
548 if ((gl->gl_req == LM_ST_UNLOCKED) ||
549 (gl->gl_state == LM_ST_EXCLUSIVE) ||
550 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
551 clear_bit(GLF_BLOCKING, &gl->gl_flags);
552 spin_unlock(&gl->gl_spin);
553 if (glops->go_xmote_th)
554 glops->go_xmote_th(gl);
555 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
556 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
557 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
558
559 gfs2_glock_hold(gl);
560 if (sdp->sd_lockstruct.ls_ops->lm_lock) {
561 /* lock_dlm */
562 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
563 GLOCK_BUG_ON(gl, ret);
564 } else { /* lock_nolock */
565 finish_xmote(gl, target);
566 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
567 gfs2_glock_put(gl);
568 }
569
570 spin_lock(&gl->gl_spin);
571}
572
573/**
574 * find_first_holder - find the first "holder" gh
575 * @gl: the glock
576 */
577
578static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
579{
580 struct gfs2_holder *gh;
581
582 if (!list_empty(&gl->gl_holders)) {
583 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
584 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
585 return gh;
586 }
587 return NULL;
588}
589
590/**
591 * run_queue - do all outstanding tasks related to a glock
592 * @gl: The glock in question
593 * @nonblock: True if we must not block in run_queue
594 *
595 */
596
597static void run_queue(struct gfs2_glock *gl, const int nonblock)
598__releases(&gl->gl_spin)
599__acquires(&gl->gl_spin)
600{
601 struct gfs2_holder *gh = NULL;
602 int ret;
603
604 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
605 return;
606
607 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
608
609 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
610 gl->gl_demote_state != gl->gl_state) {
611 if (find_first_holder(gl))
612 goto out_unlock;
613 if (nonblock)
614 goto out_sched;
615 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
616 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
617 gl->gl_target = gl->gl_demote_state;
618 } else {
619 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
620 gfs2_demote_wake(gl);
621 ret = do_promote(gl);
622 if (ret == 0)
623 goto out_unlock;
624 if (ret == 2)
625 goto out;
626 gh = find_first_waiter(gl);
627 gl->gl_target = gh->gh_state;
628 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
629 do_error(gl, 0); /* Fail queued try locks */
630 }
631 do_xmote(gl, gh, gl->gl_target);
632out:
633 return;
634
635out_sched:
636 clear_bit(GLF_LOCK, &gl->gl_flags);
637 smp_mb__after_clear_bit();
638 gfs2_glock_hold(gl);
639 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
640 gfs2_glock_put_nolock(gl);
641 return;
642
643out_unlock:
644 clear_bit(GLF_LOCK, &gl->gl_flags);
645 smp_mb__after_clear_bit();
646 return;
647}
648
649static void delete_work_func(struct work_struct *work)
650{
651 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
652 struct gfs2_sbd *sdp = gl->gl_sbd;
653 struct gfs2_inode *ip;
654 struct inode *inode;
655 u64 no_addr = gl->gl_name.ln_number;
656
657 ip = gl->gl_object;
658 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
659
660 if (ip)
661 inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
662 else
663 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
664 if (inode && !IS_ERR(inode)) {
665 d_prune_aliases(inode);
666 iput(inode);
667 }
668 gfs2_glock_put(gl);
669}
670
671static void glock_work_func(struct work_struct *work)
672{
673 unsigned long delay = 0;
674 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
675 int drop_ref = 0;
676
677 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
678 finish_xmote(gl, gl->gl_reply);
679 drop_ref = 1;
680 }
681 spin_lock(&gl->gl_spin);
682 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
683 gl->gl_state != LM_ST_UNLOCKED &&
684 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
685 unsigned long holdtime, now = jiffies;
686
687 holdtime = gl->gl_tchange + gl->gl_hold_time;
688 if (time_before(now, holdtime))
689 delay = holdtime - now;
690
691 if (!delay) {
692 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
693 set_bit(GLF_DEMOTE, &gl->gl_flags);
694 }
695 }
696 run_queue(gl, 0);
697 spin_unlock(&gl->gl_spin);
698 if (!delay)
699 gfs2_glock_put(gl);
700 else {
701 if (gl->gl_name.ln_type != LM_TYPE_INODE)
702 delay = 0;
703 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
704 gfs2_glock_put(gl);
705 }
706 if (drop_ref)
707 gfs2_glock_put(gl);
708}
709
710/**
711 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
712 * @sdp: The GFS2 superblock
713 * @number: the lock number
714 * @glops: The glock_operations to use
715 * @create: If 0, don't create the glock if it doesn't exist
716 * @glp: the glock is returned here
717 *
718 * This does not lock a glock, just finds/creates structures for one.
719 *
720 * Returns: errno
721 */
722
723int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
724 const struct gfs2_glock_operations *glops, int create,
725 struct gfs2_glock **glp)
726{
727 struct super_block *s = sdp->sd_vfs;
728 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
729 struct gfs2_glock *gl, *tmp;
730 unsigned int hash = gl_hash(sdp, &name);
731 struct address_space *mapping;
732 struct kmem_cache *cachep;
733
734 rcu_read_lock();
735 gl = search_bucket(hash, sdp, &name);
736 rcu_read_unlock();
737
738 *glp = gl;
739 if (gl)
740 return 0;
741 if (!create)
742 return -ENOENT;
743
744 if (glops->go_flags & GLOF_ASPACE)
745 cachep = gfs2_glock_aspace_cachep;
746 else
747 cachep = gfs2_glock_cachep;
748 gl = kmem_cache_alloc(cachep, GFP_KERNEL);
749 if (!gl)
750 return -ENOMEM;
751
752 atomic_inc(&sdp->sd_glock_disposal);
753 gl->gl_sbd = sdp;
754 gl->gl_flags = 0;
755 gl->gl_name = name;
756 atomic_set(&gl->gl_ref, 1);
757 gl->gl_state = LM_ST_UNLOCKED;
758 gl->gl_target = LM_ST_UNLOCKED;
759 gl->gl_demote_state = LM_ST_EXCLUSIVE;
760 gl->gl_hash = hash;
761 gl->gl_ops = glops;
762 gl->gl_dstamp = ktime_set(0, 0);
763 preempt_disable();
764 /* We use the global stats to estimate the initial per-glock stats */
765 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
766 preempt_enable();
767 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
768 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
769 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
770 gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
771 gl->gl_tchange = jiffies;
772 gl->gl_object = NULL;
773 gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
774 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
775 INIT_WORK(&gl->gl_delete, delete_work_func);
776
777 mapping = gfs2_glock2aspace(gl);
778 if (mapping) {
779 mapping->a_ops = &gfs2_meta_aops;
780 mapping->host = s->s_bdev->bd_inode;
781 mapping->flags = 0;
782 mapping_set_gfp_mask(mapping, GFP_NOFS);
783 mapping->assoc_mapping = NULL;
784 mapping->backing_dev_info = s->s_bdi;
785 mapping->writeback_index = 0;
786 }
787
788 spin_lock_bucket(hash);
789 tmp = search_bucket(hash, sdp, &name);
790 if (tmp) {
791 spin_unlock_bucket(hash);
792 kmem_cache_free(cachep, gl);
793 atomic_dec(&sdp->sd_glock_disposal);
794 gl = tmp;
795 } else {
796 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
797 spin_unlock_bucket(hash);
798 }
799
800 *glp = gl;
801
802 return 0;
803}
804
805/**
806 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
807 * @gl: the glock
808 * @state: the state we're requesting
809 * @flags: the modifier flags
810 * @gh: the holder structure
811 *
812 */
813
814void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
815 struct gfs2_holder *gh)
816{
817 INIT_LIST_HEAD(&gh->gh_list);
818 gh->gh_gl = gl;
819 gh->gh_ip = (unsigned long)__builtin_return_address(0);
820 gh->gh_owner_pid = get_pid(task_pid(current));
821 gh->gh_state = state;
822 gh->gh_flags = flags;
823 gh->gh_error = 0;
824 gh->gh_iflags = 0;
825 gfs2_glock_hold(gl);
826}
827
828/**
829 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
830 * @state: the state we're requesting
831 * @flags: the modifier flags
832 * @gh: the holder structure
833 *
834 * Don't mess with the glock.
835 *
836 */
837
838void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
839{
840 gh->gh_state = state;
841 gh->gh_flags = flags;
842 gh->gh_iflags = 0;
843 gh->gh_ip = (unsigned long)__builtin_return_address(0);
844 if (gh->gh_owner_pid)
845 put_pid(gh->gh_owner_pid);
846 gh->gh_owner_pid = get_pid(task_pid(current));
847}
848
849/**
850 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
851 * @gh: the holder structure
852 *
853 */
854
855void gfs2_holder_uninit(struct gfs2_holder *gh)
856{
857 put_pid(gh->gh_owner_pid);
858 gfs2_glock_put(gh->gh_gl);
859 gh->gh_gl = NULL;
860 gh->gh_ip = 0;
861}
862
863/**
864 * gfs2_glock_holder_wait
865 * @word: unused
866 *
867 * This function and gfs2_glock_demote_wait both show up in the WCHAN
868 * field. Thus I've separated these otherwise identical functions in
869 * order to be more informative to the user.
870 */
871
872static int gfs2_glock_holder_wait(void *word)
873{
874 schedule();
875 return 0;
876}
877
878static int gfs2_glock_demote_wait(void *word)
879{
880 schedule();
881 return 0;
882}
883
884static void wait_on_holder(struct gfs2_holder *gh)
885{
886 unsigned long time1 = jiffies;
887
888 might_sleep();
889 wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
890 if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
891 /* Lengthen the minimum hold time. */
892 gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
893 GL_GLOCK_HOLD_INCR,
894 GL_GLOCK_MAX_HOLD);
895}
896
897static void wait_on_demote(struct gfs2_glock *gl)
898{
899 might_sleep();
900 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
901}
902
903/**
904 * handle_callback - process a demote request
905 * @gl: the glock
906 * @state: the state the caller wants us to change to
907 *
908 * There are only two requests that we are going to see in actual
909 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
910 */
911
912static void handle_callback(struct gfs2_glock *gl, unsigned int state,
913 unsigned long delay)
914{
915 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
916
917 set_bit(bit, &gl->gl_flags);
918 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
919 gl->gl_demote_state = state;
920 gl->gl_demote_time = jiffies;
921 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
922 gl->gl_demote_state != state) {
923 gl->gl_demote_state = LM_ST_UNLOCKED;
924 }
925 if (gl->gl_ops->go_callback)
926 gl->gl_ops->go_callback(gl);
927 trace_gfs2_demote_rq(gl);
928}
929
930/**
931 * gfs2_glock_wait - wait on a glock acquisition
932 * @gh: the glock holder
933 *
934 * Returns: 0 on success
935 */
936
937int gfs2_glock_wait(struct gfs2_holder *gh)
938{
939 wait_on_holder(gh);
940 return gh->gh_error;
941}
942
943void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
944{
945 struct va_format vaf;
946 va_list args;
947
948 va_start(args, fmt);
949
950 if (seq) {
951 struct gfs2_glock_iter *gi = seq->private;
952 vsprintf(gi->string, fmt, args);
953 seq_printf(seq, gi->string);
954 } else {
955 vaf.fmt = fmt;
956 vaf.va = &args;
957
958 printk(KERN_ERR " %pV", &vaf);
959 }
960
961 va_end(args);
962}
963
964/**
965 * add_to_queue - Add a holder to the wait queue (but look for recursion)
966 * @gh: the holder structure to add
967 *
968 * Eventually we should move the recursive locking trap to a
969 * debugging option or something like that. This is the fast
970 * path and needs to have the minimum number of distractions.
971 *
972 */
973
974static inline void add_to_queue(struct gfs2_holder *gh)
975__releases(&gl->gl_spin)
976__acquires(&gl->gl_spin)
977{
978 struct gfs2_glock *gl = gh->gh_gl;
979 struct gfs2_sbd *sdp = gl->gl_sbd;
980 struct list_head *insert_pt = NULL;
981 struct gfs2_holder *gh2;
982 int try_lock = 0;
983
984 BUG_ON(gh->gh_owner_pid == NULL);
985 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
986 BUG();
987
988 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
989 if (test_bit(GLF_LOCK, &gl->gl_flags))
990 try_lock = 1;
991 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
992 goto fail;
993 }
994
995 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
996 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
997 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
998 goto trap_recursive;
999 if (try_lock &&
1000 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
1001 !may_grant(gl, gh)) {
1002fail:
1003 gh->gh_error = GLR_TRYFAILED;
1004 gfs2_holder_wake(gh);
1005 return;
1006 }
1007 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1008 continue;
1009 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1010 insert_pt = &gh2->gh_list;
1011 }
1012 set_bit(GLF_QUEUED, &gl->gl_flags);
1013 trace_gfs2_glock_queue(gh, 1);
1014 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1015 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1016 if (likely(insert_pt == NULL)) {
1017 list_add_tail(&gh->gh_list, &gl->gl_holders);
1018 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1019 goto do_cancel;
1020 return;
1021 }
1022 list_add_tail(&gh->gh_list, insert_pt);
1023do_cancel:
1024 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1025 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1026 spin_unlock(&gl->gl_spin);
1027 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1028 sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1029 spin_lock(&gl->gl_spin);
1030 }
1031 return;
1032
1033trap_recursive:
1034 print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1035 printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1036 printk(KERN_ERR "lock type: %d req lock state : %d\n",
1037 gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1038 print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1039 printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1040 printk(KERN_ERR "lock type: %d req lock state : %d\n",
1041 gh->gh_gl->gl_name.ln_type, gh->gh_state);
1042 __dump_glock(NULL, gl);
1043 BUG();
1044}
1045
1046/**
1047 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1048 * @gh: the holder structure
1049 *
1050 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1051 *
1052 * Returns: 0, GLR_TRYFAILED, or errno on failure
1053 */
1054
1055int gfs2_glock_nq(struct gfs2_holder *gh)
1056{
1057 struct gfs2_glock *gl = gh->gh_gl;
1058 struct gfs2_sbd *sdp = gl->gl_sbd;
1059 int error = 0;
1060
1061 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1062 return -EIO;
1063
1064 if (test_bit(GLF_LRU, &gl->gl_flags))
1065 gfs2_glock_remove_from_lru(gl);
1066
1067 spin_lock(&gl->gl_spin);
1068 add_to_queue(gh);
1069 if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1070 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1071 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1072 run_queue(gl, 1);
1073 spin_unlock(&gl->gl_spin);
1074
1075 if (!(gh->gh_flags & GL_ASYNC))
1076 error = gfs2_glock_wait(gh);
1077
1078 return error;
1079}
1080
1081/**
1082 * gfs2_glock_poll - poll to see if an async request has been completed
1083 * @gh: the holder
1084 *
1085 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1086 */
1087
1088int gfs2_glock_poll(struct gfs2_holder *gh)
1089{
1090 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1091}
1092
1093/**
1094 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1095 * @gh: the glock holder
1096 *
1097 */
1098
1099void gfs2_glock_dq(struct gfs2_holder *gh)
1100{
1101 struct gfs2_glock *gl = gh->gh_gl;
1102 const struct gfs2_glock_operations *glops = gl->gl_ops;
1103 unsigned delay = 0;
1104 int fast_path = 0;
1105
1106 spin_lock(&gl->gl_spin);
1107 if (gh->gh_flags & GL_NOCACHE)
1108 handle_callback(gl, LM_ST_UNLOCKED, 0);
1109
1110 list_del_init(&gh->gh_list);
1111 if (find_first_holder(gl) == NULL) {
1112 if (glops->go_unlock) {
1113 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1114 spin_unlock(&gl->gl_spin);
1115 glops->go_unlock(gh);
1116 spin_lock(&gl->gl_spin);
1117 clear_bit(GLF_LOCK, &gl->gl_flags);
1118 }
1119 if (list_empty(&gl->gl_holders) &&
1120 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1121 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1122 fast_path = 1;
1123 }
1124 if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1125 __gfs2_glock_schedule_for_reclaim(gl);
1126 trace_gfs2_glock_queue(gh, 0);
1127 spin_unlock(&gl->gl_spin);
1128 if (likely(fast_path))
1129 return;
1130
1131 gfs2_glock_hold(gl);
1132 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1133 !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1134 gl->gl_name.ln_type == LM_TYPE_INODE)
1135 delay = gl->gl_hold_time;
1136 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1137 gfs2_glock_put(gl);
1138}
1139
1140void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1141{
1142 struct gfs2_glock *gl = gh->gh_gl;
1143 gfs2_glock_dq(gh);
1144 wait_on_demote(gl);
1145}
1146
1147/**
1148 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1149 * @gh: the holder structure
1150 *
1151 */
1152
1153void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1154{
1155 gfs2_glock_dq(gh);
1156 gfs2_holder_uninit(gh);
1157}
1158
1159/**
1160 * gfs2_glock_nq_num - acquire a glock based on lock number
1161 * @sdp: the filesystem
1162 * @number: the lock number
1163 * @glops: the glock operations for the type of glock
1164 * @state: the state to acquire the glock in
1165 * @flags: modifier flags for the acquisition
1166 * @gh: the struct gfs2_holder
1167 *
1168 * Returns: errno
1169 */
1170
1171int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1172 const struct gfs2_glock_operations *glops,
1173 unsigned int state, int flags, struct gfs2_holder *gh)
1174{
1175 struct gfs2_glock *gl;
1176 int error;
1177
1178 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1179 if (!error) {
1180 error = gfs2_glock_nq_init(gl, state, flags, gh);
1181 gfs2_glock_put(gl);
1182 }
1183
1184 return error;
1185}
1186
1187/**
1188 * glock_compare - Compare two struct gfs2_glock structures for sorting
1189 * @arg_a: the first structure
1190 * @arg_b: the second structure
1191 *
1192 */
1193
1194static int glock_compare(const void *arg_a, const void *arg_b)
1195{
1196 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1197 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1198 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1199 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1200
1201 if (a->ln_number > b->ln_number)
1202 return 1;
1203 if (a->ln_number < b->ln_number)
1204 return -1;
1205 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1206 return 0;
1207}
1208
1209/**
1210 * nq_m_sync - synchonously acquire more than one glock in deadlock free order
1211 * @num_gh: the number of structures
1212 * @ghs: an array of struct gfs2_holder structures
1213 *
1214 * Returns: 0 on success (all glocks acquired),
1215 * errno on failure (no glocks acquired)
1216 */
1217
1218static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1219 struct gfs2_holder **p)
1220{
1221 unsigned int x;
1222 int error = 0;
1223
1224 for (x = 0; x < num_gh; x++)
1225 p[x] = &ghs[x];
1226
1227 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1228
1229 for (x = 0; x < num_gh; x++) {
1230 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1231
1232 error = gfs2_glock_nq(p[x]);
1233 if (error) {
1234 while (x--)
1235 gfs2_glock_dq(p[x]);
1236 break;
1237 }
1238 }
1239
1240 return error;
1241}
1242
1243/**
1244 * gfs2_glock_nq_m - acquire multiple glocks
1245 * @num_gh: the number of structures
1246 * @ghs: an array of struct gfs2_holder structures
1247 *
1248 *
1249 * Returns: 0 on success (all glocks acquired),
1250 * errno on failure (no glocks acquired)
1251 */
1252
1253int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1254{
1255 struct gfs2_holder *tmp[4];
1256 struct gfs2_holder **pph = tmp;
1257 int error = 0;
1258
1259 switch(num_gh) {
1260 case 0:
1261 return 0;
1262 case 1:
1263 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1264 return gfs2_glock_nq(ghs);
1265 default:
1266 if (num_gh <= 4)
1267 break;
1268 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1269 if (!pph)
1270 return -ENOMEM;
1271 }
1272
1273 error = nq_m_sync(num_gh, ghs, pph);
1274
1275 if (pph != tmp)
1276 kfree(pph);
1277
1278 return error;
1279}
1280
1281/**
1282 * gfs2_glock_dq_m - release multiple glocks
1283 * @num_gh: the number of structures
1284 * @ghs: an array of struct gfs2_holder structures
1285 *
1286 */
1287
1288void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1289{
1290 while (num_gh--)
1291 gfs2_glock_dq(&ghs[num_gh]);
1292}
1293
1294/**
1295 * gfs2_glock_dq_uninit_m - release multiple glocks
1296 * @num_gh: the number of structures
1297 * @ghs: an array of struct gfs2_holder structures
1298 *
1299 */
1300
1301void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1302{
1303 while (num_gh--)
1304 gfs2_glock_dq_uninit(&ghs[num_gh]);
1305}
1306
1307void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1308{
1309 unsigned long delay = 0;
1310 unsigned long holdtime;
1311 unsigned long now = jiffies;
1312
1313 gfs2_glock_hold(gl);
1314 holdtime = gl->gl_tchange + gl->gl_hold_time;
1315 if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1316 gl->gl_name.ln_type == LM_TYPE_INODE) {
1317 if (time_before(now, holdtime))
1318 delay = holdtime - now;
1319 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1320 delay = gl->gl_hold_time;
1321 }
1322
1323 spin_lock(&gl->gl_spin);
1324 handle_callback(gl, state, delay);
1325 spin_unlock(&gl->gl_spin);
1326 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1327 gfs2_glock_put(gl);
1328}
1329
1330/**
1331 * gfs2_should_freeze - Figure out if glock should be frozen
1332 * @gl: The glock in question
1333 *
1334 * Glocks are not frozen if (a) the result of the dlm operation is
1335 * an error, (b) the locking operation was an unlock operation or
1336 * (c) if there is a "noexp" flagged request anywhere in the queue
1337 *
1338 * Returns: 1 if freezing should occur, 0 otherwise
1339 */
1340
1341static int gfs2_should_freeze(const struct gfs2_glock *gl)
1342{
1343 const struct gfs2_holder *gh;
1344
1345 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1346 return 0;
1347 if (gl->gl_target == LM_ST_UNLOCKED)
1348 return 0;
1349
1350 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1351 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1352 continue;
1353 if (LM_FLAG_NOEXP & gh->gh_flags)
1354 return 0;
1355 }
1356
1357 return 1;
1358}
1359
1360/**
1361 * gfs2_glock_complete - Callback used by locking
1362 * @gl: Pointer to the glock
1363 * @ret: The return value from the dlm
1364 *
1365 * The gl_reply field is under the gl_spin lock so that it is ok
1366 * to use a bitfield shared with other glock state fields.
1367 */
1368
1369void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1370{
1371 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1372
1373 spin_lock(&gl->gl_spin);
1374 gl->gl_reply = ret;
1375
1376 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1377 if (gfs2_should_freeze(gl)) {
1378 set_bit(GLF_FROZEN, &gl->gl_flags);
1379 spin_unlock(&gl->gl_spin);
1380 return;
1381 }
1382 }
1383
1384 spin_unlock(&gl->gl_spin);
1385 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1386 smp_wmb();
1387 gfs2_glock_hold(gl);
1388 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1389 gfs2_glock_put(gl);
1390}
1391
1392
1393static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1394 struct shrink_control *sc)
1395{
1396 struct gfs2_glock *gl;
1397 int may_demote;
1398 int nr_skipped = 0;
1399 int nr = sc->nr_to_scan;
1400 gfp_t gfp_mask = sc->gfp_mask;
1401 LIST_HEAD(skipped);
1402
1403 if (nr == 0)
1404 goto out;
1405
1406 if (!(gfp_mask & __GFP_FS))
1407 return -1;
1408
1409 spin_lock(&lru_lock);
1410 while(nr && !list_empty(&lru_list)) {
1411 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1412 list_del_init(&gl->gl_lru);
1413 clear_bit(GLF_LRU, &gl->gl_flags);
1414 atomic_dec(&lru_count);
1415
1416 /* Test for being demotable */
1417 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1418 gfs2_glock_hold(gl);
1419 spin_unlock(&lru_lock);
1420 spin_lock(&gl->gl_spin);
1421 may_demote = demote_ok(gl);
1422 if (may_demote) {
1423 handle_callback(gl, LM_ST_UNLOCKED, 0);
1424 nr--;
1425 }
1426 clear_bit(GLF_LOCK, &gl->gl_flags);
1427 smp_mb__after_clear_bit();
1428 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1429 gfs2_glock_put_nolock(gl);
1430 spin_unlock(&gl->gl_spin);
1431 spin_lock(&lru_lock);
1432 continue;
1433 }
1434 nr_skipped++;
1435 list_add(&gl->gl_lru, &skipped);
1436 set_bit(GLF_LRU, &gl->gl_flags);
1437 }
1438 list_splice(&skipped, &lru_list);
1439 atomic_add(nr_skipped, &lru_count);
1440 spin_unlock(&lru_lock);
1441out:
1442 return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1443}
1444
1445static struct shrinker glock_shrinker = {
1446 .shrink = gfs2_shrink_glock_memory,
1447 .seeks = DEFAULT_SEEKS,
1448};
1449
1450/**
1451 * examine_bucket - Call a function for glock in a hash bucket
1452 * @examiner: the function
1453 * @sdp: the filesystem
1454 * @bucket: the bucket
1455 *
1456 */
1457
1458static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1459 unsigned int hash)
1460{
1461 struct gfs2_glock *gl;
1462 struct hlist_bl_head *head = &gl_hash_table[hash];
1463 struct hlist_bl_node *pos;
1464
1465 rcu_read_lock();
1466 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1467 if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1468 examiner(gl);
1469 }
1470 rcu_read_unlock();
1471 cond_resched();
1472}
1473
1474static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1475{
1476 unsigned x;
1477
1478 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1479 examine_bucket(examiner, sdp, x);
1480}
1481
1482
1483/**
1484 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1485 * @gl: The glock to thaw
1486 *
1487 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1488 * so this has to result in the ref count being dropped by one.
1489 */
1490
1491static void thaw_glock(struct gfs2_glock *gl)
1492{
1493 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1494 return;
1495 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1496 gfs2_glock_hold(gl);
1497 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1498 gfs2_glock_put(gl);
1499}
1500
1501/**
1502 * clear_glock - look at a glock and see if we can free it from glock cache
1503 * @gl: the glock to look at
1504 *
1505 */
1506
1507static void clear_glock(struct gfs2_glock *gl)
1508{
1509 gfs2_glock_remove_from_lru(gl);
1510
1511 spin_lock(&gl->gl_spin);
1512 if (gl->gl_state != LM_ST_UNLOCKED)
1513 handle_callback(gl, LM_ST_UNLOCKED, 0);
1514 spin_unlock(&gl->gl_spin);
1515 gfs2_glock_hold(gl);
1516 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1517 gfs2_glock_put(gl);
1518}
1519
1520/**
1521 * gfs2_glock_thaw - Thaw any frozen glocks
1522 * @sdp: The super block
1523 *
1524 */
1525
1526void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1527{
1528 glock_hash_walk(thaw_glock, sdp);
1529}
1530
1531static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1532{
1533 int ret;
1534 spin_lock(&gl->gl_spin);
1535 ret = __dump_glock(seq, gl);
1536 spin_unlock(&gl->gl_spin);
1537 return ret;
1538}
1539
1540static void dump_glock_func(struct gfs2_glock *gl)
1541{
1542 dump_glock(NULL, gl);
1543}
1544
1545/**
1546 * gfs2_gl_hash_clear - Empty out the glock hash table
1547 * @sdp: the filesystem
1548 * @wait: wait until it's all gone
1549 *
1550 * Called when unmounting the filesystem.
1551 */
1552
1553void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1554{
1555 glock_hash_walk(clear_glock, sdp);
1556 flush_workqueue(glock_workqueue);
1557 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1558 glock_hash_walk(dump_glock_func, sdp);
1559}
1560
1561void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1562{
1563 struct gfs2_glock *gl = ip->i_gl;
1564 int ret;
1565
1566 ret = gfs2_truncatei_resume(ip);
1567 gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1568
1569 spin_lock(&gl->gl_spin);
1570 clear_bit(GLF_LOCK, &gl->gl_flags);
1571 run_queue(gl, 1);
1572 spin_unlock(&gl->gl_spin);
1573}
1574
1575static const char *state2str(unsigned state)
1576{
1577 switch(state) {
1578 case LM_ST_UNLOCKED:
1579 return "UN";
1580 case LM_ST_SHARED:
1581 return "SH";
1582 case LM_ST_DEFERRED:
1583 return "DF";
1584 case LM_ST_EXCLUSIVE:
1585 return "EX";
1586 }
1587 return "??";
1588}
1589
1590static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1591{
1592 char *p = buf;
1593 if (flags & LM_FLAG_TRY)
1594 *p++ = 't';
1595 if (flags & LM_FLAG_TRY_1CB)
1596 *p++ = 'T';
1597 if (flags & LM_FLAG_NOEXP)
1598 *p++ = 'e';
1599 if (flags & LM_FLAG_ANY)
1600 *p++ = 'A';
1601 if (flags & LM_FLAG_PRIORITY)
1602 *p++ = 'p';
1603 if (flags & GL_ASYNC)
1604 *p++ = 'a';
1605 if (flags & GL_EXACT)
1606 *p++ = 'E';
1607 if (flags & GL_NOCACHE)
1608 *p++ = 'c';
1609 if (test_bit(HIF_HOLDER, &iflags))
1610 *p++ = 'H';
1611 if (test_bit(HIF_WAIT, &iflags))
1612 *p++ = 'W';
1613 if (test_bit(HIF_FIRST, &iflags))
1614 *p++ = 'F';
1615 *p = 0;
1616 return buf;
1617}
1618
1619/**
1620 * dump_holder - print information about a glock holder
1621 * @seq: the seq_file struct
1622 * @gh: the glock holder
1623 *
1624 * Returns: 0 on success, -ENOBUFS when we run out of space
1625 */
1626
1627static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1628{
1629 struct task_struct *gh_owner = NULL;
1630 char flags_buf[32];
1631
1632 if (gh->gh_owner_pid)
1633 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1634 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1635 state2str(gh->gh_state),
1636 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1637 gh->gh_error,
1638 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1639 gh_owner ? gh_owner->comm : "(ended)",
1640 (void *)gh->gh_ip);
1641 return 0;
1642}
1643
1644static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1645{
1646 const unsigned long *gflags = &gl->gl_flags;
1647 char *p = buf;
1648
1649 if (test_bit(GLF_LOCK, gflags))
1650 *p++ = 'l';
1651 if (test_bit(GLF_DEMOTE, gflags))
1652 *p++ = 'D';
1653 if (test_bit(GLF_PENDING_DEMOTE, gflags))
1654 *p++ = 'd';
1655 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1656 *p++ = 'p';
1657 if (test_bit(GLF_DIRTY, gflags))
1658 *p++ = 'y';
1659 if (test_bit(GLF_LFLUSH, gflags))
1660 *p++ = 'f';
1661 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1662 *p++ = 'i';
1663 if (test_bit(GLF_REPLY_PENDING, gflags))
1664 *p++ = 'r';
1665 if (test_bit(GLF_INITIAL, gflags))
1666 *p++ = 'I';
1667 if (test_bit(GLF_FROZEN, gflags))
1668 *p++ = 'F';
1669 if (test_bit(GLF_QUEUED, gflags))
1670 *p++ = 'q';
1671 if (test_bit(GLF_LRU, gflags))
1672 *p++ = 'L';
1673 if (gl->gl_object)
1674 *p++ = 'o';
1675 if (test_bit(GLF_BLOCKING, gflags))
1676 *p++ = 'b';
1677 *p = 0;
1678 return buf;
1679}
1680
1681/**
1682 * __dump_glock - print information about a glock
1683 * @seq: The seq_file struct
1684 * @gl: the glock
1685 *
1686 * The file format is as follows:
1687 * One line per object, capital letters are used to indicate objects
1688 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1689 * other objects are indented by a single space and follow the glock to
1690 * which they are related. Fields are indicated by lower case letters
1691 * followed by a colon and the field value, except for strings which are in
1692 * [] so that its possible to see if they are composed of spaces for
1693 * example. The field's are n = number (id of the object), f = flags,
1694 * t = type, s = state, r = refcount, e = error, p = pid.
1695 *
1696 * Returns: 0 on success, -ENOBUFS when we run out of space
1697 */
1698
1699static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1700{
1701 const struct gfs2_glock_operations *glops = gl->gl_ops;
1702 unsigned long long dtime;
1703 const struct gfs2_holder *gh;
1704 char gflags_buf[32];
1705 int error = 0;
1706
1707 dtime = jiffies - gl->gl_demote_time;
1708 dtime *= 1000000/HZ; /* demote time in uSec */
1709 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1710 dtime = 0;
1711 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1712 state2str(gl->gl_state),
1713 gl->gl_name.ln_type,
1714 (unsigned long long)gl->gl_name.ln_number,
1715 gflags2str(gflags_buf, gl),
1716 state2str(gl->gl_target),
1717 state2str(gl->gl_demote_state), dtime,
1718 atomic_read(&gl->gl_ail_count),
1719 atomic_read(&gl->gl_revokes),
1720 atomic_read(&gl->gl_ref), gl->gl_hold_time);
1721
1722 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1723 error = dump_holder(seq, gh);
1724 if (error)
1725 goto out;
1726 }
1727 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1728 error = glops->go_dump(seq, gl);
1729out:
1730 return error;
1731}
1732
1733static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1734{
1735 struct gfs2_glock *gl = iter_ptr;
1736
1737 seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
1738 gl->gl_name.ln_type,
1739 (unsigned long long)gl->gl_name.ln_number,
1740 (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1741 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1742 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1743 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1744 (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1745 (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1746 (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1747 (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1748 return 0;
1749}
1750
1751static const char *gfs2_gltype[] = {
1752 "type",
1753 "reserved",
1754 "nondisk",
1755 "inode",
1756 "rgrp",
1757 "meta",
1758 "iopen",
1759 "flock",
1760 "plock",
1761 "quota",
1762 "journal",
1763};
1764
1765static const char *gfs2_stype[] = {
1766 [GFS2_LKS_SRTT] = "srtt",
1767 [GFS2_LKS_SRTTVAR] = "srttvar",
1768 [GFS2_LKS_SRTTB] = "srttb",
1769 [GFS2_LKS_SRTTVARB] = "srttvarb",
1770 [GFS2_LKS_SIRT] = "sirt",
1771 [GFS2_LKS_SIRTVAR] = "sirtvar",
1772 [GFS2_LKS_DCOUNT] = "dlm",
1773 [GFS2_LKS_QCOUNT] = "queue",
1774};
1775
1776#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1777
1778static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1779{
1780 struct gfs2_glock_iter *gi = seq->private;
1781 struct gfs2_sbd *sdp = gi->sdp;
1782 unsigned index = gi->hash >> 3;
1783 unsigned subindex = gi->hash & 0x07;
1784 s64 value;
1785 int i;
1786
1787 if (index == 0 && subindex != 0)
1788 return 0;
1789
1790 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1791 (index == 0) ? "cpu": gfs2_stype[subindex]);
1792
1793 for_each_possible_cpu(i) {
1794 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1795 if (index == 0) {
1796 value = i;
1797 } else {
1798 value = lkstats->lkstats[index - 1].stats[subindex];
1799 }
1800 seq_printf(seq, " %15lld", (long long)value);
1801 }
1802 seq_putc(seq, '\n');
1803 return 0;
1804}
1805
1806int __init gfs2_glock_init(void)
1807{
1808 unsigned i;
1809 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1810 INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1811 }
1812
1813 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1814 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1815 if (IS_ERR(glock_workqueue))
1816 return PTR_ERR(glock_workqueue);
1817 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1818 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1819 0);
1820 if (IS_ERR(gfs2_delete_workqueue)) {
1821 destroy_workqueue(glock_workqueue);
1822 return PTR_ERR(gfs2_delete_workqueue);
1823 }
1824
1825 register_shrinker(&glock_shrinker);
1826
1827 return 0;
1828}
1829
1830void gfs2_glock_exit(void)
1831{
1832 unregister_shrinker(&glock_shrinker);
1833 destroy_workqueue(glock_workqueue);
1834 destroy_workqueue(gfs2_delete_workqueue);
1835}
1836
1837static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1838{
1839 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1840 struct gfs2_glock, gl_list);
1841}
1842
1843static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1844{
1845 return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1846 struct gfs2_glock, gl_list);
1847}
1848
1849static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1850{
1851 struct gfs2_glock *gl;
1852
1853 do {
1854 gl = gi->gl;
1855 if (gl) {
1856 gi->gl = glock_hash_next(gl);
1857 } else {
1858 gi->gl = glock_hash_chain(gi->hash);
1859 }
1860 while (gi->gl == NULL) {
1861 gi->hash++;
1862 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1863 rcu_read_unlock();
1864 return 1;
1865 }
1866 gi->gl = glock_hash_chain(gi->hash);
1867 }
1868 /* Skip entries for other sb and dead entries */
1869 } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1870
1871 return 0;
1872}
1873
1874static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1875{
1876 struct gfs2_glock_iter *gi = seq->private;
1877 loff_t n = *pos;
1878
1879 gi->hash = 0;
1880 rcu_read_lock();
1881
1882 do {
1883 if (gfs2_glock_iter_next(gi))
1884 return NULL;
1885 } while (n--);
1886
1887 return gi->gl;
1888}
1889
1890static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1891 loff_t *pos)
1892{
1893 struct gfs2_glock_iter *gi = seq->private;
1894
1895 (*pos)++;
1896
1897 if (gfs2_glock_iter_next(gi))
1898 return NULL;
1899
1900 return gi->gl;
1901}
1902
1903static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1904{
1905 struct gfs2_glock_iter *gi = seq->private;
1906
1907 if (gi->gl)
1908 rcu_read_unlock();
1909 gi->gl = NULL;
1910}
1911
1912static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1913{
1914 return dump_glock(seq, iter_ptr);
1915}
1916
1917static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1918{
1919 struct gfs2_glock_iter *gi = seq->private;
1920
1921 gi->hash = *pos;
1922 if (*pos >= GFS2_NR_SBSTATS)
1923 return NULL;
1924 preempt_disable();
1925 return SEQ_START_TOKEN;
1926}
1927
1928static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1929 loff_t *pos)
1930{
1931 struct gfs2_glock_iter *gi = seq->private;
1932 (*pos)++;
1933 gi->hash++;
1934 if (gi->hash >= GFS2_NR_SBSTATS) {
1935 preempt_enable();
1936 return NULL;
1937 }
1938 return SEQ_START_TOKEN;
1939}
1940
1941static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1942{
1943 preempt_enable();
1944}
1945
static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

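/*
 * The open routines allocate the private gfs2_glock_iter via
 * seq_open_private() and record the superblock from inode->i_private,
 * which debugfs_create_file() (below) sets to the sdp passed in as
 * the data argument.
 */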
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

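/*
 * file_operations wiring the seq_file helpers to the debugfs entries.
 * The release callback must be seq_release_private so that the
 * iterator allocated by seq_open_private() at open time is freed.
 */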
static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

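/**
 * gfs2_create_debugfs_file() - Build the per-sb debugfs directory
 * @sdp: the incore superblock
 *
 * Creates the <debugfs>/gfs2/<table name>/ directory holding the
 * glocks, glstats and sbstats files. On failure, anything created so
 * far is torn down again via gfs2_delete_debugfs_file().
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */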
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

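/**
 * gfs2_delete_debugfs_file() - Remove the per-sb debugfs directory
 * @sdp: the incore superblock
 *
 * Safe to call on a partially constructed directory, as the error
 * path of gfs2_create_debugfs_file() does: each dentry pointer is
 * checked and cleared before removal.
 */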
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

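/**
 * gfs2_register_debugfs() - Create the top-level "gfs2" debugfs directory
 *
 * Returns: 0 on success, -ENOMEM if the directory could not be created
 */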
int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

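/**
 * gfs2_unregister_debugfs() - Remove the top-level "gfs2" debugfs directory
 */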
void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}