/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;		/* hash bucket index */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	char string[512];	/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT	15
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The incore superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}
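
/*
 * Note that each hash bucket is protected by a bit spinlock embedded in
 * the low bit of the bucket head pointer (see hlist_bl_lock() above), so
 * no separate lock array is required; lookups walk the chains under RCU.
 */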

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		__gfs2_glock_remove_from_lru(gl);
		spin_unlock(&lru_lock);
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the incore superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it is ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it is ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
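
/*
 * Summary of the rules above: a request in the same state as the glock is
 * granted; GL_EXACT refuses anything but an exact match; while the glock
 * is EX, an SH or DF request is only granted if the request at the head
 * of the queue is also SH or DF respectively; LM_FLAG_ANY accepts any
 * locked state; and an EX request is only considered when it is itself at
 * the head of the queue (nothing is granted past an EX head).
 */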

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail queued try locks
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 * if a type-specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
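
/*
 * Note the two back ends above: with lock_dlm the reply arrives later via
 * gfs2_glock_complete(), while lock_nolock (single node) has no external
 * lock manager, so do_xmote() synthesises the reply immediately by
 * calling finish_xmote(gl, target) itself.
 */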

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}
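
/*
 * run_queue() is the heart of the glock state machine: it either starts a
 * pending demote (GLF_DEMOTE set and the demote state differs) or
 * promotes waiting holders, calling do_xmote() whenever the lock manager
 * has to be asked to change state. The GLF_LOCK bit serialises these
 * transitions on a per-glock basis.
 */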

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
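
/*
 * The lookup/create dance above is the usual optimistic RCU pattern:
 * search without the bucket lock, allocate a new glock if nothing is
 * found, then search again under the bucket lock and free our copy if
 * another CPU inserted the same glock in the meantime.
 */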

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
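
/*
 * Typical usage (an illustrative sketch only):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error == 0) {
 *		... access the object protected by the glock ...
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 *
 * Most callers use the gfs2_glock_nq_init() wrapper (see glock.h), which
 * combines the first two steps, as gfs2_glock_nq_num() does below.
 */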

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, mark the demote as pending rather than immediate
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(seq, "%s", gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
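
/*
 * Sketch of the asynchronous pattern (illustrative only):
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	... do other work while the lock request is in flight ...
 *	if (!error && gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);	(* will not block here *)
 */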

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
		__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array used to sort the holders
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		clear_bit(GLF_LRU, &gl->gl_flags);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
		set_bit(GLF_LRU, &gl->gl_flags);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};
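
/*
 * Note: this uses the older single-callback shrinker interface, where
 * ->shrink returns -1 if no progress can be made for the given gfp_mask,
 * and otherwise an estimate of the number of freeable objects, scaled by
 * sysctl_vfs_cache_pressure in the same way as the VFS caches, which is
 * what the return statement above computes.
 */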

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
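
/*
 * Example (illustrative): a holder flag string of "tH" decodes as
 * LM_FLAG_TRY plus HIF_HOLDER, i.e. a try lock which is currently
 * granted, while a lone "W" is a request still waiting to be granted.
 */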

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it is possible to see if they are composed of spaces, for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       atomic_read(&gl->gl_ref), gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (IS_ERR(glock_workqueue))
		return PTR_ERR(glock_workqueue);
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (IS_ERR(gfs2_delete_workqueue)) {
		destroy_workqueue(glock_workqueue);
		return PTR_ERR(gfs2_delete_workqueue);
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
		/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
		return NULL;
	}
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}
2058
2059void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2060{
2061 if (sdp->debugfs_dir) {
2062 if (sdp->debugfs_dentry_glocks) {
2063 debugfs_remove(sdp->debugfs_dentry_glocks);
2064 sdp->debugfs_dentry_glocks = NULL;
2065 }
2066 if (sdp->debugfs_dentry_glstats) {
2067 debugfs_remove(sdp->debugfs_dentry_glstats);
2068 sdp->debugfs_dentry_glstats = NULL;
2069 }
2070 if (sdp->debugfs_dentry_sbstats) {
2071 debugfs_remove(sdp->debugfs_dentry_sbstats);
2072 sdp->debugfs_dentry_sbstats = NULL;
2073 }
2074 debugfs_remove(sdp->debugfs_dir);
2075 sdp->debugfs_dir = NULL;
2076 }
2077}
2078
2079int gfs2_register_debugfs(void)
2080{
2081 gfs2_root = debugfs_create_dir("gfs2", NULL);
2082 return gfs2_root ? 0 : -ENOMEM;
2083}
2084
2085void gfs2_unregister_debugfs(void)
2086{
2087 debugfs_remove(gfs2_root);
2088 gfs2_root = NULL;
2089}
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/buffer_head.h>
16#include <linux/delay.h>
17#include <linux/sort.h>
18#include <linux/hash.h>
19#include <linux/jhash.h>
20#include <linux/kallsyms.h>
21#include <linux/gfs2_ondisk.h>
22#include <linux/list.h>
23#include <linux/wait.h>
24#include <linux/module.h>
25#include <linux/uaccess.h>
26#include <linux/seq_file.h>
27#include <linux/debugfs.h>
28#include <linux/kthread.h>
29#include <linux/freezer.h>
30#include <linux/workqueue.h>
31#include <linux/jiffies.h>
32#include <linux/rcupdate.h>
33#include <linux/rculist_bl.h>
34#include <linux/bit_spinlock.h>
35#include <linux/percpu.h>
36#include <linux/list_sort.h>
37#include <linux/lockref.h>
38#include <linux/rhashtable.h>
39
40#include "gfs2.h"
41#include "incore.h"
42#include "glock.h"
43#include "glops.h"
44#include "inode.h"
45#include "lops.h"
46#include "meta_io.h"
47#include "quota.h"
48#include "super.h"
49#include "util.h"
50#include "bmap.h"
51#define CREATE_TRACE_POINTS
52#include "trace_gfs2.h"
53
54struct gfs2_glock_iter {
55 struct gfs2_sbd *sdp; /* incore superblock */
56 struct rhashtable_iter hti; /* rhashtable iterator */
57 struct gfs2_glock *gl; /* current glock struct */
58 loff_t last_pos; /* last position */
59};
60
61typedef void (*glock_examiner) (struct gfs2_glock * gl);
62
63static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
64
65static struct dentry *gfs2_root;
66static struct workqueue_struct *glock_workqueue;
67struct workqueue_struct *gfs2_delete_workqueue;
68static LIST_HEAD(lru_list);
69static atomic_t lru_count = ATOMIC_INIT(0);
70static DEFINE_SPINLOCK(lru_lock);
71
72#define GFS2_GL_HASH_SHIFT 15
73#define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)
74
75static const struct rhashtable_params ht_parms = {
76 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
77 .key_len = offsetofend(struct lm_lockname, ln_type),
78 .key_offset = offsetof(struct gfs2_glock, gl_name),
79 .head_offset = offsetof(struct gfs2_glock, gl_node),
80};
81
82static struct rhashtable gl_hash_table;
83
84#define GLOCK_WAIT_TABLE_BITS 12
85#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
86static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
87
88struct wait_glock_queue {
89 struct lm_lockname *name;
90 wait_queue_entry_t wait;
91};
92
93static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
94 int sync, void *key)
95{
96 struct wait_glock_queue *wait_glock =
97 container_of(wait, struct wait_glock_queue, wait);
98 struct lm_lockname *wait_name = wait_glock->name;
99 struct lm_lockname *wake_name = key;
100
101 if (wake_name->ln_sbd != wait_name->ln_sbd ||
102 wake_name->ln_number != wait_name->ln_number ||
103 wake_name->ln_type != wait_name->ln_type)
104 return 0;
105 return autoremove_wake_function(wait, mode, sync, key);
106}
107
108static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
109{
110 u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
111
112 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
113}
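
/*
 * Note: jhash2() hashes the lock name in 32-bit words, which assumes
 * that struct lm_lockname is a multiple of 4 bytes in size and contains
 * no uninitialized padding; waiter and waker must hash identical bytes
 * to agree on the same wait queue head.
 */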
114
115/**
116 * wake_up_glock - Wake up waiters on a glock
117 * @gl: the glock
118 */
119static void wake_up_glock(struct gfs2_glock *gl)
120{
121 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
122
123 if (waitqueue_active(wq))
124 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
125}
126
127static void gfs2_glock_dealloc(struct rcu_head *rcu)
128{
129 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
130
131 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
132 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
133 } else {
134 kfree(gl->gl_lksb.sb_lvbptr);
135 kmem_cache_free(gfs2_glock_cachep, gl);
136 }
137}
138
139void gfs2_glock_free(struct gfs2_glock *gl)
140{
141 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
142
143 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
144 smp_mb();
145 wake_up_glock(gl);
146 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
147 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
148 wake_up(&sdp->sd_glock_wait);
149}
150
151/**
152 * gfs2_glock_hold() - increment reference count on glock
153 * @gl: The glock to hold
154 *
155 */
156
157void gfs2_glock_hold(struct gfs2_glock *gl)
158{
159 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
160 lockref_get(&gl->gl_lockref);
161}
162
163/**
164 * demote_ok - Check to see if it's ok to unlock a glock
165 * @gl: the glock
166 *
167 * Returns: 1 if it's ok
168 */
169
170static int demote_ok(const struct gfs2_glock *gl)
171{
172 const struct gfs2_glock_operations *glops = gl->gl_ops;
173
174 if (gl->gl_state == LM_ST_UNLOCKED)
175 return 0;
176 if (!list_empty(&gl->gl_holders))
177 return 0;
178 if (glops->go_demote_ok)
179 return glops->go_demote_ok(gl);
180 return 1;
181}
182
183
184void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
185{
186 spin_lock(&lru_lock);
187
188 if (!list_empty(&gl->gl_lru))
189 list_del_init(&gl->gl_lru);
190 else
191 atomic_inc(&lru_count);
192
193 list_add_tail(&gl->gl_lru, &lru_list);
194 set_bit(GLF_LRU, &gl->gl_flags);
195 spin_unlock(&lru_lock);
196}
197
198static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
199{
200 if (!(gl->gl_ops->go_flags & GLOF_LRU))
201 return;
202
203 spin_lock(&lru_lock);
204 if (!list_empty(&gl->gl_lru)) {
205 list_del_init(&gl->gl_lru);
206 atomic_dec(&lru_count);
207 clear_bit(GLF_LRU, &gl->gl_flags);
208 }
209 spin_unlock(&lru_lock);
210}
211
212/*
213 * Enqueue the glock on the work queue. Passes one glock reference on to the
214 * work queue.
215 */
216static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
217 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
218 /*
219 * We are holding the lockref spinlock, and the work was still
220 * queued above. The queued work (glock_work_func) takes that
221 * spinlock before dropping its glock reference(s), so it
222 * cannot have dropped them in the meantime.
223 */
224 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
225 gl->gl_lockref.count--;
226 }
227}
228
229static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
230 spin_lock(&gl->gl_lockref.lock);
231 __gfs2_glock_queue_work(gl, delay);
232 spin_unlock(&gl->gl_lockref.lock);
233}
234
235static void __gfs2_glock_put(struct gfs2_glock *gl)
236{
237 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
238 struct address_space *mapping = gfs2_glock2aspace(gl);
239
240 lockref_mark_dead(&gl->gl_lockref);
241
242 gfs2_glock_remove_from_lru(gl);
243 spin_unlock(&gl->gl_lockref.lock);
244 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
245 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
246 trace_gfs2_glock_put(gl);
247 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
248}
249
250/*
251 * Cause the glock to be put in work queue context.
252 */
253void gfs2_glock_queue_put(struct gfs2_glock *gl)
254{
255 gfs2_glock_queue_work(gl, 0);
256}
257
258/**
259 * gfs2_glock_put() - Decrement reference count on glock
260 * @gl: The glock to put
261 *
262 */
263
264void gfs2_glock_put(struct gfs2_glock *gl)
265{
266 if (lockref_put_or_lock(&gl->gl_lockref))
267 return;
268
269 __gfs2_glock_put(gl);
270}
271
272/**
273 * may_grant - check if it's ok to grant a new lock
274 * @gl: The glock
275 * @gh: The lock request which we wish to grant
276 *
277 * Returns: true if it's ok to grant the lock
278 */
279
280static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
281{
282 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
283 if ((gh->gh_state == LM_ST_EXCLUSIVE ||
284 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
285 return 0;
286 if (gl->gl_state == gh->gh_state)
287 return 1;
288 if (gh->gh_flags & GL_EXACT)
289 return 0;
290 if (gl->gl_state == LM_ST_EXCLUSIVE) {
291 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
292 return 1;
293 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
294 return 1;
295 }
296 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
297 return 1;
298 return 0;
299}
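
/*
 * Example: while a glock is held in SH, further SH requests can be
 * granted immediately, but as soon as either the incoming request or
 * the holder at the head of the queue wants EX, may_grant() returns 0
 * and the request has to wait its turn.
 */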
300
301static void gfs2_holder_wake(struct gfs2_holder *gh)
302{
303 clear_bit(HIF_WAIT, &gh->gh_iflags);
304 smp_mb__after_atomic();
305 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
306}
307
308/**
309 * do_error - Something unexpected has happened during a lock request
310 *
311 */
312
313static void do_error(struct gfs2_glock *gl, const int ret)
314{
315 struct gfs2_holder *gh, *tmp;
316
317 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
318 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
319 continue;
320 if (ret & LM_OUT_ERROR)
321 gh->gh_error = -EIO;
322 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
323 gh->gh_error = GLR_TRYFAILED;
324 else
325 continue;
326 list_del_init(&gh->gh_list);
327 trace_gfs2_glock_queue(gh, 0);
328 gfs2_holder_wake(gh);
329 }
330}
331
332/**
333 * do_promote - promote as many requests as possible on the current queue
334 * @gl: The glock
335 *
336 * Returns: 1 if there is a blocked holder at the head of the list, or 2
337 * if a type specific operation is underway.
338 */
339
340static int do_promote(struct gfs2_glock *gl)
341__releases(&gl->gl_lockref.lock)
342__acquires(&gl->gl_lockref.lock)
343{
344 const struct gfs2_glock_operations *glops = gl->gl_ops;
345 struct gfs2_holder *gh, *tmp;
346 int ret;
347
348restart:
349 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
350 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
351 continue;
352 if (may_grant(gl, gh)) {
353 if (gh->gh_list.prev == &gl->gl_holders &&
354 glops->go_lock) {
355 spin_unlock(&gl->gl_lockref.lock);
356 /* FIXME: eliminate this eventually */
357 ret = glops->go_lock(gh);
358 spin_lock(&gl->gl_lockref.lock);
359 if (ret) {
360 if (ret == 1)
361 return 2;
362 gh->gh_error = ret;
363 list_del_init(&gh->gh_list);
364 trace_gfs2_glock_queue(gh, 0);
365 gfs2_holder_wake(gh);
366 goto restart;
367 }
368 set_bit(HIF_HOLDER, &gh->gh_iflags);
369 trace_gfs2_promote(gh, 1);
370 gfs2_holder_wake(gh);
371 goto restart;
372 }
373 set_bit(HIF_HOLDER, &gh->gh_iflags);
374 trace_gfs2_promote(gh, 0);
375 gfs2_holder_wake(gh);
376 continue;
377 }
378 if (gh->gh_list.prev == &gl->gl_holders)
379 return 1;
380 do_error(gl, 0);
381 break;
382 }
383 return 0;
384}
385
386/**
387 * find_first_waiter - find the first gh that's waiting for the glock
388 * @gl: the glock
389 */
390
391static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
392{
393 struct gfs2_holder *gh;
394
395 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
396 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
397 return gh;
398 }
399 return NULL;
400}
401
402/**
403 * state_change - record that the glock is now in a different state
404 * @gl: the glock
405 * @new_state: the new state
406 *
407 */
408
409static void state_change(struct gfs2_glock *gl, unsigned int new_state)
410{
411 int held1, held2;
412
413 held1 = (gl->gl_state != LM_ST_UNLOCKED);
414 held2 = (new_state != LM_ST_UNLOCKED);
415
416 if (held1 != held2) {
417 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
418 if (held2)
419 gl->gl_lockref.count++;
420 else
421 gl->gl_lockref.count--;
422 }
423 if (held1 && held2 && list_empty(&gl->gl_holders))
424 clear_bit(GLF_QUEUED, &gl->gl_flags);
425
426 if (new_state != gl->gl_target)
427 /* shorten our minimum hold time */
428 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
429 GL_GLOCK_MIN_HOLD);
430 gl->gl_state = new_state;
431 gl->gl_tchange = jiffies;
432}
433
434static void gfs2_demote_wake(struct gfs2_glock *gl)
435{
436 gl->gl_demote_state = LM_ST_EXCLUSIVE;
437 clear_bit(GLF_DEMOTE, &gl->gl_flags);
438 smp_mb__after_atomic();
439 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
440}
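
/*
 * LM_ST_EXCLUSIVE doubles as the "no demote request pending" value for
 * gl_demote_state, since no remote node ever asks us to demote *to* the
 * exclusive state; handle_callback() below relies on the same
 * convention.
 */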
441
442/**
443 * finish_xmote - The DLM has replied to one of our lock requests
444 * @gl: The glock
445 * @ret: The status from the DLM
446 *
447 */
448
449static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
450{
451 const struct gfs2_glock_operations *glops = gl->gl_ops;
452 struct gfs2_holder *gh;
453 unsigned state = ret & LM_OUT_ST_MASK;
454 int rv;
455
456 spin_lock(&gl->gl_lockref.lock);
457 trace_gfs2_glock_state_change(gl, state);
458 state_change(gl, state);
459 gh = find_first_waiter(gl);
460
461 /* Demote to UN request arrived during demote to SH or DF */
462 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
463 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
464 gl->gl_target = LM_ST_UNLOCKED;
465
466 /* Check for state != intended state */
467 if (unlikely(state != gl->gl_target)) {
468 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
469 /* move to back of queue and try next entry */
470 if (ret & LM_OUT_CANCELED) {
471 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
472 list_move_tail(&gh->gh_list, &gl->gl_holders);
473 gh = find_first_waiter(gl);
474 gl->gl_target = gh->gh_state;
475 goto retry;
476 }
477 /* Some error or failed "try lock" - report it */
478 if ((ret & LM_OUT_ERROR) ||
479 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
480 gl->gl_target = gl->gl_state;
481 do_error(gl, ret);
482 goto out;
483 }
484 }
485 switch(state) {
486 /* Unlocked due to conversion deadlock, try again */
487 case LM_ST_UNLOCKED:
488retry:
489 do_xmote(gl, gh, gl->gl_target);
490 break;
491 /* Conversion fails, unlock and try again */
492 case LM_ST_SHARED:
493 case LM_ST_DEFERRED:
494 do_xmote(gl, gh, LM_ST_UNLOCKED);
495 break;
496 default: /* Everything else */
497 pr_err("wanted %u got %u\n", gl->gl_target, state);
498 GLOCK_BUG_ON(gl, 1);
499 }
500 spin_unlock(&gl->gl_lockref.lock);
501 return;
502 }
503
504 /* Fast path - we got what we asked for */
505 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
506 gfs2_demote_wake(gl);
507 if (state != LM_ST_UNLOCKED) {
508 if (glops->go_xmote_bh) {
509 spin_unlock(&gl->gl_lockref.lock);
510 rv = glops->go_xmote_bh(gl, gh);
511 spin_lock(&gl->gl_lockref.lock);
512 if (rv) {
513 do_error(gl, rv);
514 goto out;
515 }
516 }
517 rv = do_promote(gl);
518 if (rv == 2)
519 goto out_locked;
520 }
521out:
522 clear_bit(GLF_LOCK, &gl->gl_flags);
523out_locked:
524 spin_unlock(&gl->gl_lockref.lock);
525}
526
527/**
528 * do_xmote - Calls the DLM to change the state of a lock
529 * @gl: The glock
530 * @gh: The holder (only for promotes)
531 * @target: The target lock state
532 *
533 */
534
535static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
536__releases(&gl->gl_lockref.lock)
537__acquires(&gl->gl_lockref.lock)
538{
539 const struct gfs2_glock_operations *glops = gl->gl_ops;
540 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
541 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
542 int ret;
543
544 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
545 target != LM_ST_UNLOCKED)
546 return;
547 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
548 LM_FLAG_PRIORITY);
549 GLOCK_BUG_ON(gl, gl->gl_state == target);
550 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
551 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
552 glops->go_inval) {
553 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
554 do_error(gl, 0); /* Fail queued try locks */
555 }
556 gl->gl_req = target;
557 set_bit(GLF_BLOCKING, &gl->gl_flags);
558 if ((gl->gl_req == LM_ST_UNLOCKED) ||
559 (gl->gl_state == LM_ST_EXCLUSIVE) ||
560 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
561 clear_bit(GLF_BLOCKING, &gl->gl_flags);
562 spin_unlock(&gl->gl_lockref.lock);
563 if (glops->go_sync)
564 glops->go_sync(gl);
565 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
566 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
567 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
568
569 gfs2_glock_hold(gl);
570 if (sdp->sd_lockstruct.ls_ops->lm_lock) {
571 /* lock_dlm */
572 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
573 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
574 target == LM_ST_UNLOCKED &&
575 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
576 finish_xmote(gl, target);
577 gfs2_glock_queue_work(gl, 0);
578 }
579 else if (ret) {
580 pr_err("lm_lock ret %d\n", ret);
581 GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
582 &sdp->sd_flags));
583 }
584 } else { /* lock_nolock */
585 finish_xmote(gl, target);
586 gfs2_glock_queue_work(gl, 0);
587 }
588
589 spin_lock(&gl->gl_lockref.lock);
590}
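
/*
 * Note: gfs2_gl_hash_clear() sets SDF_SKIP_DLM_UNLOCK while tearing the
 * filesystem down; if the DLM then rejects an unlock with -EINVAL, the
 * branch above completes the state change locally via finish_xmote()
 * instead of treating the failure as fatal.
 */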
591
592/**
593 * find_first_holder - find the first "holder" gh
594 * @gl: the glock
595 */
596
597static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
598{
599 struct gfs2_holder *gh;
600
601 if (!list_empty(&gl->gl_holders)) {
602 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
603 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
604 return gh;
605 }
606 return NULL;
607}
608
609/**
610 * run_queue - do all outstanding tasks related to a glock
611 * @gl: The glock in question
612 * @nonblock: True if we must not block in run_queue
613 *
614 */
615
616static void run_queue(struct gfs2_glock *gl, const int nonblock)
617__releases(&gl->gl_lockref.lock)
618__acquires(&gl->gl_lockref.lock)
619{
620 struct gfs2_holder *gh = NULL;
621 int ret;
622
623 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
624 return;
625
626 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
627
628 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
629 gl->gl_demote_state != gl->gl_state) {
630 if (find_first_holder(gl))
631 goto out_unlock;
632 if (nonblock)
633 goto out_sched;
634 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
635 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
636 gl->gl_target = gl->gl_demote_state;
637 } else {
638 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
639 gfs2_demote_wake(gl);
640 ret = do_promote(gl);
641 if (ret == 0)
642 goto out_unlock;
643 if (ret == 2)
644 goto out;
645 gh = find_first_waiter(gl);
646 gl->gl_target = gh->gh_state;
647 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
648 do_error(gl, 0); /* Fail queued try locks */
649 }
650 do_xmote(gl, gh, gl->gl_target);
651out:
652 return;
653
654out_sched:
655 clear_bit(GLF_LOCK, &gl->gl_flags);
656 smp_mb__after_atomic();
657 gl->gl_lockref.count++;
658 __gfs2_glock_queue_work(gl, 0);
659 return;
660
661out_unlock:
662 clear_bit(GLF_LOCK, &gl->gl_flags);
663 smp_mb__after_atomic();
664 return;
665}
666
667static void delete_work_func(struct work_struct *work)
668{
669 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
670 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
671 struct inode *inode;
672 u64 no_addr = gl->gl_name.ln_number;
673
674 /* If someone's using this glock to create a new dinode, the block must
675 have been freed by another node and then re-used, in which case our
676 iopen callback arrives too late to matter. Ignore it. */
677 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
678 goto out;
679
680 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
681 if (inode && !IS_ERR(inode)) {
682 d_prune_aliases(inode);
683 iput(inode);
684 }
685out:
686 gfs2_glock_put(gl);
687}
688
689static void glock_work_func(struct work_struct *work)
690{
691 unsigned long delay = 0;
692 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
693 unsigned int drop_refs = 1;
694
695 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
696 finish_xmote(gl, gl->gl_reply);
697 drop_refs++;
698 }
699 spin_lock(&gl->gl_lockref.lock);
700 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
701 gl->gl_state != LM_ST_UNLOCKED &&
702 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
703 unsigned long holdtime, now = jiffies;
704
705 holdtime = gl->gl_tchange + gl->gl_hold_time;
706 if (time_before(now, holdtime))
707 delay = holdtime - now;
708
709 if (!delay) {
710 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
711 set_bit(GLF_DEMOTE, &gl->gl_flags);
712 }
713 }
714 run_queue(gl, 0);
715 if (delay) {
716 /* Keep one glock reference for the work we requeue. */
717 drop_refs--;
718 if (gl->gl_name.ln_type != LM_TYPE_INODE)
719 delay = 0;
720 __gfs2_glock_queue_work(gl, delay);
721 }
722
723 /*
724 * Drop the remaining glock references manually here. (Mind that
725 * __gfs2_glock_queue_work depends on the lockref spinlock being held
726 * here as well.)
727 */
728 gl->gl_lockref.count -= drop_refs;
729 if (!gl->gl_lockref.count) {
730 __gfs2_glock_put(gl);
731 return;
732 }
733 spin_unlock(&gl->gl_lockref.lock);
734}
735
736static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
737 struct gfs2_glock *new)
738{
739 struct wait_glock_queue wait;
740 wait_queue_head_t *wq = glock_waitqueue(name);
741 struct gfs2_glock *gl;
742
743 wait.name = name;
744 init_wait(&wait.wait);
745 wait.wait.func = glock_wake_function;
746
747again:
748 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
749 rcu_read_lock();
750 if (new) {
751 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
752 &new->gl_node, ht_parms);
753 if (IS_ERR(gl))
754 goto out;
755 } else {
756 gl = rhashtable_lookup_fast(&gl_hash_table,
757 name, ht_parms);
758 }
759 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
760 rcu_read_unlock();
761 schedule();
762 goto again;
763 }
764out:
765 rcu_read_unlock();
766 finish_wait(wq, &wait.wait);
767 return gl;
768}
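
/*
 * If the glock found above is already being freed (its lockref is
 * dead), wait on the glock wait table until gfs2_glock_free() has
 * removed it from the hash table and woken us up, then retry; this
 * keeps two glocks for the same lock name from coexisting.
 */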
769
770/**
771 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
772 * @sdp: The GFS2 superblock
773 * @number: the lock number
774 * @glops: The glock_operations to use
775 * @create: If 0, don't create the glock if it doesn't exist
776 * @glp: the glock is returned here
777 *
778 * This does not lock a glock, just finds/creates structures for one.
779 *
780 * Returns: errno
781 */
782
783int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
784 const struct gfs2_glock_operations *glops, int create,
785 struct gfs2_glock **glp)
786{
787 struct super_block *s = sdp->sd_vfs;
788 struct lm_lockname name = { .ln_number = number,
789 .ln_type = glops->go_type,
790 .ln_sbd = sdp };
791 struct gfs2_glock *gl, *tmp;
792 struct address_space *mapping;
793 struct kmem_cache *cachep;
794 int ret = 0;
795
796 gl = find_insert_glock(&name, NULL);
797 if (gl) {
798 *glp = gl;
799 return 0;
800 }
801 if (!create)
802 return -ENOENT;
803
804 if (glops->go_flags & GLOF_ASPACE)
805 cachep = gfs2_glock_aspace_cachep;
806 else
807 cachep = gfs2_glock_cachep;
808 gl = kmem_cache_alloc(cachep, GFP_NOFS);
809 if (!gl)
810 return -ENOMEM;
811
812 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
813
814 if (glops->go_flags & GLOF_LVB) {
815 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
816 if (!gl->gl_lksb.sb_lvbptr) {
817 kmem_cache_free(cachep, gl);
818 return -ENOMEM;
819 }
820 }
821
822 atomic_inc(&sdp->sd_glock_disposal);
823 gl->gl_node.next = NULL;
824 gl->gl_flags = 0;
825 gl->gl_name = name;
826 gl->gl_lockref.count = 1;
827 gl->gl_state = LM_ST_UNLOCKED;
828 gl->gl_target = LM_ST_UNLOCKED;
829 gl->gl_demote_state = LM_ST_EXCLUSIVE;
830 gl->gl_ops = glops;
831 gl->gl_dstamp = 0;
832 preempt_disable();
833 /* We use the global stats to estimate the initial per-glock stats */
834 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
835 preempt_enable();
836 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
837 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
838 gl->gl_tchange = jiffies;
839 gl->gl_object = NULL;
840 gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
841 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
842 INIT_WORK(&gl->gl_delete, delete_work_func);
843
844 mapping = gfs2_glock2aspace(gl);
845 if (mapping) {
846 mapping->a_ops = &gfs2_meta_aops;
847 mapping->host = s->s_bdev->bd_inode;
848 mapping->flags = 0;
849 mapping_set_gfp_mask(mapping, GFP_NOFS);
850 mapping->private_data = NULL;
851 mapping->writeback_index = 0;
852 }
853
854 tmp = find_insert_glock(&name, gl);
855 if (!tmp) {
856 *glp = gl;
857 goto out;
858 }
859 if (IS_ERR(tmp)) {
860 ret = PTR_ERR(tmp);
861 goto out_free;
862 }
863 *glp = tmp;
864
865out_free:
866 kfree(gl->gl_lksb.sb_lvbptr);
867 kmem_cache_free(cachep, gl);
868 atomic_dec(&sdp->sd_glock_disposal);
869
870out:
871 return ret;
872}
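
/*
 * A minimal (hypothetical) lookup sketch: find or create the inode
 * glock for disk block no_addr, then drop the reference obtained by
 * the lookup once we are done with it:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */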
873
874/**
875 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
876 * @gl: the glock
877 * @state: the state we're requesting
878 * @flags: the modifier flags
879 * @gh: the holder structure
880 *
881 */
882
883void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
884 struct gfs2_holder *gh)
885{
886 INIT_LIST_HEAD(&gh->gh_list);
887 gh->gh_gl = gl;
888 gh->gh_ip = _RET_IP_;
889 gh->gh_owner_pid = get_pid(task_pid(current));
890 gh->gh_state = state;
891 gh->gh_flags = flags;
892 gh->gh_error = 0;
893 gh->gh_iflags = 0;
894 gfs2_glock_hold(gl);
895}
896
897/**
898 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
899 * @state: the state we're requesting
900 * @flags: the modifier flags
901 * @gh: the holder structure
902 *
903 * Don't mess with the glock.
904 *
905 */
906
907void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
908{
909 gh->gh_state = state;
910 gh->gh_flags = flags;
911 gh->gh_iflags = 0;
912 gh->gh_ip = _RET_IP_;
913 put_pid(gh->gh_owner_pid);
914 gh->gh_owner_pid = get_pid(task_pid(current));
915}
916
917/**
918 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
919 * @gh: the holder structure
920 *
921 */
922
923void gfs2_holder_uninit(struct gfs2_holder *gh)
924{
925 put_pid(gh->gh_owner_pid);
926 gfs2_glock_put(gh->gh_gl);
927 gfs2_holder_mark_uninitialized(gh);
928 gh->gh_ip = 0;
929}
930
931/**
932 * gfs2_glock_wait - wait on a glock acquisition
933 * @gh: the glock holder
934 *
935 * Returns: 0 on success
936 */
937
938int gfs2_glock_wait(struct gfs2_holder *gh)
939{
940 unsigned long time1 = jiffies;
941
942 might_sleep();
943 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
944 if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
945 /* Lengthen the minimum hold time. */
946 gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
947 GL_GLOCK_HOLD_INCR,
948 GL_GLOCK_MAX_HOLD);
949 return gh->gh_error;
950}
951
952/**
953 * handle_callback - process a demote request
954 * @gl: the glock
955 * @state: the state the caller wants us to change to
956 *
957 * There are only two requests that we are going to see in actual
958 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
959 */
960
961static void handle_callback(struct gfs2_glock *gl, unsigned int state,
962 unsigned long delay, bool remote)
963{
964 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
965
966 set_bit(bit, &gl->gl_flags);
967 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
968 gl->gl_demote_state = state;
969 gl->gl_demote_time = jiffies;
970 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
971 gl->gl_demote_state != state) {
972 gl->gl_demote_state = LM_ST_UNLOCKED;
973 }
974 if (gl->gl_ops->go_callback)
975 gl->gl_ops->go_callback(gl, remote);
976 trace_gfs2_demote_rq(gl, remote);
977}
978
979void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
980{
981 struct va_format vaf;
982 va_list args;
983
984 va_start(args, fmt);
985
986 if (seq) {
987 seq_vprintf(seq, fmt, args);
988 } else {
989 vaf.fmt = fmt;
990 vaf.va = &args;
991
992 pr_err("%pV", &vaf);
993 }
994
995 va_end(args);
996}
997
998/**
999 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1000 * @gh: the holder structure to add
1001 *
1002 * Eventually we should move the recursive locking trap to a
1003 * debugging option or something like that. This is the fast
1004 * path and needs to have the minimum number of distractions.
1005 *
1006 */
1007
1008static inline void add_to_queue(struct gfs2_holder *gh)
1009__releases(&gl->gl_lockref.lock)
1010__acquires(&gl->gl_lockref.lock)
1011{
1012 struct gfs2_glock *gl = gh->gh_gl;
1013 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1014 struct list_head *insert_pt = NULL;
1015 struct gfs2_holder *gh2;
1016 int try_futile = 0;
1017
1018 BUG_ON(gh->gh_owner_pid == NULL);
1019 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1020 BUG();
1021
1022 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1023 if (test_bit(GLF_LOCK, &gl->gl_flags))
1024 try_futile = !may_grant(gl, gh);
1025 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1026 goto fail;
1027 }
1028
1029 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1030 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
1031 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
1032 goto trap_recursive;
1033 if (try_futile &&
1034 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1035fail:
1036 gh->gh_error = GLR_TRYFAILED;
1037 gfs2_holder_wake(gh);
1038 return;
1039 }
1040 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1041 continue;
1042 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1043 insert_pt = &gh2->gh_list;
1044 }
1045 set_bit(GLF_QUEUED, &gl->gl_flags);
1046 trace_gfs2_glock_queue(gh, 1);
1047 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1048 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1049 if (likely(insert_pt == NULL)) {
1050 list_add_tail(&gh->gh_list, &gl->gl_holders);
1051 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1052 goto do_cancel;
1053 return;
1054 }
1055 list_add_tail(&gh->gh_list, insert_pt);
1056do_cancel:
1057 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1058 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1059 spin_unlock(&gl->gl_lockref.lock);
1060 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1061 sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1062 spin_lock(&gl->gl_lockref.lock);
1063 }
1064 return;
1065
1066trap_recursive:
1067 pr_err("original: %pSR\n", (void *)gh2->gh_ip);
1068 pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
1069 pr_err("lock type: %d req lock state : %d\n",
1070 gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1071 pr_err("new: %pSR\n", (void *)gh->gh_ip);
1072 pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
1073 pr_err("lock type: %d req lock state : %d\n",
1074 gh->gh_gl->gl_name.ln_type, gh->gh_state);
1075 gfs2_dump_glock(NULL, gl);
1076 BUG();
1077}
1078
1079/**
1080 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1081 * @gh: the holder structure
1082 *
1083 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1084 *
1085 * Returns: 0, GLR_TRYFAILED, or errno on failure
1086 */
1087
1088int gfs2_glock_nq(struct gfs2_holder *gh)
1089{
1090 struct gfs2_glock *gl = gh->gh_gl;
1091 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1092 int error = 0;
1093
1094 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1095 return -EIO;
1096
1097 if (test_bit(GLF_LRU, &gl->gl_flags))
1098 gfs2_glock_remove_from_lru(gl);
1099
1100 spin_lock(&gl->gl_lockref.lock);
1101 add_to_queue(gh);
1102 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1103 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1104 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1105 gl->gl_lockref.count++;
1106 __gfs2_glock_queue_work(gl, 0);
1107 }
1108 run_queue(gl, 1);
1109 spin_unlock(&gl->gl_lockref.lock);
1110
1111 if (!(gh->gh_flags & GL_ASYNC))
1112 error = gfs2_glock_wait(gh);
1113
1114 return error;
1115}
1116
1117/**
1118 * gfs2_glock_poll - poll to see if an async request has been completed
1119 * @gh: the holder
1120 *
1121 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1122 */
1123
1124int gfs2_glock_poll(struct gfs2_holder *gh)
1125{
1126 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1127}
1128
1129/**
1130 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1131 * @gh: the glock holder
1132 *
1133 */
1134
1135void gfs2_glock_dq(struct gfs2_holder *gh)
1136{
1137 struct gfs2_glock *gl = gh->gh_gl;
1138 const struct gfs2_glock_operations *glops = gl->gl_ops;
1139 unsigned delay = 0;
1140 int fast_path = 0;
1141
1142 spin_lock(&gl->gl_lockref.lock);
1143 if (gh->gh_flags & GL_NOCACHE)
1144 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1145
1146 list_del_init(&gh->gh_list);
1147 clear_bit(HIF_HOLDER, &gh->gh_iflags);
1148 if (find_first_holder(gl) == NULL) {
1149 if (glops->go_unlock) {
1150 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1151 spin_unlock(&gl->gl_lockref.lock);
1152 glops->go_unlock(gh);
1153 spin_lock(&gl->gl_lockref.lock);
1154 clear_bit(GLF_LOCK, &gl->gl_flags);
1155 }
1156 if (list_empty(&gl->gl_holders) &&
1157 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1158 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1159 fast_path = 1;
1160 }
1161 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
1162 (glops->go_flags & GLOF_LRU))
1163 gfs2_glock_add_to_lru(gl);
1164
1165 trace_gfs2_glock_queue(gh, 0);
1166 if (unlikely(!fast_path)) {
1167 gl->gl_lockref.count++;
1168 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1169 !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1170 gl->gl_name.ln_type == LM_TYPE_INODE)
1171 delay = gl->gl_hold_time;
1172 __gfs2_glock_queue_work(gl, delay);
1173 }
1174 spin_unlock(&gl->gl_lockref.lock);
1175}
1176
1177void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1178{
1179 struct gfs2_glock *gl = gh->gh_gl;
1180 gfs2_glock_dq(gh);
1181 might_sleep();
1182 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1183}
1184
1185/**
1186 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1187 * @gh: the holder structure
1188 *
1189 */
1190
1191void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1192{
1193 gfs2_glock_dq(gh);
1194 gfs2_holder_uninit(gh);
1195}
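
/*
 * A typical (hypothetical) caller sketch built from the primitives
 * above; gfs2_glock_nq_init() is assumed here to combine
 * gfs2_holder_init() with gfs2_glock_nq():
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... read data protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */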
1196
1197/**
1198 * gfs2_glock_nq_num - acquire a glock based on lock number
1199 * @sdp: the filesystem
1200 * @number: the lock number
1201 * @glops: the glock operations for the type of glock
1202 * @state: the state to acquire the glock in
1203 * @flags: modifier flags for the acquisition
1204 * @gh: the struct gfs2_holder
1205 *
1206 * Returns: errno
1207 */
1208
1209int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1210 const struct gfs2_glock_operations *glops,
1211 unsigned int state, u16 flags, struct gfs2_holder *gh)
1212{
1213 struct gfs2_glock *gl;
1214 int error;
1215
1216 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1217 if (!error) {
1218 error = gfs2_glock_nq_init(gl, state, flags, gh);
1219 gfs2_glock_put(gl);
1220 }
1221
1222 return error;
1223}
1224
1225/**
1226 * glock_compare - Compare two struct gfs2_glock structures for sorting
1227 * @arg_a: the first structure
1228 * @arg_b: the second structure
1229 *
1230 */
1231
1232static int glock_compare(const void *arg_a, const void *arg_b)
1233{
1234 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1235 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1236 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1237 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1238
1239 if (a->ln_number > b->ln_number)
1240 return 1;
1241 if (a->ln_number < b->ln_number)
1242 return -1;
1243 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1244 return 0;
1245}
1246
1247/**
1248 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1249 * @num_gh: the number of structures
1250 * @ghs: an array of struct gfs2_holder structures
1251 *
1252 * Returns: 0 on success (all glocks acquired),
1253 * errno on failure (no glocks acquired)
1254 */
1255
1256static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1257 struct gfs2_holder **p)
1258{
1259 unsigned int x;
1260 int error = 0;
1261
1262 for (x = 0; x < num_gh; x++)
1263 p[x] = &ghs[x];
1264
1265 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1266
1267 for (x = 0; x < num_gh; x++) {
1268 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1269
1270 error = gfs2_glock_nq(p[x]);
1271 if (error) {
1272 while (x--)
1273 gfs2_glock_dq(p[x]);
1274 break;
1275 }
1276 }
1277
1278 return error;
1279}
1280
1281/**
1282 * gfs2_glock_nq_m - acquire multiple glocks
1283 * @num_gh: the number of structures
1284 * @ghs: an array of struct gfs2_holder structures
1285 *
1286 *
1287 * Returns: 0 on success (all glocks acquired),
1288 * errno on failure (no glocks acquired)
1289 */
1290
1291int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1292{
1293 struct gfs2_holder *tmp[4];
1294 struct gfs2_holder **pph = tmp;
1295 int error = 0;
1296
1297 switch(num_gh) {
1298 case 0:
1299 return 0;
1300 case 1:
1301 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1302 return gfs2_glock_nq(ghs);
1303 default:
1304 if (num_gh <= 4)
1305 break;
1306 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), GFP_NOFS);
1307 if (!pph)
1308 return -ENOMEM;
1309 }
1310
1311 error = nq_m_sync(num_gh, ghs, pph);
1312
1313 if (pph != tmp)
1314 kfree(pph);
1315
1316 return error;
1317}
1318
1319/**
1320 * gfs2_glock_dq_m - release multiple glocks
1321 * @num_gh: the number of structures
1322 * @ghs: an array of struct gfs2_holder structures
1323 *
1324 */
1325
1326void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1327{
1328 while (num_gh--)
1329 gfs2_glock_dq(&ghs[num_gh]);
1330}
1331
1332void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1333{
1334 unsigned long delay = 0;
1335 unsigned long holdtime;
1336 unsigned long now = jiffies;
1337
1338 gfs2_glock_hold(gl);
1339 holdtime = gl->gl_tchange + gl->gl_hold_time;
1340 if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1341 gl->gl_name.ln_type == LM_TYPE_INODE) {
1342 if (time_before(now, holdtime))
1343 delay = holdtime - now;
1344 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1345 delay = gl->gl_hold_time;
1346 }
1347
1348 spin_lock(&gl->gl_lockref.lock);
1349 handle_callback(gl, state, delay, true);
1350 __gfs2_glock_queue_work(gl, delay);
1351 spin_unlock(&gl->gl_lockref.lock);
1352}
1353
1354/**
1355 * gfs2_should_freeze - Figure out if glock should be frozen
1356 * @gl: The glock in question
1357 *
1358 * Glocks are not frozen if (a) the result of the dlm operation is
1359 * an error, (b) the locking operation was an unlock operation or
1360 * (c) there is a "noexp" flagged request anywhere in the queue
1361 *
1362 * Returns: 1 if freezing should occur, 0 otherwise
1363 */
1364
1365static int gfs2_should_freeze(const struct gfs2_glock *gl)
1366{
1367 const struct gfs2_holder *gh;
1368
1369 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1370 return 0;
1371 if (gl->gl_target == LM_ST_UNLOCKED)
1372 return 0;
1373
1374 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1375 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1376 continue;
1377 if (LM_FLAG_NOEXP & gh->gh_flags)
1378 return 0;
1379 }
1380
1381 return 1;
1382}
1383
1384/**
1385 * gfs2_glock_complete - Callback used by locking
1386 * @gl: Pointer to the glock
1387 * @ret: The return value from the dlm
1388 *
1389 * The gl_reply field is protected by gl_lockref.lock, so it is ok
1390 * to use a bitfield shared with other glock state fields.
1391 */
1392
1393void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1394{
1395 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1396
1397 spin_lock(&gl->gl_lockref.lock);
1398 gl->gl_reply = ret;
1399
1400 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1401 if (gfs2_should_freeze(gl)) {
1402 set_bit(GLF_FROZEN, &gl->gl_flags);
1403 spin_unlock(&gl->gl_lockref.lock);
1404 return;
1405 }
1406 }
1407
1408 gl->gl_lockref.count++;
1409 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1410 __gfs2_glock_queue_work(gl, 0);
1411 spin_unlock(&gl->gl_lockref.lock);
1412}
1413
1414static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1415{
1416 struct gfs2_glock *gla, *glb;
1417
1418 gla = list_entry(a, struct gfs2_glock, gl_lru);
1419 glb = list_entry(b, struct gfs2_glock, gl_lru);
1420
1421 if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1422 return 1;
1423 if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1424 return -1;
1425
1426 return 0;
1427}
1428
1429/**
1430 * gfs2_dispose_glock_lru - Demote a list of glocks
1431 * @list: The list to dispose of
1432 *
1433 * Disposing of glocks may involve disk accesses, so that here we sort
1434 * the glocks by number (i.e. disk location of the inodes) so that if
1435 * there are any such accesses, they'll be sent in order (mostly).
1436 *
1437 * Must be called under the lru_lock, but may drop and retake this
1438 * lock. While the lru_lock is dropped, entries may vanish from the
1439 * list, but no new entries will appear on the list (since it is
1440 * private)
1441 */
1442
1443static void gfs2_dispose_glock_lru(struct list_head *list)
1444__releases(&lru_lock)
1445__acquires(&lru_lock)
1446{
1447 struct gfs2_glock *gl;
1448
1449 list_sort(NULL, list, glock_cmp);
1450
1451 while(!list_empty(list)) {
1452 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1453 list_del_init(&gl->gl_lru);
1454 if (!spin_trylock(&gl->gl_lockref.lock)) {
1455add_back_to_lru:
1456 list_add(&gl->gl_lru, &lru_list);
1457 atomic_inc(&lru_count);
1458 continue;
1459 }
1460 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1461 spin_unlock(&gl->gl_lockref.lock);
1462 goto add_back_to_lru;
1463 }
1464 clear_bit(GLF_LRU, &gl->gl_flags);
1465 gl->gl_lockref.count++;
1466 if (demote_ok(gl))
1467 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1468 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1469 __gfs2_glock_queue_work(gl, 0);
1470 spin_unlock(&gl->gl_lockref.lock);
1471 cond_resched_lock(&lru_lock);
1472 }
1473}
1474
1475/**
1476 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1477 * @nr: The number of entries to scan
1478 *
1479 * This function selects the entries on the LRU which are able to
1480 * be demoted, and then kicks off the process by calling
1481 * gfs2_dispose_glock_lru() above.
1482 */
1483
1484static long gfs2_scan_glock_lru(int nr)
1485{
1486 struct gfs2_glock *gl;
1487 LIST_HEAD(skipped);
1488 LIST_HEAD(dispose);
1489 long freed = 0;
1490
1491 spin_lock(&lru_lock);
1492 while ((nr-- >= 0) && !list_empty(&lru_list)) {
1493 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1494
1495 /* Test for being demotable */
1496 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1497 list_move(&gl->gl_lru, &dispose);
1498 atomic_dec(&lru_count);
1499 freed++;
1500 continue;
1501 }
1502
1503 list_move(&gl->gl_lru, &skipped);
1504 }
1505 list_splice(&skipped, &lru_list);
1506 if (!list_empty(&dispose))
1507 gfs2_dispose_glock_lru(&dispose);
1508 spin_unlock(&lru_lock);
1509
1510 return freed;
1511}
1512
1513static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1514 struct shrink_control *sc)
1515{
1516 if (!(sc->gfp_mask & __GFP_FS))
1517 return SHRINK_STOP;
1518 return gfs2_scan_glock_lru(sc->nr_to_scan);
1519}
1520
1521static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1522 struct shrink_control *sc)
1523{
1524 return vfs_pressure_ratio(atomic_read(&lru_count));
1525}
1526
1527static struct shrinker glock_shrinker = {
1528 .seeks = DEFAULT_SEEKS,
1529 .count_objects = gfs2_glock_shrink_count,
1530 .scan_objects = gfs2_glock_shrink_scan,
1531};
1532
1533/**
1534 * glock_hash_walk - Call a function for each glock in the hash table
1535 * @examiner: the function
1536 * @sdp: the filesystem
1538 *
1539 * Note that the function can be called multiple times on the same
1540 * object. So the user must ensure that the function can cope with
1541 * that.
1542 */
1543
1544static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1545{
1546 struct gfs2_glock *gl;
1547 struct rhashtable_iter iter;
1548
1549 rhashtable_walk_enter(&gl_hash_table, &iter);
1550
1551 do {
1552 rhashtable_walk_start(&iter);
1553
1554 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
1555 if (gl->gl_name.ln_sbd == sdp &&
1556 lockref_get_not_dead(&gl->gl_lockref))
1557 examiner(gl);
1558
1559 rhashtable_walk_stop(&iter);
1560 } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
1561
1562 rhashtable_walk_exit(&iter);
1563}
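
/*
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when the hash table
 * is resized mid-walk; the do/while loop above then restarts the walk,
 * which is why the examiner must cope with seeing the same glock more
 * than once, as the comment above points out.
 */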
1564
1565/**
1566 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1567 * @gl: The glock to thaw
1568 *
1569 */
1570
1571static void thaw_glock(struct gfs2_glock *gl)
1572{
1573 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
1574 gfs2_glock_put(gl);
1575 return;
1576 }
1577 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1578 gfs2_glock_queue_work(gl, 0);
1579}
1580
1581/**
1582 * clear_glock - look at a glock and see if we can free it from glock cache
1583 * @gl: the glock to look at
1584 *
1585 */
1586
1587static void clear_glock(struct gfs2_glock *gl)
1588{
1589 gfs2_glock_remove_from_lru(gl);
1590
1591 spin_lock(&gl->gl_lockref.lock);
1592 if (gl->gl_state != LM_ST_UNLOCKED)
1593 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1594 __gfs2_glock_queue_work(gl, 0);
1595 spin_unlock(&gl->gl_lockref.lock);
1596}
1597
1598/**
1599 * gfs2_glock_thaw - Thaw any frozen glocks
1600 * @sdp: The super block
1601 *
1602 */
1603
1604void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1605{
1606 glock_hash_walk(thaw_glock, sdp);
1607}
1608
1609static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1610{
1611 spin_lock(&gl->gl_lockref.lock);
1612 gfs2_dump_glock(seq, gl);
1613 spin_unlock(&gl->gl_lockref.lock);
1614}
1615
1616static void dump_glock_func(struct gfs2_glock *gl)
1617{
1618 dump_glock(NULL, gl);
1619}
1620
1621/**
1622 * gfs2_gl_hash_clear - Empty out the glock hash table
1623 * @sdp: the filesystem
1624 * @wait: wait until it's all gone
1625 *
1626 * Called when unmounting the filesystem.
1627 */
1628
1629void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1630{
1631 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1632 flush_workqueue(glock_workqueue);
1633 glock_hash_walk(clear_glock, sdp);
1634 flush_workqueue(glock_workqueue);
1635 wait_event_timeout(sdp->sd_glock_wait,
1636 atomic_read(&sdp->sd_glock_disposal) == 0,
1637 HZ * 600);
1638 glock_hash_walk(dump_glock_func, sdp);
1639}
1640
1641void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1642{
1643 struct gfs2_glock *gl = ip->i_gl;
1644 int ret;
1645
1646 ret = gfs2_truncatei_resume(ip);
1647 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1648
1649 spin_lock(&gl->gl_lockref.lock);
1650 clear_bit(GLF_LOCK, &gl->gl_flags);
1651 run_queue(gl, 1);
1652 spin_unlock(&gl->gl_lockref.lock);
1653}
1654
1655static const char *state2str(unsigned state)
1656{
1657 switch(state) {
1658 case LM_ST_UNLOCKED:
1659 return "UN";
1660 case LM_ST_SHARED:
1661 return "SH";
1662 case LM_ST_DEFERRED:
1663 return "DF";
1664 case LM_ST_EXCLUSIVE:
1665 return "EX";
1666 }
1667 return "??";
1668}
1669
1670static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
1671{
1672 char *p = buf;
1673 if (flags & LM_FLAG_TRY)
1674 *p++ = 't';
1675 if (flags & LM_FLAG_TRY_1CB)
1676 *p++ = 'T';
1677 if (flags & LM_FLAG_NOEXP)
1678 *p++ = 'e';
1679 if (flags & LM_FLAG_ANY)
1680 *p++ = 'A';
1681 if (flags & LM_FLAG_PRIORITY)
1682 *p++ = 'p';
1683 if (flags & GL_ASYNC)
1684 *p++ = 'a';
1685 if (flags & GL_EXACT)
1686 *p++ = 'E';
1687 if (flags & GL_NOCACHE)
1688 *p++ = 'c';
1689 if (test_bit(HIF_HOLDER, &iflags))
1690 *p++ = 'H';
1691 if (test_bit(HIF_WAIT, &iflags))
1692 *p++ = 'W';
1693 if (test_bit(HIF_FIRST, &iflags))
1694 *p++ = 'F';
1695 *p = 0;
1696 return buf;
1697}
1698
1699/**
1700 * dump_holder - print information about a glock holder
1701 * @seq: the seq_file struct
1702 * @gh: the glock holder
1703 *
1704 */
1705
1706static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1707{
1708 struct task_struct *gh_owner = NULL;
1709 char flags_buf[32];
1710
1711 rcu_read_lock();
1712 if (gh->gh_owner_pid)
1713 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1714 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1715 state2str(gh->gh_state),
1716 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1717 gh->gh_error,
1718 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1719 gh_owner ? gh_owner->comm : "(ended)",
1720 (void *)gh->gh_ip);
1721 rcu_read_unlock();
1722}
1723
1724static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1725{
1726 const unsigned long *gflags = &gl->gl_flags;
1727 char *p = buf;
1728
1729 if (test_bit(GLF_LOCK, gflags))
1730 *p++ = 'l';
1731 if (test_bit(GLF_DEMOTE, gflags))
1732 *p++ = 'D';
1733 if (test_bit(GLF_PENDING_DEMOTE, gflags))
1734 *p++ = 'd';
1735 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1736 *p++ = 'p';
1737 if (test_bit(GLF_DIRTY, gflags))
1738 *p++ = 'y';
1739 if (test_bit(GLF_LFLUSH, gflags))
1740 *p++ = 'f';
1741 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1742 *p++ = 'i';
1743 if (test_bit(GLF_REPLY_PENDING, gflags))
1744 *p++ = 'r';
1745 if (test_bit(GLF_INITIAL, gflags))
1746 *p++ = 'I';
1747 if (test_bit(GLF_FROZEN, gflags))
1748 *p++ = 'F';
1749 if (test_bit(GLF_QUEUED, gflags))
1750 *p++ = 'q';
1751 if (test_bit(GLF_LRU, gflags))
1752 *p++ = 'L';
1753 if (gl->gl_object)
1754 *p++ = 'o';
1755 if (test_bit(GLF_BLOCKING, gflags))
1756 *p++ = 'b';
1757 *p = 0;
1758 return buf;
1759}
1760
1761/**
1762 * gfs2_dump_glock - print information about a glock
1763 * @seq: The seq_file struct
1764 * @gl: the glock
1765 *
1766 * The file format is as follows:
1767 * One line per object; capital letters indicate the object type:
1768 * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
1769 * other objects are indented by a single space and follow the glock to
1770 * which they are related. Fields are indicated by lower case letters
1771 * followed by a colon and the field value, except for strings, which
1772 * are enclosed in [] so that it's possible to see whether they are
1773 * composed of spaces. The fields are n = number (id of the object),
1774 * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
1775 *
1776 */
1777
1778void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1779{
1780 const struct gfs2_glock_operations *glops = gl->gl_ops;
1781 unsigned long long dtime;
1782 const struct gfs2_holder *gh;
1783 char gflags_buf[32];
1784
1785 dtime = jiffies - gl->gl_demote_time;
1786 dtime *= 1000000/HZ; /* demote time in uSec */
1787 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1788 dtime = 0;
1789 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1790 state2str(gl->gl_state),
1791 gl->gl_name.ln_type,
1792 (unsigned long long)gl->gl_name.ln_number,
1793 gflags2str(gflags_buf, gl),
1794 state2str(gl->gl_target),
1795 state2str(gl->gl_demote_state), dtime,
1796 atomic_read(&gl->gl_ail_count),
1797 atomic_read(&gl->gl_revokes),
1798 (int)gl->gl_lockref.count, gl->gl_hold_time);
1799
1800 list_for_each_entry(gh, &gl->gl_holders, gh_list)
1801 dump_holder(seq, gh);
1802
1803 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1804 glops->go_dump(seq, gl);
1805}
1806
1807static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1808{
1809 struct gfs2_glock *gl = iter_ptr;
1810
1811 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1812 gl->gl_name.ln_type,
1813 (unsigned long long)gl->gl_name.ln_number,
1814 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1815 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1816 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1817 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1818 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1819 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1820 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1821 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1822 return 0;
1823}
1824
1825static const char *gfs2_gltype[] = {
1826 "type",
1827 "reserved",
1828 "nondisk",
1829 "inode",
1830 "rgrp",
1831 "meta",
1832 "iopen",
1833 "flock",
1834 "plock",
1835 "quota",
1836 "journal",
1837};
1838
1839static const char *gfs2_stype[] = {
1840 [GFS2_LKS_SRTT] = "srtt",
1841 [GFS2_LKS_SRTTVAR] = "srttvar",
1842 [GFS2_LKS_SRTTB] = "srttb",
1843 [GFS2_LKS_SRTTVARB] = "srttvarb",
1844 [GFS2_LKS_SIRT] = "sirt",
1845 [GFS2_LKS_SIRTVAR] = "sirtvar",
1846 [GFS2_LKS_DCOUNT] = "dlm",
1847 [GFS2_LKS_QCOUNT] = "queue",
1848};
1849
1850#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1851
1852static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1853{
1854 struct gfs2_sbd *sdp = seq->private;
1855 loff_t pos = *(loff_t *)iter_ptr;
1856 unsigned index = pos >> 3;
1857 unsigned subindex = pos & 0x07;
1858 int i;
1859
1860 if (index == 0 && subindex != 0)
1861 return 0;
1862
1863 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1864 (index == 0) ? "cpu": gfs2_stype[subindex]);
1865
1866 for_each_possible_cpu(i) {
1867 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1868
1869 if (index == 0)
1870 seq_printf(seq, " %15u", i);
1871 else
1872 seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1873 lkstats[index - 1].stats[subindex]);
1874 }
1875 seq_putc(seq, '\n');
1876 return 0;
1877}
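
/*
 * The decoding above relies on ARRAY_SIZE(gfs2_stype) being 8: every
 * glock type occupies eight consecutive pos values, so pos >> 3 picks
 * the row in gfs2_gltype[] and pos & 0x07 the statistic within it;
 * row 0 is special-cased to print the CPU numbers themselves.
 */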
1878
1879int __init gfs2_glock_init(void)
1880{
1881 int i, ret;
1882
1883 ret = rhashtable_init(&gl_hash_table, &ht_parms);
1884 if (ret < 0)
1885 return ret;
1886
1887 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1888 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1889 if (!glock_workqueue) {
1890 rhashtable_destroy(&gl_hash_table);
1891 return -ENOMEM;
1892 }
1893 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1894 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1895 0);
1896 if (!gfs2_delete_workqueue) {
1897 destroy_workqueue(glock_workqueue);
1898 rhashtable_destroy(&gl_hash_table);
1899 return -ENOMEM;
1900 }
1901
1902 ret = register_shrinker(&glock_shrinker);
1903 if (ret) {
1904 destroy_workqueue(gfs2_delete_workqueue);
1905 destroy_workqueue(glock_workqueue);
1906 rhashtable_destroy(&gl_hash_table);
1907 return ret;
1908 }
1909
1910 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
1911 init_waitqueue_head(glock_wait_table + i);
1912
1913 return 0;
1914}
1915
1916void gfs2_glock_exit(void)
1917{
1918 unregister_shrinker(&glock_shrinker);
1919 rhashtable_destroy(&gl_hash_table);
1920 destroy_workqueue(glock_workqueue);
1921 destroy_workqueue(gfs2_delete_workqueue);
1922}
1923
1924static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
1925{
1926 struct gfs2_glock *gl = gi->gl;
1927
1928 if (gl) {
1929 if (n == 0)
1930 return;
1931 if (!lockref_put_not_zero(&gl->gl_lockref))
1932 gfs2_glock_queue_put(gl);
1933 }
1934 for (;;) {
1935 gl = rhashtable_walk_next(&gi->hti);
1936 if (IS_ERR_OR_NULL(gl)) {
1937 if (gl == ERR_PTR(-EAGAIN)) {
1938 n = 1;
1939 continue;
1940 }
1941 gl = NULL;
1942 break;
1943 }
1944 if (gl->gl_name.ln_sbd != gi->sdp)
1945 continue;
1946 if (n <= 1) {
1947 if (!lockref_get_not_dead(&gl->gl_lockref))
1948 continue;
1949 break;
1950 } else {
1951 if (__lockref_is_dead(&gl->gl_lockref))
1952 continue;
1953 n--;
1954 }
1955 }
1956 gi->gl = gl;
1957}
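
/*
 * Here n is the number of entries to advance: n == 0 keeps the current
 * glock, n == 1 steps to the next one, and larger values skip ahead
 * (gfs2_glock_seq_start() uses that to re-reach *pos after the walk
 * had to be restarted from the beginning of the table).
 */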
1958
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

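/*
 * The sbstats file iterates over a fixed-size table rather than over the
 * glock hash table, so the iterator is simply the position itself.
 * Preemption is disabled from start to stop, keeping the show callback
 * atomic while it walks every possible CPU's counters.
 */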
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

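/*
 * Preferred size for the seq_file output buffer: large enough to cut down
 * on read() round trips, but capped at 64KiB and at the largest allocation
 * the page allocator still considers cheap (PAGE_ALLOC_COSTLY_ORDER, i.e.
 * 32KiB with 4KiB pages).
 */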
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

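/**
 * __gfs2_glocks_open() - Common open path for the glocks and glstats files
 * @inode: The debugfs inode, carrying the superblock in i_private
 * @file: The file being opened
 * @ops: The seq_operations to use
 *
 * The larger output buffer is strictly best-effort: the allocation uses
 * __GFP_NOWARN, and if it fails, seq_read() falls back to its own
 * single-page default.
 */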
static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry;
		 * the first call to rhashtable_walk_next gets us the first
		 * entry.
		 */
		gi->last_pos = -1;
		gi->gl = NULL;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private; /* sdp */
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_sbstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

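/**
 * gfs2_create_debugfs_file() - Create per-superblock debugfs files
 * @sdp: The incore superblock
 *
 * Creates a directory named after the filesystem table name under the
 * gfs2 debugfs root, holding three read-only files: "glocks" (one entry
 * per glock with its holders), "glstats" (per-glock statistics) and
 * "sbstats" (per-superblock, per-CPU statistics). With debugfs mounted
 * in the usual place, the stats of a hypothetical filesystem named
 * "myfs" could be read with:
 *
 *   cat /sys/kernel/debug/gfs2/myfs/sbstats
 *
 * Returns: 0 on success, or a negative errno
 */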
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	struct dentry *dent;

	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dir = dent;

	dent = debugfs_create_file("glocks",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glocks_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glocks = dent;

	dent = debugfs_create_file("glstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glstats = dent;

	dent = debugfs_create_file("sbstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_sbstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_sbstats = dent;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return dent ? PTR_ERR(dent) : -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

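/*
 * Note that debugfs_create_dir() returns an ERR_PTR when debugfs is not
 * available in this kernel, but NULL on an ordinary allocation failure,
 * hence the two-step check below.
 */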
int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	if (IS_ERR(gfs2_root))
		return PTR_ERR(gfs2_root);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}