fs/gfs2/glock.h (v3.5.6)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

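/*
 * Example (editorial sketch, not a function GFS2 defines): the
 * compatibility rules above written out as a hypothetical helper.
 * UNLOCKED is trivially compatible with everything; EXCLUSIVE, per the
 * usual DLM semantics, is compatible with nothing, not even itself.
 */
static inline int example_lm_states_compatible(unsigned int a, unsigned int b)
{
	if (a == LM_ST_UNLOCKED || b == LM_ST_UNLOCKED)
		return 1;
	/* SHARED/SHARED and DEFERRED/DEFERRED are the only other matches */
	return a == b && a != LM_ST_EXCLUSIVE;
}
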
/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 */

#define LM_FLAG_TRY		0x00000001
#define LM_FLAG_TRY_1CB		0x00000002
#define LM_FLAG_NOEXP		0x00000004
#define LM_FLAG_ANY		0x00000008
#define LM_FLAG_PRIORITY	0x00000010
#define GL_ASYNC		0x00000040
#define GL_EXACT		0x00000080
#define GL_SKIP			0x00000100
#define GL_NOCACHE		0x00000400

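/*
 * Example (editorial sketch, not in GFS2): a try-lock wrapper built on
 * the flags above.  It relies on gfs2_glock_nq_init() and GLR_TRYFAILED,
 * both declared further down in this header; with LM_FLAG_TRY the
 * enqueue fails fast instead of sleeping when the lock is contended.
 */
static inline int example_try_shared(struct gfs2_glock *gl,
				     struct gfs2_holder *gh)
{
	int error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, gh);

	if (error == GLR_TRYFAILED)
		error = -EAGAIN;	/* contended; let the caller retry */
	return error;
}
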
/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

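/*
 * Example (editorial sketch): decoding the reply value a lock module
 * hands back through gfs2_glock_complete(), declared further down.
 */
static inline int example_lm_reply_ok(int ret, unsigned int requested)
{
	if (ret & (LM_OUT_CANCELED | LM_OUT_ERROR))
		return 0;
	/* the low two bits carry the state that was actually granted */
	return (ret & LM_OUT_ST_MASK) == requested;
}
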
/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD        (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD        (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD        (long)(10)
#define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};

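/*
 * Example (editorial sketch, not part of GFS2): the rough shape of a
 * minimal single-node lock module, loosely modeled on the in-tree
 * lock_nolock implementation.  "lock_example" is a hypothetical name;
 * gfs2_glock_free() is declared further down in this header, and the
 * hooks left unset appear to be optional for a module with no cluster
 * locking to do.
 */
static const match_table_t example_tokens = {
	{ Opt_err, NULL },	/* no hostdata options accepted */
};

static const struct lm_lockops example_ops = {
	.lm_proto_name = "lock_example",
	.lm_put_lock = gfs2_glock_free,	/* no remote lock to drop; just free */
	.lm_tokens = &example_tokens,
};
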
extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_spin);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_spin);

	return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}

int gfs2_glock_get(struct gfs2_sbd *sdp,
		   u64 number, const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put_nolock(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);

void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
		      u64 number, const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh);

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);

__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, int flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}

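/*
 * Example (editorial sketch): the usual hold/release pattern built on
 * the helper above.  The work done under the lock is hypothetical.
 */
static inline int example_read_under_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;	/* holder already uninitialized on failure */

	/* ... read state that the glock protects ... */

	gfs2_glock_dq_uninit(&gh);	/* drop the lock, release the holder */
	return 0;
}
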
extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern int gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

#endif /* __GLOCK_DOT_H__ */
fs/gfs2/glock.h (v4.17)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD        (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD        (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD        (long)(10)
#define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
			  const struct gfs2_glock_operations *glops,
			  int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			     u16 flags, struct gfs2_holder *gh);
extern void gfs2_holder_reinit(unsigned int state, u16 flags,
			       struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, u16 flags,
			     struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

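/*
 * Example (editorial sketch): GLOCK_BUG_ON() dumps the offending glock
 * before calling BUG(), so invariant checks are phrased in terms of it.
 */
static inline void example_assert_excl(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
}
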
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern int gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

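/*
 * Example (editorial sketch): the pattern the two helpers above enable.
 * Mark the holder up front, acquire it only conditionally, and tear it
 * down with a single check on the common exit path.
 */
static inline int example_maybe_lock(struct gfs2_glock *gl, bool want,
				     struct gfs2_holder *gh)
{
	gfs2_holder_mark_uninitialized(gh);
	if (want)
		return gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, gh);
	return 0;
}

static inline void example_maybe_unlock(struct gfs2_holder *gh)
{
	if (gfs2_holder_initialized(gh))
		gfs2_glock_dq_uninit(gh);
}
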
/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode. If we clear the
 * new inode's gl_object, we'll introduce metadata corruption. Function
 * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
 * tries to clear gl_object, so it's more than just gfs2_delete_inode.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}

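/*
 * Example (editorial sketch): how the two helpers pair up across an
 * object's lifetime; `ip' stands in for whatever structure the glock
 * backs.
 */
static inline void example_object_lifetime(struct gfs2_glock *gl,
					   struct gfs2_inode *ip)
{
	glock_set_object(gl, ip);	/* warns if gl_object was already set */
	/* ... use the object ... */
	glock_clear_object(gl, ip);	/* no-op if the glock was reassigned */
}
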
#endif /* __GLOCK_DOT_H__ */