fs/gfs2/incore.h (v4.17)
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#ifndef __INCORE_DOT_H__
 11#define __INCORE_DOT_H__
 12
 13#include <linux/fs.h>
 14#include <linux/kobject.h>
 15#include <linux/workqueue.h>
 16#include <linux/dlm.h>
 17#include <linux/buffer_head.h>
 18#include <linux/rcupdate.h>
 19#include <linux/rculist_bl.h>
 20#include <linux/completion.h>
 21#include <linux/rbtree.h>
 22#include <linux/ktime.h>
 23#include <linux/percpu.h>
 24#include <linux/lockref.h>
 25#include <linux/rhashtable.h>
 26
 27#define DIO_WAIT	0x00000010
 28#define DIO_METADATA	0x00000020
 29
 30struct gfs2_log_operations;
 31struct gfs2_bufdata;
 32struct gfs2_holder;
 33struct gfs2_glock;
 34struct gfs2_quota_data;
 35struct gfs2_trans;
 36struct gfs2_jdesc;
 37struct gfs2_sbd;
 38struct lm_lockops;
 39
 40typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
 41
 42struct gfs2_log_header_host {
 43	u64 lh_sequence;	/* Sequence number of this transaction */
 44	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
 45	u32 lh_tail;		/* Block number of log tail */
 46	u32 lh_blkno;
 47};
 48
 49/*
 50 * Structure of operations that are associated with each
 51 * type of element in the log.
 52 */
 53
 54struct gfs2_log_operations {
 55	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 56	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 57	void (*lo_before_scan) (struct gfs2_jdesc *jd,
 58				struct gfs2_log_header_host *head, int pass);
 59	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
 60				 struct gfs2_log_descriptor *ld, __be64 *ptr,
 61				 int pass);
 62	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
 63	const char *lo_name;
 64};
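
/*
 * Illustrative sketch (not part of the original header): each log element
 * type supplies one of these tables; the in-tree instances live in lops.c.
 * The handler and table names below are hypothetical.
 */
static void example_lo_before_commit(struct gfs2_sbd *sdp,
				     struct gfs2_trans *tr)
{
	/* write this element type's blocks into the active log */
}

static const struct gfs2_log_operations example_lops = {
	.lo_before_commit = example_lo_before_commit,
	.lo_name = "example",
};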
 65
 66#define GBF_FULL 1
 67
 68struct gfs2_bitmap {
 69	struct buffer_head *bi_bh;
 70	char *bi_clone;
 71	unsigned long bi_flags;
 72	u32 bi_offset;
 73	u32 bi_start;
 74	u32 bi_len;
 75	u32 bi_blocks;
 76};
 77
 78struct gfs2_rgrpd {
 79	struct rb_node rd_node;		/* Link with superblock */
 80	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
 81	u64 rd_addr;			/* grp block disk address */
 82	u64 rd_data0;			/* first data location */
 83	u32 rd_length;			/* length of rgrp header in fs blocks */
 84	u32 rd_data;			/* num of data blocks in rgrp */
 85	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
 86	u32 rd_free;
 87	u32 rd_reserved;                /* number of blocks reserved */
 88	u32 rd_free_clone;
 89	u32 rd_dinodes;
 90	u64 rd_igeneration;
 91	struct gfs2_bitmap *rd_bits;
 92	struct gfs2_sbd *rd_sbd;
 93	struct gfs2_rgrp_lvb *rd_rgl;
 94	u32 rd_last_alloc;
 95	u32 rd_flags;
 96	u32 rd_extfail_pt;		/* extent failure point */
 97#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
 98#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
 99#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
100#define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
101#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
102	spinlock_t rd_rsspin;           /* protects reservation related vars */
103	struct rb_root rd_rstree;       /* multi-block reservation tree */
104};
105
106struct gfs2_rbm {
107	struct gfs2_rgrpd *rgd;
108	u32 offset;		/* The offset is bitmap relative */
109	int bii;		/* Bitmap index */
110};
111
112static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
113{
114	return rbm->rgd->rd_bits + rbm->bii;
115}
116
117static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
118{
119	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
120		rbm->offset;
121}
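
/*
 * Worked example (illustrative): GFS2_NBBY is the number of blocks covered
 * by one bitmap byte (4, since each block uses a two-bit entry).  With
 * rd_data0 = 65536, bi_start = 8 bitmap bytes and offset = 5, the rbm maps
 * to block 65536 + 8 * 4 + 5 = 65573.
 */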
122
123static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
124			       const struct gfs2_rbm *rbm2)
125{
126	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
127	       (rbm1->offset == rbm2->offset);
128}
129
130enum gfs2_state_bits {
131	BH_Pinned = BH_PrivateStart,
132	BH_Escaped = BH_PrivateStart + 1,
133};
134
135BUFFER_FNS(Pinned, pinned)
136TAS_BUFFER_FNS(Pinned, pinned)
137BUFFER_FNS(Escaped, escaped)
138TAS_BUFFER_FNS(Escaped, escaped)
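
/*
 * Note (illustrative): BUFFER_FNS(Pinned, pinned) generates
 * set_buffer_pinned(), clear_buffer_pinned() and buffer_pinned(), and
 * TAS_BUFFER_FNS(Pinned, pinned) adds test_set_buffer_pinned() and
 * test_clear_buffer_pinned(), all operating on the BH_Pinned flag above.
 * A caller might therefore pin a buffer with something like:
 *
 *	if (!test_set_buffer_pinned(bh))
 *		atomic_inc(&sdp->sd_log_pinned);
 */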
139
140struct gfs2_bufdata {
141	struct buffer_head *bd_bh;
142	struct gfs2_glock *bd_gl;
143	u64 bd_blkno;
144
145	struct list_head bd_list;
146	const struct gfs2_log_operations *bd_ops;
147
148	struct gfs2_trans *bd_tr;
149	struct list_head bd_ail_st_list;
150	struct list_head bd_ail_gl_list;
151};
152
153/*
154 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
155 * prefix of lock_dlm_ gets awkward.
156 */
157
158#define GDLM_STRNAME_BYTES	25
159#define GDLM_LVB_SIZE		32
160
161/*
162 * ls_recover_flags:
163 *
164 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
165 * held by failed nodes whose journals need recovery.  Those locks should
166 * only be used for journal recovery until the journal recovery is done.
167 * This is set by the dlm recover_prep callback and cleared by the
168 * gfs2_control thread when journal recovery is complete.  To avoid
169 * races between recover_prep setting and gfs2_control clearing, recover_spin
170 * is held while changing this bit and reading/writing recover_block
171 * and recover_start.
172 *
173 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
174 *
175 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
176 * recovery of all journals before allowing other nodes to mount the fs.
177 * This is cleared when FIRST_MOUNT_DONE is set.
178 *
179 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
180 * recovery of all journals, and now allows other nodes to mount the fs.
181 *
182 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
183 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
184 * control clearing BLOCK_LOCKS for further recoveries.
185 *
186 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
187 *
188 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
189 * and recover_done(), i.e. set while recover_block == recover_start.
190 */
191
192enum {
193	DFL_BLOCK_LOCKS		= 0,
194	DFL_NO_DLM_OPS		= 1,
195	DFL_FIRST_MOUNT		= 2,
196	DFL_FIRST_MOUNT_DONE	= 3,
197	DFL_MOUNT_DONE		= 4,
198	DFL_UNMOUNT		= 5,
199	DFL_DLM_RECOVERY	= 6,
200};
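
/*
 * Sketch (illustrative, not from this file) of the recover_prep side of the
 * DFL_BLOCK_LOCKS protocol described above, using the lm_lockstruct fields
 * defined later in this header:
 *
 *	spin_lock(&ls->ls_recover_spin);
 *	ls->ls_recover_block = ls->ls_recover_start;
 *	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 *	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
 *	spin_unlock(&ls->ls_recover_spin);
 *
 * The gfs2_control thread clears DFL_BLOCK_LOCKS under the same spinlock
 * once journal recovery has completed.
 */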
201
202/*
203 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
204 * the struct; padding at the end is fine.
205 */
206struct lm_lockname {
207	u64 ln_number;
208	struct gfs2_sbd *ln_sbd;
209	unsigned int ln_type;
210};
211
212#define lm_name_equal(name1, name2) \
213        (((name1)->ln_number == (name2)->ln_number) &&	\
214	 ((name1)->ln_type == (name2)->ln_type) &&	\
215	 ((name1)->ln_sbd == (name2)->ln_sbd))
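
/*
 * Example (illustrative sketch): a lookup key for an inode glock combines
 * the superblock, the disk address and the lock type.  LM_TYPE_INODE stands
 * for the inode glock type constant defined elsewhere in GFS2:
 *
 *	struct lm_lockname name = {
 *		.ln_number = ip->i_no_addr,
 *		.ln_sbd = sdp,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *
 * Because ln_sbd is part of the key and of lm_name_equal(), glocks from
 * different mounted filesystems never collide even when their block
 * numbers match.
 */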
216
217
218struct gfs2_glock_operations {
219	void (*go_sync) (struct gfs2_glock *gl);
220	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
221	void (*go_inval) (struct gfs2_glock *gl, int flags);
222	int (*go_demote_ok) (const struct gfs2_glock *gl);
223	int (*go_lock) (struct gfs2_holder *gh);
224	void (*go_unlock) (struct gfs2_holder *gh);
225	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
226	void (*go_callback)(struct gfs2_glock *gl, bool remote);
227	const int go_type;
228	const unsigned long go_flags;
229#define GLOF_ASPACE 1
230#define GLOF_LVB    2
231#define GLOF_LRU    4
232};
233
234enum {
235	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
236	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
237	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
238	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
239	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
240	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
241	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
242	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
243	GFS2_NR_LKSTATS
244};
245
246struct gfs2_lkstats {
247	u64 stats[GFS2_NR_LKSTATS];
248};
249
250enum {
251	/* States */
252	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
253	HIF_FIRST		= 7,
254	HIF_WAIT		= 10,
255};
256
257struct gfs2_holder {
258	struct list_head gh_list;
259
260	struct gfs2_glock *gh_gl;
261	struct pid *gh_owner_pid;
262	u16 gh_flags;
263	u16 gh_state;
264
265	int gh_error;
266	unsigned long gh_iflags; /* HIF_... */
267	unsigned long gh_ip;
268};
269
270/* Number of quota types we support */
271#define GFS2_MAXQUOTAS 2
272
273struct gfs2_qadata { /* quota allocation data */
274	/* Quota stuff */
275	struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
276	struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
277	unsigned int qa_qd_num;
278};
279
280/* Resource group multi-block reservation, in order of appearance:
281
282   Step 1. Function prepares to write, allocates a mb, sets the size hint.
283   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
284   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
285   Step 4. Bits are assigned from the rgrp based on either the reservation
286           or wherever it can.
287*/
288
289struct gfs2_blkreserv {
290	/* components used during write (step 1): */
291	atomic_t rs_sizehint;         /* hint of the write size */
292
293	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
294	struct rb_node rs_node;       /* link to other block reservations */
295	struct gfs2_rbm rs_rbm;       /* Start of reservation */
296	u32 rs_free;                  /* how many blocks are still free */
297	u64 rs_inum;                  /* Inode number for reservation */
298};
299
300/*
301 * Allocation parameters
302 * @target: The number of blocks we'd ideally like to allocate
303 * @aflags: The flags (e.g. Orlov flag)
304 *
305 * The intent is to gradually expand this structure over time in
306 * order to give more information, e.g. alignment, min extent size
307 * to the allocation code.
308 */
309struct gfs2_alloc_parms {
310	u64 target;
311	u32 min_target;
312	u32 aflags;
313	u64 allowed;
314};
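
/*
 * Sketch of a caller (illustrative): a writer fills in the parameters and
 * hands them to the reservation path in rgrp.c, roughly:
 *
 *	struct gfs2_alloc_parms ap = { .target = data_blocks + ind_blocks };
 *
 *	error = gfs2_inplace_reserve(ip, &ap);
 */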
315
316enum {
317	GLF_LOCK			= 1,
318	GLF_DEMOTE			= 3,
319	GLF_PENDING_DEMOTE		= 4,
320	GLF_DEMOTE_IN_PROGRESS		= 5,
321	GLF_DIRTY			= 6,
322	GLF_LFLUSH			= 7,
323	GLF_INVALIDATE_IN_PROGRESS	= 8,
324	GLF_REPLY_PENDING		= 9,
325	GLF_INITIAL			= 10,
326	GLF_FROZEN			= 11,
327	GLF_QUEUED			= 12,
328	GLF_LRU				= 13,
329	GLF_OBJECT			= 14, /* Used only for tracing */
330	GLF_BLOCKING			= 15,
331	GLF_INODE_CREATING		= 16, /* Inode creation occurring */
332};
333
334struct gfs2_glock {
335	unsigned long gl_flags;		/* GLF_... */
336	struct lm_lockname gl_name;
337
338	struct lockref gl_lockref;
339
340	/* State fields protected by gl_lockref.lock */
341	unsigned int gl_state:2,	/* Current state */
342		     gl_target:2,	/* Target state */
343		     gl_demote_state:2,	/* State requested by remote node */
344		     gl_req:2,		/* State in last dlm request */
345		     gl_reply:8;	/* Last reply from the dlm */
346
347	unsigned long gl_demote_time; /* time of first demote request */
348	long gl_hold_time;
349	struct list_head gl_holders;
350
351	const struct gfs2_glock_operations *gl_ops;
352	ktime_t gl_dstamp;
353	struct gfs2_lkstats gl_stats;
354	struct dlm_lksb gl_lksb;
355	unsigned long gl_tchange;
356	void *gl_object;
357
358	struct list_head gl_lru;
359	struct list_head gl_ail_list;
360	atomic_t gl_ail_count;
361	atomic_t gl_revokes;
362	struct delayed_work gl_work;
363	union {
364		/* For inode and iopen glocks only */
365		struct work_struct gl_delete;
366		/* For rgrp glocks only */
367		struct {
368			loff_t start;
369			loff_t end;
370		} gl_vm;
371	};
372	struct rcu_head gl_rcu;
373	struct rhash_head gl_node;
374};
375
376#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
377
378enum {
379	GIF_INVALID		= 0,
380	GIF_QD_LOCKED		= 1,
381	GIF_ALLOC_FAILED	= 2,
382	GIF_SW_PAGED		= 3,
383	GIF_ORDERED		= 4,
384	GIF_FREE_VFS_INODE      = 5,
385	GIF_GLOP_PENDING	= 6,
386};
387
388struct gfs2_inode {
389	struct inode i_inode;
390	u64 i_no_addr;
391	u64 i_no_formal_ino;
392	u64 i_generation;
393	u64 i_eattr;
394	unsigned long i_flags;		/* GIF_... */
395	struct gfs2_glock *i_gl; /* Move into i_gh? */
396	struct gfs2_holder i_iopen_gh;
397	struct gfs2_holder i_gh; /* for prepare/commit_write only */
398	struct gfs2_qadata *i_qadata; /* quota allocation data */
399	struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
400	struct gfs2_rgrpd *i_rgd;
401	u64 i_goal;	/* goal block for allocations */
402	struct rw_semaphore i_rw_mutex;
403	struct list_head i_ordered;
404	struct list_head i_trunc_list;
405	__be64 *i_hash_cache;
406	u32 i_entries;
407	u32 i_diskflags;
408	u8 i_height;
409	u8 i_depth;
410	u16 i_rahead;
411};
412
413/*
414 * Since i_inode is the first element of struct gfs2_inode,
415 * this is effectively a cast.
416 */
417static inline struct gfs2_inode *GFS2_I(struct inode *inode)
418{
419	return container_of(inode, struct gfs2_inode, i_inode);
420}
421
422static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
423{
424	return inode->i_sb->s_fs_info;
425}
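
/*
 * Typical usage (illustrative): both helpers are simple pointer conversions,
 * so callers switch freely between the VFS and GFS2 views of an inode:
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);
 */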
426
427struct gfs2_file {
428	struct mutex f_fl_mutex;
429	struct gfs2_holder f_fl_gh;
430};
431
432struct gfs2_revoke_replay {
433	struct list_head rr_list;
434	u64 rr_blkno;
435	unsigned int rr_where;
436};
437
438enum {
439	QDF_CHANGE		= 1,
440	QDF_LOCKED		= 2,
441	QDF_REFRESH		= 3,
442	QDF_QMSG_QUIET          = 4,
443};
444
445struct gfs2_quota_data {
446	struct hlist_bl_node qd_hlist;
447	struct list_head qd_list;
448	struct kqid qd_id;
449	struct gfs2_sbd *qd_sbd;
450	struct lockref qd_lockref;
451	struct list_head qd_lru;
452	unsigned qd_hash;
453
454	unsigned long qd_flags;		/* QDF_... */
455
456	s64 qd_change;
457	s64 qd_change_sync;
458
459	unsigned int qd_slot;
460	unsigned int qd_slot_count;
461
462	struct buffer_head *qd_bh;
463	struct gfs2_quota_change *qd_bh_qc;
464	unsigned int qd_bh_count;
465
466	struct gfs2_glock *qd_gl;
467	struct gfs2_quota_lvb qd_qb;
468
469	u64 qd_sync_gen;
470	unsigned long qd_last_warn;
471	struct rcu_head qd_rcu;
472};
473
474enum {
475	TR_TOUCHED = 1,
476	TR_ATTACHED = 2,
477	TR_ALLOCED = 3,
478};
479
480struct gfs2_trans {
481	unsigned long tr_ip;
482
483	unsigned int tr_blocks;
484	unsigned int tr_revokes;
485	unsigned int tr_reserved;
486	unsigned long tr_flags;
487
488	unsigned int tr_num_buf_new;
489	unsigned int tr_num_databuf_new;
490	unsigned int tr_num_buf_rm;
491	unsigned int tr_num_databuf_rm;
492	unsigned int tr_num_revoke;
493	unsigned int tr_num_revoke_rm;
494
495	struct list_head tr_list;
496	struct list_head tr_databuf;
497	struct list_head tr_buf;
498
499	unsigned int tr_first;
500	struct list_head tr_ail1_list;
501	struct list_head tr_ail2_list;
502};
503
504struct gfs2_journal_extent {
505	struct list_head list;
506
507	unsigned int lblock; /* First logical block */
508	u64 dblock; /* First disk block */
509	u64 blocks;
510};
511
512struct gfs2_jdesc {
513	struct list_head jd_list;
514	struct list_head extent_list;
515	unsigned int nr_extents;
516	struct work_struct jd_work;
517	struct inode *jd_inode;
518	unsigned long jd_flags;
519#define JDF_RECOVERY 1
520	unsigned int jd_jid;
521	unsigned int jd_blocks;
522	int jd_recover_error;
523	/* Replay stuff */
524
525	unsigned int jd_found_blocks;
526	unsigned int jd_found_revokes;
527	unsigned int jd_replayed_blocks;
528
529	struct list_head jd_revoke_list;
530	unsigned int jd_replay_tail;
531
532};
533
534struct gfs2_statfs_change_host {
535	s64 sc_total;
536	s64 sc_free;
537	s64 sc_dinodes;
538};
539
540#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
541#define GFS2_QUOTA_OFF		0
542#define GFS2_QUOTA_ACCOUNT	1
543#define GFS2_QUOTA_ON		2
544
545#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
546#define GFS2_DATA_WRITEBACK	1
547#define GFS2_DATA_ORDERED	2
548
549#define GFS2_ERRORS_DEFAULT     GFS2_ERRORS_WITHDRAW
550#define GFS2_ERRORS_WITHDRAW    0
551#define GFS2_ERRORS_CONTINUE    1 /* place holder for future feature */
552#define GFS2_ERRORS_RO          2 /* place holder for future feature */
553#define GFS2_ERRORS_PANIC       3
554
555struct gfs2_args {
556	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
557	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
558	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
559	unsigned int ar_spectator:1;		/* Don't get a journal */
560	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
561	unsigned int ar_debug:1;		/* Oops on errors */
562	unsigned int ar_posix_acl:1;		/* Enable posix acls */
563	unsigned int ar_quota:2;		/* off/account/on */
564	unsigned int ar_suiddir:1;		/* suiddir support */
565	unsigned int ar_data:2;			/* ordered/writeback */
566	unsigned int ar_meta:1;			/* mount metafs */
567	unsigned int ar_discard:1;		/* discard requests */
568	unsigned int ar_errors:2;               /* errors=withdraw | panic */
569	unsigned int ar_nobarrier:1;            /* do not send barriers */
570	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
571	unsigned int ar_loccookie:1;		/* use location based readdir
572						   cookies */
573	int ar_commit;				/* Commit interval */
574	int ar_statfs_quantum;			/* The fast statfs interval */
575	int ar_quota_quantum;			/* The quota interval */
576	int ar_statfs_percent;			/* The % change to force sync */
577};
578
579struct gfs2_tune {
580	spinlock_t gt_spin;
581
582	unsigned int gt_logd_secs;
583
584	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
585	unsigned int gt_quota_scale_num; /* Numerator */
586	unsigned int gt_quota_scale_den; /* Denominator */
587	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
588	unsigned int gt_new_files_jdata;
589	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
590	unsigned int gt_complain_secs;
591	unsigned int gt_statfs_quantum;
592	unsigned int gt_statfs_slow;
593};
594
595enum {
596	SDF_JOURNAL_CHECKED	= 0,
597	SDF_JOURNAL_LIVE	= 1,
598	SDF_SHUTDOWN		= 2,
599	SDF_NOBARRIERS		= 3,
600	SDF_NORECOVERY		= 4,
601	SDF_DEMOTE		= 5,
602	SDF_NOJOURNALID		= 6,
603	SDF_RORECOVERY		= 7, /* read only recovery */
604	SDF_SKIP_DLM_UNLOCK	= 8,
605	SDF_FORCE_AIL_FLUSH     = 9,
606};
607
608enum gfs2_freeze_state {
609	SFS_UNFROZEN		= 0,
610	SFS_STARTING_FREEZE	= 1,
611	SFS_FROZEN		= 2,
612};
613
614#define GFS2_FSNAME_LEN		256
615
616struct gfs2_inum_host {
617	u64 no_formal_ino;
618	u64 no_addr;
619};
620
621struct gfs2_sb_host {
622	u32 sb_magic;
623	u32 sb_type;
624	u32 sb_format;
625
626	u32 sb_fs_format;
627	u32 sb_multihost_format;
628	u32 sb_bsize;
629	u32 sb_bsize_shift;
630
631	struct gfs2_inum_host sb_master_dir;
632	struct gfs2_inum_host sb_root_dir;
633
634	char sb_lockproto[GFS2_LOCKNAME_LEN];
635	char sb_locktable[GFS2_LOCKNAME_LEN];
636};
637
638/*
639 * lm_mount() return values
640 *
641 * ls_jid - the journal ID this node should use
642 * ls_first - this node is the first to mount the file system
643 * ls_lockspace - lock module's context for this file system
644 * ls_ops - lock module's functions
645 */
646
647struct lm_lockstruct {
648	int ls_jid;
649	unsigned int ls_first;
650	const struct lm_lockops *ls_ops;
651	dlm_lockspace_t *ls_dlm;
652
653	int ls_recover_jid_done;   /* These two are deprecated, */
654	int ls_recover_jid_status; /* used previously by gfs_controld */
655
656	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
657	struct dlm_lksb ls_control_lksb; /* control_lock */
658	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
659	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
660	char *ls_lvb_bits;
661
662	spinlock_t ls_recover_spin; /* protects following fields */
663	unsigned long ls_recover_flags; /* DFL_ */
664	uint32_t ls_recover_mount; /* gen in first recover_done cb */
665	uint32_t ls_recover_start; /* gen in last recover_done cb */
666	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
667	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
668	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
669	uint32_t *ls_recover_result; /* result of last jid recovery */
670};
671
672struct gfs2_pcpu_lkstats {
673	/* One struct for each glock type */
674	struct gfs2_lkstats lkstats[10];
675};
676
677struct gfs2_sbd {
678	struct super_block *sd_vfs;
679	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
680	struct kobject sd_kobj;
681	unsigned long sd_flags;	/* SDF_... */
682	struct gfs2_sb_host sd_sb;
683
684	/* Constants computed on mount */
685
686	u32 sd_fsb2bb;
687	u32 sd_fsb2bb_shift;
688	u32 sd_diptrs;	/* Number of pointers in a dinode */
 689	u32 sd_inptrs;	/* Number of pointers in an indirect block */
690	u32 sd_jbsize;	/* Size of a journaled data block */
691	u32 sd_hash_bsize;	/* sizeof(exhash block) */
692	u32 sd_hash_bsize_shift;
693	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
694	u32 sd_qc_per_block;
695	u32 sd_blocks_per_bitmap;
696	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
697	u32 sd_max_height;	/* Max height of a file's metadata tree */
698	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
699	u32 sd_max_jheight; /* Max height of journaled file's meta tree */
700	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
701	u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */
702
703	struct gfs2_args sd_args;	/* Mount arguments */
704	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */
705
706	/* Lock Stuff */
707
708	struct lm_lockstruct sd_lockstruct;
709	struct gfs2_holder sd_live_gh;
710	struct gfs2_glock *sd_rename_gl;
711	struct gfs2_glock *sd_freeze_gl;
712	struct work_struct sd_freeze_work;
713	wait_queue_head_t sd_glock_wait;
714	atomic_t sd_glock_disposal;
715	struct completion sd_locking_init;
716	struct completion sd_wdack;
717	struct delayed_work sd_control_work;
718
719	/* Inode Stuff */
720
721	struct dentry *sd_master_dir;
722	struct dentry *sd_root_dir;
723
724	struct inode *sd_jindex;
725	struct inode *sd_statfs_inode;
726	struct inode *sd_sc_inode;
727	struct inode *sd_qc_inode;
728	struct inode *sd_rindex;
729	struct inode *sd_quota_inode;
730
731	/* StatFS stuff */
732
733	spinlock_t sd_statfs_spin;
734	struct gfs2_statfs_change_host sd_statfs_master;
735	struct gfs2_statfs_change_host sd_statfs_local;
736	int sd_statfs_force_sync;
737
738	/* Resource group stuff */
739
740	int sd_rindex_uptodate;
741	spinlock_t sd_rindex_spin;
742	struct rb_root sd_rindex_tree;
743	unsigned int sd_rgrps;
744	unsigned int sd_max_rg_data;
745
746	/* Journal index stuff */
747
748	struct list_head sd_jindex_list;
749	spinlock_t sd_jindex_spin;
750	struct mutex sd_jindex_mutex;
751	unsigned int sd_journals;
752
753	struct gfs2_jdesc *sd_jdesc;
754	struct gfs2_holder sd_journal_gh;
755	struct gfs2_holder sd_jinode_gh;
756
757	struct gfs2_holder sd_sc_gh;
758	struct gfs2_holder sd_qc_gh;
759
760	struct completion sd_journal_ready;
761
762	/* Daemon stuff */
763
764	struct task_struct *sd_logd_process;
765	struct task_struct *sd_quotad_process;
766
767	/* Quota stuff */
768
769	struct list_head sd_quota_list;
770	atomic_t sd_quota_count;
771	struct mutex sd_quota_mutex;
772	struct mutex sd_quota_sync_mutex;
773	wait_queue_head_t sd_quota_wait;
774	struct list_head sd_trunc_list;
775	spinlock_t sd_trunc_lock;
776
777	unsigned int sd_quota_slots;
778	unsigned long *sd_quota_bitmap;
779	spinlock_t sd_bitmap_lock;
780
781	u64 sd_quota_sync_gen;
782
783	/* Log stuff */
784
785	struct address_space sd_aspace;
786
787	spinlock_t sd_log_lock;
788
789	struct gfs2_trans *sd_log_tr;
790	unsigned int sd_log_blks_reserved;
791	int sd_log_commited_revoke;
792
793	atomic_t sd_log_pinned;
794	unsigned int sd_log_num_revoke;
795
796	struct list_head sd_log_le_revoke;
797	struct list_head sd_log_le_ordered;
798	spinlock_t sd_ordered_lock;
799
800	atomic_t sd_log_thresh1;
801	atomic_t sd_log_thresh2;
802	atomic_t sd_log_blks_free;
803	atomic_t sd_log_blks_needed;
804	wait_queue_head_t sd_log_waitq;
805	wait_queue_head_t sd_logd_waitq;
806
807	u64 sd_log_sequence;
808	unsigned int sd_log_head;
809	unsigned int sd_log_tail;
810	int sd_log_idle;
811
812	struct rw_semaphore sd_log_flush_lock;
813	atomic_t sd_log_in_flight;
814	struct bio *sd_log_bio;
815	wait_queue_head_t sd_log_flush_wait;
816	int sd_log_error;
817
818	atomic_t sd_reserving_log;
819	wait_queue_head_t sd_reserving_log_wait;
820
821	unsigned int sd_log_flush_head;
822
823	spinlock_t sd_ail_lock;
824	struct list_head sd_ail1_list;
825	struct list_head sd_ail2_list;
826
827	/* For quiescing the filesystem */
828	struct gfs2_holder sd_freeze_gh;
829	atomic_t sd_freeze_state;
830	struct mutex sd_freeze_mutex;
831
832	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
833	char sd_table_name[GFS2_FSNAME_LEN];
834	char sd_proto_name[GFS2_FSNAME_LEN];
835
836	/* Debugging crud */
837
838	unsigned long sd_last_warning;
839	struct dentry *debugfs_dir;    /* debugfs directory */
840	struct dentry *debugfs_dentry_glocks;
841	struct dentry *debugfs_dentry_glstats;
842	struct dentry *debugfs_dentry_sbstats;
843};
844
845static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
846{
847	gl->gl_stats.stats[which]++;
848}
849
850static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
851{
852	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
853	preempt_disable();
854	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
855	preempt_enable();
856}
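
/*
 * Sketch (illustrative): a caller issuing a dlm request bumps the same
 * counter in both the per-glock and the per-cpu, per-glock-type stats.
 */
static inline void example_count_dlm_request(struct gfs2_glock *gl)
{
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
}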
857
858extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
859
860static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
861{
862	return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
863}
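
/*
 * Example (illustrative): with the common 4096-byte sb_bsize, a stuffed
 * (inline-data) inode can hold sb_bsize - sizeof(struct gfs2_dinode) bytes
 * of file data in the dinode block itself; 3864 bytes if the on-disk dinode
 * header is 232 bytes.
 */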
864
865#endif /* __INCORE_DOT_H__ */
866
fs/gfs2/incore.h (v3.15)
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#ifndef __INCORE_DOT_H__
 11#define __INCORE_DOT_H__
 12
 13#include <linux/fs.h>
 14#include <linux/kobject.h>
 15#include <linux/workqueue.h>
 16#include <linux/dlm.h>
 17#include <linux/buffer_head.h>
 18#include <linux/rcupdate.h>
 19#include <linux/rculist_bl.h>
 20#include <linux/completion.h>
 21#include <linux/rbtree.h>
 22#include <linux/ktime.h>
 23#include <linux/percpu.h>
 24#include <linux/lockref.h>
 25
 26#define DIO_WAIT	0x00000010
 27#define DIO_METADATA	0x00000020
 28
 29struct gfs2_log_operations;
 30struct gfs2_bufdata;
 31struct gfs2_holder;
 32struct gfs2_glock;
 33struct gfs2_quota_data;
 34struct gfs2_trans;
 35struct gfs2_jdesc;
 36struct gfs2_sbd;
 37struct lm_lockops;
 38
 39typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
 40
 41struct gfs2_log_header_host {
 42	u64 lh_sequence;	/* Sequence number of this transaction */
 43	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
 44	u32 lh_tail;		/* Block number of log tail */
 45	u32 lh_blkno;
 46	u32 lh_hash;
 47};
 48
 49/*
 50 * Structure of operations that are associated with each
 51 * type of element in the log.
 52 */
 53
 54struct gfs2_log_operations {
 55	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 56	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 57	void (*lo_before_scan) (struct gfs2_jdesc *jd,
 58				struct gfs2_log_header_host *head, int pass);
 59	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
 60				 struct gfs2_log_descriptor *ld, __be64 *ptr,
 61				 int pass);
 62	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
 63	const char *lo_name;
 64};
 65
 66#define GBF_FULL 1
 67
 68struct gfs2_bitmap {
 69	struct buffer_head *bi_bh;
 70	char *bi_clone;
 71	unsigned long bi_flags;
 72	u32 bi_offset;
 73	u32 bi_start;
 74	u32 bi_len;
 75	u32 bi_blocks;
 76};
 77
 78struct gfs2_rgrpd {
 79	struct rb_node rd_node;		/* Link with superblock */
 80	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
 81	u64 rd_addr;			/* grp block disk address */
 82	u64 rd_data0;			/* first data location */
 83	u32 rd_length;			/* length of rgrp header in fs blocks */
 84	u32 rd_data;			/* num of data blocks in rgrp */
 85	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
 86	u32 rd_free;
 87	u32 rd_reserved;                /* number of blocks reserved */
 88	u32 rd_free_clone;
 89	u32 rd_dinodes;
 90	u64 rd_igeneration;
 91	struct gfs2_bitmap *rd_bits;
 92	struct gfs2_sbd *rd_sbd;
 93	struct gfs2_rgrp_lvb *rd_rgl;
 94	u32 rd_last_alloc;
 95	u32 rd_flags;
 96	u32 rd_extfail_pt;		/* extent failure point */
 97#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
 98#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
 99#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
100#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
101	spinlock_t rd_rsspin;           /* protects reservation related vars */
102	struct rb_root rd_rstree;       /* multi-block reservation tree */
103};
104
105struct gfs2_rbm {
106	struct gfs2_rgrpd *rgd;
107	u32 offset;		/* The offset is bitmap relative */
108	int bii;		/* Bitmap index */
109};
110
111static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
112{
113	return rbm->rgd->rd_bits + rbm->bii;
114}
115
116static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
117{
118	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
119		rbm->offset;
120}
121
122static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
123			       const struct gfs2_rbm *rbm2)
124{
125	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
126	       (rbm1->offset == rbm2->offset);
127}
128
129enum gfs2_state_bits {
130	BH_Pinned = BH_PrivateStart,
131	BH_Escaped = BH_PrivateStart + 1,
132	BH_Zeronew = BH_PrivateStart + 2,
133};
134
135BUFFER_FNS(Pinned, pinned)
136TAS_BUFFER_FNS(Pinned, pinned)
137BUFFER_FNS(Escaped, escaped)
138TAS_BUFFER_FNS(Escaped, escaped)
139BUFFER_FNS(Zeronew, zeronew)
140TAS_BUFFER_FNS(Zeronew, zeronew)
141
142struct gfs2_bufdata {
143	struct buffer_head *bd_bh;
144	struct gfs2_glock *bd_gl;
145	u64 bd_blkno;
146
147	struct list_head bd_list;
148	const struct gfs2_log_operations *bd_ops;
149
150	struct gfs2_trans *bd_tr;
151	struct list_head bd_ail_st_list;
152	struct list_head bd_ail_gl_list;
153};
154
155/*
156 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
157 * prefix of lock_dlm_ gets awkward.
158 */
159
160#define GDLM_STRNAME_BYTES	25
161#define GDLM_LVB_SIZE		32
162
163/*
164 * ls_recover_flags:
165 *
166 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
167 * held by failed nodes whose journals need recovery.  Those locks should
168 * only be used for journal recovery until the journal recovery is done.
169 * This is set by the dlm recover_prep callback and cleared by the
170 * gfs2_control thread when journal recovery is complete.  To avoid
171 * races between recover_prep setting and gfs2_control clearing, recover_spin
172 * is held while changing this bit and reading/writing recover_block
173 * and recover_start.
174 *
175 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
176 *
177 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
178 * recovery of all journals before allowing other nodes to mount the fs.
179 * This is cleared when FIRST_MOUNT_DONE is set.
180 *
181 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
182 * recovery of all journals, and now allows other nodes to mount the fs.
183 *
184 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
185 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
186 * control clearing BLOCK_LOCKS for further recoveries.
187 *
188 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
189 *
190 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
191 * and recover_done(), i.e. set while recover_block == recover_start.
192 */
193
194enum {
195	DFL_BLOCK_LOCKS		= 0,
196	DFL_NO_DLM_OPS		= 1,
197	DFL_FIRST_MOUNT		= 2,
198	DFL_FIRST_MOUNT_DONE	= 3,
199	DFL_MOUNT_DONE		= 4,
200	DFL_UNMOUNT		= 5,
201	DFL_DLM_RECOVERY	= 6,
202};
203
204struct lm_lockname {
205	u64 ln_number;
206	unsigned int ln_type;
207};
208
209#define lm_name_equal(name1, name2) \
210        (((name1)->ln_number == (name2)->ln_number) && \
211         ((name1)->ln_type == (name2)->ln_type))
212
213
214struct gfs2_glock_operations {
215	void (*go_sync) (struct gfs2_glock *gl);
216	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
217	void (*go_inval) (struct gfs2_glock *gl, int flags);
218	int (*go_demote_ok) (const struct gfs2_glock *gl);
219	int (*go_lock) (struct gfs2_holder *gh);
220	void (*go_unlock) (struct gfs2_holder *gh);
221	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
222	void (*go_callback)(struct gfs2_glock *gl, bool remote);
223	const int go_type;
224	const unsigned long go_flags;
225#define GLOF_ASPACE 1
226#define GLOF_LVB    2
227};
228
229enum {
230	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
231	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
232	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
233	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
234	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
235	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
236	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
237	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
238	GFS2_NR_LKSTATS
239};
240
241struct gfs2_lkstats {
242	s64 stats[GFS2_NR_LKSTATS];
243};
244
245enum {
246	/* States */
247	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
248	HIF_FIRST		= 7,
249	HIF_WAIT		= 10,
250};
251
252struct gfs2_holder {
253	struct list_head gh_list;
254
255	struct gfs2_glock *gh_gl;
256	struct pid *gh_owner_pid;
257	unsigned int gh_state;
258	unsigned gh_flags;
259
260	int gh_error;
261	unsigned long gh_iflags; /* HIF_... */
262	unsigned long gh_ip;
263};
264
265/* Resource group multi-block reservation, in order of appearance:
266
267   Step 1. Function prepares to write, allocates a mb, sets the size hint.
268   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
269   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
270   Step 4. Bits are assigned from the rgrp based on either the reservation
271           or wherever it can.
272*/
273
274struct gfs2_blkreserv {
275	/* components used during write (step 1): */
276	atomic_t rs_sizehint;         /* hint of the write size */
277
278	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
279	struct rb_node rs_node;       /* link to other block reservations */
280	struct gfs2_rbm rs_rbm;       /* Start of reservation */
281	u32 rs_free;                  /* how many blocks are still free */
282	u64 rs_inum;                  /* Inode number for reservation */
283
284	/* ancillary quota stuff */
285	struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
286	struct gfs2_holder rs_qa_qd_ghs[2 * MAXQUOTAS];
287	unsigned int rs_qa_qd_num;
288};
289
290/*
291 * Allocation parameters
292 * @target: The number of blocks we'd ideally like to allocate
293 * @aflags: The flags (e.g. Orlov flag)
294 *
295 * The intent is to gradually expand this structure over time in
296 * order to give more information, e.g. alignment, min extent size
297 * to the allocation code.
298 */
299struct gfs2_alloc_parms {
300	u32 target;
301	u32 aflags;
302};
303
304enum {
305	GLF_LOCK			= 1,
306	GLF_DEMOTE			= 3,
307	GLF_PENDING_DEMOTE		= 4,
308	GLF_DEMOTE_IN_PROGRESS		= 5,
309	GLF_DIRTY			= 6,
310	GLF_LFLUSH			= 7,
311	GLF_INVALIDATE_IN_PROGRESS	= 8,
312	GLF_REPLY_PENDING		= 9,
313	GLF_INITIAL			= 10,
314	GLF_FROZEN			= 11,
315	GLF_QUEUED			= 12,
316	GLF_LRU				= 13,
317	GLF_OBJECT			= 14, /* Used only for tracing */
318	GLF_BLOCKING			= 15,
319};
320
321struct gfs2_glock {
322	struct hlist_bl_node gl_list;
323	struct gfs2_sbd *gl_sbd;
324	unsigned long gl_flags;		/* GLF_... */
325	struct lm_lockname gl_name;
326
327	struct lockref gl_lockref;
328#define gl_spin gl_lockref.lock
329
330	/* State fields protected by gl_spin */
331	unsigned int gl_state:2,	/* Current state */
332		     gl_target:2,	/* Target state */
333		     gl_demote_state:2,	/* State requested by remote node */
334		     gl_req:2,		/* State in last dlm request */
335		     gl_reply:8;	/* Last reply from the dlm */
336
337	unsigned int gl_hash;
338	unsigned long gl_demote_time; /* time of first demote request */
339	long gl_hold_time;
340	struct list_head gl_holders;
341
342	const struct gfs2_glock_operations *gl_ops;
343	ktime_t gl_dstamp;
344	struct gfs2_lkstats gl_stats;
345	struct dlm_lksb gl_lksb;
346	unsigned long gl_tchange;
347	void *gl_object;
348
349	struct list_head gl_lru;
350	struct list_head gl_ail_list;
351	atomic_t gl_ail_count;
352	atomic_t gl_revokes;
353	struct delayed_work gl_work;
354	union {
355		/* For inode and iopen glocks only */
356		struct work_struct gl_delete;
357		/* For rgrp glocks only */
358		struct {
359			loff_t start;
360			loff_t end;
361		} gl_vm;
362	};
363	struct rcu_head gl_rcu;
364};
365
366#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
367
368enum {
369	GIF_INVALID		= 0,
370	GIF_QD_LOCKED		= 1,
371	GIF_ALLOC_FAILED	= 2,
372	GIF_SW_PAGED		= 3,
373	GIF_ORDERED		= 4,
374	GIF_FREE_VFS_INODE      = 5,
375};
376
377struct gfs2_inode {
378	struct inode i_inode;
379	u64 i_no_addr;
380	u64 i_no_formal_ino;
381	u64 i_generation;
382	u64 i_eattr;
383	unsigned long i_flags;		/* GIF_... */
384	struct gfs2_glock *i_gl; /* Move into i_gh? */
385	struct gfs2_holder i_iopen_gh;
386	struct gfs2_holder i_gh; /* for prepare/commit_write only */
387	struct gfs2_blkreserv *i_res; /* rgrp multi-block reservation */
388	struct gfs2_rgrpd *i_rgd;
389	u64 i_goal;	/* goal block for allocations */
390	struct rw_semaphore i_rw_mutex;
391	struct list_head i_ordered;
392	struct list_head i_trunc_list;
393	__be64 *i_hash_cache;
394	u32 i_entries;
395	u32 i_diskflags;
396	u8 i_height;
397	u8 i_depth;
398};
399
400/*
401 * Since i_inode is the first element of struct gfs2_inode,
402 * this is effectively a cast.
403 */
404static inline struct gfs2_inode *GFS2_I(struct inode *inode)
405{
406	return container_of(inode, struct gfs2_inode, i_inode);
407}
408
409static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
410{
411	return inode->i_sb->s_fs_info;
412}
413
414struct gfs2_file {
415	struct mutex f_fl_mutex;
416	struct gfs2_holder f_fl_gh;
417};
418
419struct gfs2_revoke_replay {
420	struct list_head rr_list;
421	u64 rr_blkno;
422	unsigned int rr_where;
423};
424
425enum {
426	QDF_CHANGE		= 1,
427	QDF_LOCKED		= 2,
428	QDF_REFRESH		= 3,
429};
430
431struct gfs2_quota_data {
432	struct hlist_bl_node qd_hlist;
433	struct list_head qd_list;
434	struct kqid qd_id;
435	struct gfs2_sbd *qd_sbd;
436	struct lockref qd_lockref;
437	struct list_head qd_lru;
438	unsigned qd_hash;
439
440	unsigned long qd_flags;		/* QDF_... */
441
442	s64 qd_change;
443	s64 qd_change_sync;
444
445	unsigned int qd_slot;
446	unsigned int qd_slot_count;
447
448	struct buffer_head *qd_bh;
449	struct gfs2_quota_change *qd_bh_qc;
450	unsigned int qd_bh_count;
451
452	struct gfs2_glock *qd_gl;
453	struct gfs2_quota_lvb qd_qb;
454
455	u64 qd_sync_gen;
456	unsigned long qd_last_warn;
457	struct rcu_head qd_rcu;
458};
459
460struct gfs2_trans {
461	unsigned long tr_ip;
462
463	unsigned int tr_blocks;
464	unsigned int tr_revokes;
465	unsigned int tr_reserved;
466	unsigned int tr_touched:1;
467	unsigned int tr_attached:1;
468
469	struct gfs2_holder tr_t_gh;
470
471
472	unsigned int tr_num_buf_new;
473	unsigned int tr_num_databuf_new;
474	unsigned int tr_num_buf_rm;
475	unsigned int tr_num_databuf_rm;
476	unsigned int tr_num_revoke;
477	unsigned int tr_num_revoke_rm;
478
479	struct list_head tr_list;
480	struct list_head tr_databuf;
481	struct list_head tr_buf;
482
483	unsigned int tr_first;
484	struct list_head tr_ail1_list;
485	struct list_head tr_ail2_list;
486};
487
488struct gfs2_journal_extent {
489	struct list_head list;
490
491	unsigned int lblock; /* First logical block */
492	u64 dblock; /* First disk block */
493	u64 blocks;
494};
495
496struct gfs2_jdesc {
497	struct list_head jd_list;
498	struct list_head extent_list;
499	unsigned int nr_extents;
500	struct work_struct jd_work;
501	struct inode *jd_inode;
502	unsigned long jd_flags;
503#define JDF_RECOVERY 1
504	unsigned int jd_jid;
505	unsigned int jd_blocks;
506	int jd_recover_error;
507	/* Replay stuff */
508
509	unsigned int jd_found_blocks;
510	unsigned int jd_found_revokes;
511	unsigned int jd_replayed_blocks;
512
513	struct list_head jd_revoke_list;
514	unsigned int jd_replay_tail;
515
516};
517
518struct gfs2_statfs_change_host {
519	s64 sc_total;
520	s64 sc_free;
521	s64 sc_dinodes;
522};
523
524#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
525#define GFS2_QUOTA_OFF		0
526#define GFS2_QUOTA_ACCOUNT	1
527#define GFS2_QUOTA_ON		2
528
529#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
530#define GFS2_DATA_WRITEBACK	1
531#define GFS2_DATA_ORDERED	2
532
533#define GFS2_ERRORS_DEFAULT     GFS2_ERRORS_WITHDRAW
534#define GFS2_ERRORS_WITHDRAW    0
535#define GFS2_ERRORS_CONTINUE    1 /* place holder for future feature */
536#define GFS2_ERRORS_RO          2 /* place holder for future feature */
537#define GFS2_ERRORS_PANIC       3
538
539struct gfs2_args {
540	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
541	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
542	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
543	unsigned int ar_spectator:1;		/* Don't get a journal */
544	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
545	unsigned int ar_debug:1;		/* Oops on errors */
546	unsigned int ar_posix_acl:1;		/* Enable posix acls */
547	unsigned int ar_quota:2;		/* off/account/on */
548	unsigned int ar_suiddir:1;		/* suiddir support */
549	unsigned int ar_data:2;			/* ordered/writeback */
550	unsigned int ar_meta:1;			/* mount metafs */
551	unsigned int ar_discard:1;		/* discard requests */
552	unsigned int ar_errors:2;               /* errors=withdraw | panic */
553	unsigned int ar_nobarrier:1;            /* do not send barriers */
554	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
555	int ar_commit;				/* Commit interval */
556	int ar_statfs_quantum;			/* The fast statfs interval */
557	int ar_quota_quantum;			/* The quota interval */
558	int ar_statfs_percent;			/* The % change to force sync */
559};
560
561struct gfs2_tune {
562	spinlock_t gt_spin;
563
564	unsigned int gt_logd_secs;
565
566	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
567	unsigned int gt_quota_scale_num; /* Numerator */
568	unsigned int gt_quota_scale_den; /* Denominator */
569	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
570	unsigned int gt_new_files_jdata;
571	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
572	unsigned int gt_complain_secs;
573	unsigned int gt_statfs_quantum;
574	unsigned int gt_statfs_slow;
575};
576
577enum {
578	SDF_JOURNAL_CHECKED	= 0,
579	SDF_JOURNAL_LIVE	= 1,
580	SDF_SHUTDOWN		= 2,
581	SDF_NOBARRIERS		= 3,
582	SDF_NORECOVERY		= 4,
583	SDF_DEMOTE		= 5,
584	SDF_NOJOURNALID		= 6,
585	SDF_RORECOVERY		= 7, /* read only recovery */
586	SDF_SKIP_DLM_UNLOCK	= 8,
587};
588
589#define GFS2_FSNAME_LEN		256
590
591struct gfs2_inum_host {
592	u64 no_formal_ino;
593	u64 no_addr;
594};
595
596struct gfs2_sb_host {
597	u32 sb_magic;
598	u32 sb_type;
599	u32 sb_format;
600
601	u32 sb_fs_format;
602	u32 sb_multihost_format;
603	u32 sb_bsize;
604	u32 sb_bsize_shift;
605
606	struct gfs2_inum_host sb_master_dir;
607	struct gfs2_inum_host sb_root_dir;
608
609	char sb_lockproto[GFS2_LOCKNAME_LEN];
610	char sb_locktable[GFS2_LOCKNAME_LEN];
611};
612
613/*
614 * lm_mount() return values
615 *
616 * ls_jid - the journal ID this node should use
617 * ls_first - this node is the first to mount the file system
618 * ls_lockspace - lock module's context for this file system
619 * ls_ops - lock module's functions
620 */
621
622struct lm_lockstruct {
623	int ls_jid;
624	unsigned int ls_first;
625	const struct lm_lockops *ls_ops;
626	dlm_lockspace_t *ls_dlm;
627
628	int ls_recover_jid_done;   /* These two are deprecated, */
629	int ls_recover_jid_status; /* used previously by gfs_controld */
630
631	struct dlm_lksb ls_mounted_lksb; /* mounted_lock */
632	struct dlm_lksb ls_control_lksb; /* control_lock */
633	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
634	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
635	char *ls_lvb_bits;
636
637	spinlock_t ls_recover_spin; /* protects following fields */
638	unsigned long ls_recover_flags; /* DFL_ */
639	uint32_t ls_recover_mount; /* gen in first recover_done cb */
640	uint32_t ls_recover_start; /* gen in last recover_done cb */
641	uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
642	uint32_t ls_recover_size; /* size of recover_submit, recover_result */
643	uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
644	uint32_t *ls_recover_result; /* result of last jid recovery */
645};
646
647struct gfs2_pcpu_lkstats {
648	/* One struct for each glock type */
649	struct gfs2_lkstats lkstats[10];
650};
651
652struct gfs2_sbd {
653	struct super_block *sd_vfs;
654	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
655	struct kobject sd_kobj;
656	unsigned long sd_flags;	/* SDF_... */
657	struct gfs2_sb_host sd_sb;
658
659	/* Constants computed on mount */
660
661	u32 sd_fsb2bb;
662	u32 sd_fsb2bb_shift;
663	u32 sd_diptrs;	/* Number of pointers in a dinode */
 664	u32 sd_inptrs;	/* Number of pointers in an indirect block */
665	u32 sd_jbsize;	/* Size of a journaled data block */
666	u32 sd_hash_bsize;	/* sizeof(exhash block) */
667	u32 sd_hash_bsize_shift;
668	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
669	u32 sd_qc_per_block;
670	u32 sd_blocks_per_bitmap;
671	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
672	u32 sd_max_height;	/* Max height of a file's metadata tree */
673	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
674	u32 sd_max_jheight; /* Max height of journaled file's meta tree */
675	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
676
677	struct gfs2_args sd_args;	/* Mount arguments */
678	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */
679
680	/* Lock Stuff */
681
682	struct lm_lockstruct sd_lockstruct;
683	struct gfs2_holder sd_live_gh;
684	struct gfs2_glock *sd_rename_gl;
685	struct gfs2_glock *sd_trans_gl;
686	wait_queue_head_t sd_glock_wait;
687	atomic_t sd_glock_disposal;
688	struct completion sd_locking_init;
689	struct completion sd_wdack;
690	struct delayed_work sd_control_work;
691
692	/* Inode Stuff */
693
694	struct dentry *sd_master_dir;
695	struct dentry *sd_root_dir;
696
697	struct inode *sd_jindex;
698	struct inode *sd_statfs_inode;
699	struct inode *sd_sc_inode;
700	struct inode *sd_qc_inode;
701	struct inode *sd_rindex;
702	struct inode *sd_quota_inode;
703
704	/* StatFS stuff */
705
706	spinlock_t sd_statfs_spin;
707	struct gfs2_statfs_change_host sd_statfs_master;
708	struct gfs2_statfs_change_host sd_statfs_local;
709	int sd_statfs_force_sync;
710
711	/* Resource group stuff */
712
713	int sd_rindex_uptodate;
714	spinlock_t sd_rindex_spin;
715	struct rb_root sd_rindex_tree;
716	unsigned int sd_rgrps;
717	unsigned int sd_max_rg_data;
718
719	/* Journal index stuff */
720
721	struct list_head sd_jindex_list;
722	spinlock_t sd_jindex_spin;
723	struct mutex sd_jindex_mutex;
724	unsigned int sd_journals;
725
726	struct gfs2_jdesc *sd_jdesc;
727	struct gfs2_holder sd_journal_gh;
728	struct gfs2_holder sd_jinode_gh;
729
730	struct gfs2_holder sd_sc_gh;
731	struct gfs2_holder sd_qc_gh;
732
733	/* Daemon stuff */
734
735	struct task_struct *sd_logd_process;
736	struct task_struct *sd_quotad_process;
737
738	/* Quota stuff */
739
740	struct list_head sd_quota_list;
741	atomic_t sd_quota_count;
742	struct mutex sd_quota_mutex;
743	struct mutex sd_quota_sync_mutex;
744	wait_queue_head_t sd_quota_wait;
745	struct list_head sd_trunc_list;
746	spinlock_t sd_trunc_lock;
747
748	unsigned int sd_quota_slots;
749	unsigned long *sd_quota_bitmap;
750	spinlock_t sd_bitmap_lock;
751
752	u64 sd_quota_sync_gen;
753
754	/* Log stuff */
755
756	struct address_space sd_aspace;
757
758	spinlock_t sd_log_lock;
759
760	struct gfs2_trans *sd_log_tr;
761	unsigned int sd_log_blks_reserved;
762	int sd_log_commited_revoke;
763
764	atomic_t sd_log_pinned;
765	unsigned int sd_log_num_revoke;
766
767	struct list_head sd_log_le_revoke;
768	struct list_head sd_log_le_ordered;
769	spinlock_t sd_ordered_lock;
770
771	atomic_t sd_log_thresh1;
772	atomic_t sd_log_thresh2;
773	atomic_t sd_log_blks_free;
774	wait_queue_head_t sd_log_waitq;
775	wait_queue_head_t sd_logd_waitq;
776
777	u64 sd_log_sequence;
778	unsigned int sd_log_head;
779	unsigned int sd_log_tail;
780	int sd_log_idle;
781
782	struct rw_semaphore sd_log_flush_lock;
783	atomic_t sd_log_in_flight;
784	struct bio *sd_log_bio;
785	wait_queue_head_t sd_log_flush_wait;
786	int sd_log_error;
787
788	unsigned int sd_log_flush_head;
789	u64 sd_log_flush_wrapped;
790
791	spinlock_t sd_ail_lock;
792	struct list_head sd_ail1_list;
793	struct list_head sd_ail2_list;
794
795	/* For quiescing the filesystem */
796	struct gfs2_holder sd_freeze_gh;
797
798	char sd_fsname[GFS2_FSNAME_LEN];
799	char sd_table_name[GFS2_FSNAME_LEN];
800	char sd_proto_name[GFS2_FSNAME_LEN];
801
802	/* Debugging crud */
803
804	unsigned long sd_last_warning;
805	struct dentry *debugfs_dir;    /* debugfs directory */
806	struct dentry *debugfs_dentry_glocks;
807	struct dentry *debugfs_dentry_glstats;
808	struct dentry *debugfs_dentry_sbstats;
809};
810
811static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
812{
813	gl->gl_stats.stats[which]++;
814}
815
816static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
817{
818	const struct gfs2_sbd *sdp = gl->gl_sbd;
819	preempt_disable();
820	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
821	preempt_enable();
822}
823
824#endif /* __INCORE_DOT_H__ */
825