v6.2
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * ocfs2.h
  4 *
  5 * Defines macros and structures used in OCFS2
  6 *
  7 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
  8 */
  9
 10#ifndef OCFS2_H
 11#define OCFS2_H
 12
 13#include <linux/spinlock.h>
 14#include <linux/sched.h>
 15#include <linux/wait.h>
 16#include <linux/list.h>
 17#include <linux/llist.h>
 18#include <linux/rbtree.h>
 19#include <linux/workqueue.h>
 20#include <linux/kref.h>
 21#include <linux/mutex.h>
 22#include <linux/lockdep.h>
 23#include <linux/jbd2.h>
 24
 25/* For union ocfs2_dlm_lksb */
 26#include "stackglue.h"
 27
 28#include "ocfs2_fs.h"
 29#include "ocfs2_lockid.h"
 30#include "ocfs2_ioctl.h"
 31
 32/* For struct ocfs2_blockcheck_stats */
 33#include "blockcheck.h"
 34
 35#include "reservations.h"
 36
 37#include "filecheck.h"
 38
 39/* Caching of metadata buffers */
 40
 41/* Most user visible OCFS2 inodes will have very few pieces of
 42 * metadata, but larger files (including bitmaps, etc) must be taken
 43 * into account when designing an access scheme. We allow a small
 44 * amount of inlined blocks to be stored on an array and grow the
 45 * structure into a rb tree when necessary. */
 46#define OCFS2_CACHE_INFO_MAX_ARRAY 2
 47
 48/* Flags for ocfs2_caching_info */
 49
 50enum ocfs2_caching_info_flags {
 51	/* Indicates that the metadata cache is using the inline array */
 52	OCFS2_CACHE_FL_INLINE	= 1<<1,
 53};
 54
 55struct ocfs2_caching_operations;
 56struct ocfs2_caching_info {
 57	/*
 58	 * The parent structure provides the locks, but because the
 59	 * parent structure can differ, it provides locking operations
 60	 * to struct ocfs2_caching_info.
 61	 */
 62	const struct ocfs2_caching_operations *ci_ops;
 63
 64	/* next two are protected by trans_inc_lock */
 65	/* which transaction were we created on? Zero if none. */
 66	unsigned long		ci_created_trans;
 67	/* last transaction we were a part of. */
 68	unsigned long		ci_last_trans;
 69
 70	/* Cache structures */
 71	unsigned int		ci_flags;
 72	unsigned int		ci_num_cached;
 73	union {
  74		sector_t	ci_array[OCFS2_CACHE_INFO_MAX_ARRAY];
 75		struct rb_root	ci_tree;
 76	} ci_cache;
 77};
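/*
 * Editorial note (not part of the kernel header): a sketch of how this
 * union is expected to behave, based on a reading of uptodate.c.  While
 * OCFS2_CACHE_FL_INLINE is set, up to OCFS2_CACHE_INFO_MAX_ARRAY (2)
 * cached block numbers live in ci_array; caching a further block is
 * assumed to move the entries into ci_tree and clear the inline flag.
 */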
 78/*
 79 * Need this prototype here instead of in uptodate.h because journal.h
 80 * uses it.
 81 */
 82struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci);
 83
 84/* this limits us to 256 nodes
 85 * if we need more, we can do a kmalloc for the map */
 86#define OCFS2_NODE_MAP_MAX_NODES    256
 87struct ocfs2_node_map {
 88	u16 num_nodes;
 89	unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)];
 90};
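/*
 * Editorial note (not part of the kernel header): with
 * OCFS2_NODE_MAP_MAX_NODES = 256, BITS_TO_LONGS(256) is 4 on a 64-bit
 * build and 8 on a 32-bit one, so map[] is 32 bytes either way --
 * one bit per possible node.
 */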
 91
 92enum ocfs2_ast_action {
 93	OCFS2_AST_INVALID = 0,
 94	OCFS2_AST_ATTACH,
 95	OCFS2_AST_CONVERT,
 96	OCFS2_AST_DOWNCONVERT,
 97};
 98
 99/* actions for an unlockast function to take. */
100enum ocfs2_unlock_action {
101	OCFS2_UNLOCK_INVALID = 0,
102	OCFS2_UNLOCK_CANCEL_CONVERT,
103	OCFS2_UNLOCK_DROP_LOCK,
104};
105
106/* ocfs2_lock_res->l_flags flags. */
107#define OCFS2_LOCK_ATTACHED      (0x00000001) /* we have initialized
108					       * the lvb */
109#define OCFS2_LOCK_BUSY          (0x00000002) /* we are currently in
110					       * dlm_lock */
111#define OCFS2_LOCK_BLOCKED       (0x00000004) /* blocked waiting to
112					       * downconvert*/
113#define OCFS2_LOCK_LOCAL         (0x00000008) /* newly created inode */
114#define OCFS2_LOCK_NEEDS_REFRESH (0x00000010)
115#define OCFS2_LOCK_REFRESHING    (0x00000020)
116#define OCFS2_LOCK_INITIALIZED   (0x00000040) /* track initialization
117					       * for shutdown paths */
118#define OCFS2_LOCK_FREEING       (0x00000080) /* help dlmglue track
119					       * when to skip queueing
120					       * a lock because it's
121					       * about to be
122					       * dropped. */
123#define OCFS2_LOCK_QUEUED        (0x00000100) /* queued for downconvert */
124#define OCFS2_LOCK_NOCACHE       (0x00000200) /* don't use a holder count */
125#define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
126						 call to dlm_lock.  Only
127						 exists with BUSY set. */
128#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
129						     * from downconverting
130						     * before the upconvert
131						     * has completed */
132
133#define OCFS2_LOCK_NONBLOCK_FINISHED (0x00001000) /* NONBLOCK cluster
134						   * lock has already
135						   * returned, do not block
136						   * dc thread from
137						   * downconverting */
138
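/*
 * Editorial note (not part of the kernel header): an illustrative
 * sketch, not copied from dlmglue.c, of how these flags are typically
 * consulted under the per-lockres spinlock defined below:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	if (lockres->l_flags & OCFS2_LOCK_BUSY)
 *		... a dlm_lock() call is still outstanding; wait on
 *		    lockres->l_event before retrying ...
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 */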
139struct ocfs2_lock_res_ops;
140
141typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
142
143#ifdef CONFIG_OCFS2_FS_STATS
144struct ocfs2_lock_stats {
145	u64		ls_total;	/* Total wait in NSEC */
146	u32		ls_gets;	/* Num acquires */
147	u32		ls_fail;	/* Num failed acquires */
148
149	/* Storing max wait in usecs saves 24 bytes per inode */
150	u32		ls_max;		/* Max wait in USEC */
151	u64		ls_last;	/* Last unlock time in USEC */
152};
153#endif
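/*
 * Editorial note (not part of the kernel header): worked arithmetic for
 * the field widths above -- a u32 holding microseconds wraps at about
 * 2^32 us ~= 4295 s ~= 71.6 minutes of wait, which is ample for a
 * "max wait" value, while ls_total keeps nanoseconds in a u64.
 */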
154
155struct ocfs2_lock_res {
156	void                    *l_priv;
157	struct ocfs2_lock_res_ops *l_ops;
158
159
160	struct list_head         l_blocked_list;
161	struct list_head         l_mask_waiters;
162	struct list_head	 l_holders;
163
164	unsigned long		 l_flags;
165	char                     l_name[OCFS2_LOCK_ID_MAX_LEN];
166	unsigned int             l_ro_holders;
167	unsigned int             l_ex_holders;
168	signed char		 l_level;
169	signed char		 l_requested;
170	signed char		 l_blocking;
171
172	/* Data packed - type enum ocfs2_lock_type */
173	unsigned char            l_type;
174
175	/* used from AST/BAST funcs. */
176	/* Data packed - enum type ocfs2_ast_action */
177	unsigned char            l_action;
178	/* Data packed - enum type ocfs2_unlock_action */
179	unsigned char            l_unlock_action;
180	unsigned int             l_pending_gen;
181
182	spinlock_t               l_lock;
183
184	struct ocfs2_dlm_lksb    l_lksb;
185
186	wait_queue_head_t        l_event;
187
188	struct list_head         l_debug_list;
189
190#ifdef CONFIG_OCFS2_FS_STATS
191	struct ocfs2_lock_stats  l_lock_prmode;		/* PR mode stats */
192	u32                      l_lock_refresh;	/* Disk refreshes */
193	u64                      l_lock_wait;	/* First lock wait time */
194	struct ocfs2_lock_stats  l_lock_exmode;		/* EX mode stats */
195#endif
196#ifdef CONFIG_DEBUG_LOCK_ALLOC
197	struct lockdep_map	 l_lockdep_map;
198#endif
199};
200
201enum ocfs2_orphan_reco_type {
202	ORPHAN_NO_NEED_TRUNCATE = 0,
203	ORPHAN_NEED_TRUNCATE,
204};
205
206enum ocfs2_orphan_scan_state {
207	ORPHAN_SCAN_ACTIVE,
208	ORPHAN_SCAN_INACTIVE
209};
210
211struct ocfs2_orphan_scan {
212	struct mutex 		os_lock;
213	struct ocfs2_super 	*os_osb;
214	struct ocfs2_lock_res 	os_lockres;     /* lock to synchronize scans */
215	struct delayed_work 	os_orphan_scan_work;
216	time64_t		os_scantime;  /* time this node ran the scan */
217	u32			os_count;      /* tracks node specific scans */
218	u32  			os_seqno;       /* tracks cluster wide scans */
219	atomic_t		os_state;              /* ACTIVE or INACTIVE */
220};
221
222struct ocfs2_dlm_debug {
223	struct kref d_refcnt;
224	u32 d_filter_secs;
225	struct list_head d_lockres_tracking;
226};
227
228enum ocfs2_vol_state
229{
230	VOLUME_INIT = 0,
231	VOLUME_MOUNTED,
232	VOLUME_MOUNTED_QUOTAS,
233	VOLUME_DISMOUNTED,
234	VOLUME_DISABLED
235};
236
237struct ocfs2_alloc_stats
238{
239	atomic_t moves;
240	atomic_t local_data;
241	atomic_t bitmap_data;
242	atomic_t bg_allocs;
243	atomic_t bg_extends;
244};
245
246enum ocfs2_local_alloc_state
247{
248	OCFS2_LA_UNUSED = 0,	/* Local alloc will never be used for
249				 * this mountpoint. */
250	OCFS2_LA_ENABLED,	/* Local alloc is in use. */
251	OCFS2_LA_THROTTLED,	/* Local alloc is in use, but number
252				 * of bits has been reduced. */
253	OCFS2_LA_DISABLED	/* Local alloc has temporarily been
254				 * disabled. */
255};
256
257enum ocfs2_mount_options
258{
259	OCFS2_MOUNT_HB_LOCAL = 1 << 0, /* Local heartbeat */
260	OCFS2_MOUNT_BARRIER = 1 << 1,	/* Use block barriers */
261	OCFS2_MOUNT_NOINTR  = 1 << 2,   /* Don't catch signals */
262	OCFS2_MOUNT_ERRORS_PANIC = 1 << 3, /* Panic on errors */
263	OCFS2_MOUNT_DATA_WRITEBACK = 1 << 4, /* No data ordering */
264	OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */
265	OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */
266	OCFS2_MOUNT_INODE64 = 1 << 7,	/* Allow inode numbers > 2^32 */
267	OCFS2_MOUNT_POSIX_ACL = 1 << 8,	/* Force POSIX access control lists */
268	OCFS2_MOUNT_NO_POSIX_ACL = 1 << 9,	/* Disable POSIX access
269						   control lists */
270	OCFS2_MOUNT_USRQUOTA = 1 << 10, /* We support user quotas */
271	OCFS2_MOUNT_GRPQUOTA = 1 << 11, /* We support group quotas */
272	OCFS2_MOUNT_COHERENCY_BUFFERED = 1 << 12, /* Allow concurrent O_DIRECT
273						     writes */
274	OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
275	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
276
277	OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
278	OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
279	OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
280};
281
282#define OCFS2_OSB_SOFT_RO	0x0001
283#define OCFS2_OSB_HARD_RO	0x0002
284#define OCFS2_OSB_ERROR_FS	0x0004
285#define OCFS2_DEFAULT_ATIME_QUANTUM	60
286
287struct ocfs2_journal;
288struct ocfs2_slot_info;
289struct ocfs2_recovery_map;
290struct ocfs2_replay_map;
291struct ocfs2_quota_recovery;
292struct ocfs2_super
293{
294	struct task_struct *commit_task;
295	struct super_block *sb;
296	struct inode *root_inode;
297	struct inode *sys_root_inode;
298	struct inode *global_system_inodes[NUM_GLOBAL_SYSTEM_INODES];
299	struct inode **local_system_inodes;
300
301	struct ocfs2_slot_info *slot_info;
302
303	u32 *slot_recovery_generations;
304
305	spinlock_t node_map_lock;
306
307	u64 root_blkno;
308	u64 system_dir_blkno;
309	u64 bitmap_blkno;
310	u32 bitmap_cpg;
311	char *uuid_str;
312	u32 uuid_hash;
313	u8 *vol_label;
314	u64 first_cluster_group_blkno;
315	u32 fs_generation;
316
317	u32 s_feature_compat;
318	u32 s_feature_incompat;
319	u32 s_feature_ro_compat;
320
321	/* Protects s_next_generation, osb_flags and s_inode_steal_slot.
322	 * Could protect more on osb as it's very short lived.
323	 */
324	spinlock_t osb_lock;
325	u32 s_next_generation;
326	unsigned long osb_flags;
327	u16 s_inode_steal_slot;
328	u16 s_meta_steal_slot;
329	atomic_t s_num_inodes_stolen;
330	atomic_t s_num_meta_stolen;
331
332	unsigned long s_mount_opt;
333	unsigned int s_atime_quantum;
334
335	unsigned int max_slots;
336	unsigned int node_num;
337	int slot_num;
338	int preferred_slot;
339	int s_sectsize_bits;
340	int s_clustersize;
341	int s_clustersize_bits;
342	unsigned int s_xattr_inline_size;
343
344	atomic_t vol_state;
345	struct mutex recovery_lock;
346	struct ocfs2_recovery_map *recovery_map;
347	struct ocfs2_replay_map *replay_map;
348	struct task_struct *recovery_thread_task;
349	int disable_recovery;
350	wait_queue_head_t checkpoint_event;
351	struct ocfs2_journal *journal;
352	unsigned long osb_commit_interval;
353
354	struct delayed_work		la_enable_wq;
355
356	/*
357	 * Must hold local alloc i_rwsem and osb->osb_lock to change
358	 * local_alloc_bits. Reads can be done under either lock.
359	 */
360	unsigned int local_alloc_bits;
361	unsigned int local_alloc_default_bits;
362	/* osb_clusters_at_boot can become stale! Do not trust it to
363	 * be up to date. */
364	unsigned int osb_clusters_at_boot;
365
366	enum ocfs2_local_alloc_state local_alloc_state; /* protected
367							 * by osb_lock */
368
369	struct buffer_head *local_alloc_bh;
370
371	u64 la_last_gd;
372
373	struct ocfs2_reservation_map	osb_la_resmap;
374
375	unsigned int	osb_resv_level;
376	unsigned int	osb_dir_resv_level;
377
378	/* Next two fields are for local node slot recovery during
379	 * mount. */
380	struct ocfs2_dinode *local_alloc_copy;
381	struct ocfs2_quota_recovery *quota_rec;
382
383	struct ocfs2_blockcheck_stats osb_ecc_stats;
384	struct ocfs2_alloc_stats alloc_stats;
385	char dev_str[20];		/* "major,minor" of the device */
386
387	u8 osb_stackflags;
388
389	char osb_cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
390	char osb_cluster_name[OCFS2_CLUSTER_NAME_LEN + 1];
391	struct ocfs2_cluster_connection *cconn;
392	struct ocfs2_lock_res osb_super_lockres;
393	struct ocfs2_lock_res osb_rename_lockres;
394	struct ocfs2_lock_res osb_nfs_sync_lockres;
395	struct rw_semaphore nfs_sync_rwlock;
396	struct ocfs2_lock_res osb_trim_fs_lockres;
397	struct mutex obs_trim_fs_mutex;
398	struct ocfs2_dlm_debug *osb_dlm_debug;
399
400	struct dentry *osb_debug_root;
401
402	wait_queue_head_t recovery_event;
403
404	spinlock_t dc_task_lock;
405	struct task_struct *dc_task;
406	wait_queue_head_t dc_event;
407	unsigned long dc_wake_sequence;
408	unsigned long dc_work_sequence;
409
410	/*
411	 * Any thread can add locks to the list, but the downconvert
412	 * thread is the only one allowed to remove locks. Any change
413	 * to this rule requires updating
414	 * ocfs2_downconvert_thread_do_work().
415	 */
416	struct list_head blocked_lock_list;
417	unsigned long blocked_lock_count;
418
419	/* List of dquot structures to drop last reference to */
420	struct llist_head dquot_drop_list;
421	struct work_struct dquot_drop_work;
422
423	wait_queue_head_t		osb_mount_event;
424
425	/* Truncate log info */
426	struct inode			*osb_tl_inode;
427	struct buffer_head		*osb_tl_bh;
428	struct delayed_work		osb_truncate_log_wq;
429	atomic_t			osb_tl_disable;
430	/*
431	 * How many clusters in our truncate log.
432	 * It must be protected by osb_tl_inode->i_rwsem.
433	 */
434	unsigned int truncated_clusters;
435
436	struct ocfs2_node_map		osb_recovering_orphan_dirs;
437	unsigned int			*osb_orphan_wipes;
438	wait_queue_head_t		osb_wipe_event;
439
440	struct ocfs2_orphan_scan	osb_orphan_scan;
441
442	/* used to protect metaecc calculation check of xattr. */
443	spinlock_t osb_xattr_lock;
444
445	unsigned int			osb_dx_mask;
446	u32				osb_dx_seed[4];
447
448	/* the group we used to allocate inodes. */
449	u64				osb_inode_alloc_group;
450
451	/* rb tree root for refcount lock. */
452	struct rb_root	osb_rf_lock_tree;
453	struct ocfs2_refcount_tree *osb_ref_tree_lru;
454
455	struct mutex system_file_mutex;
456
457	/*
458	 * OCFS2 needs to schedule several different types of work which
459	 * require cluster locking, disk I/O, recovery waits, etc. Since these
460	 * types of work tend to be heavy we avoid using the kernel events
461	 * workqueue and schedule on our own.
462	 */
463	struct workqueue_struct *ocfs2_wq;
464
465	/* sysfs directory per partition */
466	struct kset *osb_dev_kset;
467
468	/* file check related stuff */
469	struct ocfs2_filecheck_sysfs_entry osb_fc_ent;
470};
471
472#define OCFS2_SB(sb)	    ((struct ocfs2_super *)(sb)->s_fs_info)
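/*
 * Editorial note (not part of the kernel header): typical usage,
 * assuming an inode that lives on an OCFS2 super_block:
 *
 *	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 */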
473
474/* Useful typedef for passing around journal access functions */
475typedef int (*ocfs2_journal_access_func)(handle_t *handle,
476					 struct ocfs2_caching_info *ci,
477					 struct buffer_head *bh, int type);
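/*
 * Editorial note (not part of the kernel header): the journal access
 * helpers declared in journal.h (for example ocfs2_journal_access_di())
 * are believed to match this signature, so one of them can be handed
 * around as an ocfs2_journal_access_func.
 */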
478
479static inline int ocfs2_should_order_data(struct inode *inode)
480{
481	if (!S_ISREG(inode->i_mode))
482		return 0;
483	if (OCFS2_SB(inode->i_sb)->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)
484		return 0;
485	return 1;
486}
487
488static inline int ocfs2_sparse_alloc(struct ocfs2_super *osb)
489{
490	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
491		return 1;
492	return 0;
493}
494
495static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
496{
497	/*
498	 * Support for sparse files is a pre-requisite
499	 */
500	if (!ocfs2_sparse_alloc(osb))
501		return 0;
502
503	if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_UNWRITTEN)
504		return 1;
505	return 0;
506}
507
508static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
509{
510	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
511		return 1;
512	return 0;
513}
514
515
516static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb)
517{
518	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
519		return 1;
520	return 0;
521}
522
523static inline int ocfs2_supports_xattr(struct ocfs2_super *osb)
524{
525	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)
526		return 1;
527	return 0;
528}
529
530static inline int ocfs2_meta_ecc(struct ocfs2_super *osb)
531{
532	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_META_ECC)
533		return 1;
534	return 0;
535}
536
537static inline int ocfs2_supports_indexed_dirs(struct ocfs2_super *osb)
538{
539	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS)
540		return 1;
541	return 0;
542}
543
544static inline int ocfs2_supports_discontig_bg(struct ocfs2_super *osb)
545{
546	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)
547		return 1;
548	return 0;
549}
550
551static inline unsigned int ocfs2_link_max(struct ocfs2_super *osb)
552{
553	if (ocfs2_supports_indexed_dirs(osb))
554		return OCFS2_DX_LINK_MAX;
555	return OCFS2_LINK_MAX;
556}
557
558static inline unsigned int ocfs2_read_links_count(struct ocfs2_dinode *di)
559{
560	u32 nlink = le16_to_cpu(di->i_links_count);
561	u32 hi = le16_to_cpu(di->i_links_count_hi);
562
563	nlink |= (hi << OCFS2_LINKS_HI_SHIFT);
564
565	return nlink;
566}
567
568static inline void ocfs2_set_links_count(struct ocfs2_dinode *di, u32 nlink)
569{
570	u16 lo, hi;
571
572	lo = nlink;
573	hi = nlink >> OCFS2_LINKS_HI_SHIFT;
574
575	di->i_links_count = cpu_to_le16(lo);
576	di->i_links_count_hi = cpu_to_le16(hi);
577}
578
579static inline void ocfs2_add_links_count(struct ocfs2_dinode *di, int n)
580{
581	u32 links = ocfs2_read_links_count(di);
582
583	links += n;
584
585	ocfs2_set_links_count(di, links);
586}
587
588static inline int ocfs2_refcount_tree(struct ocfs2_super *osb)
589{
590	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE)
591		return 1;
592	return 0;
593}
594
595/* set / clear functions because cluster events can make these happen
596 * in parallel so we want the transitions to be atomic. this also
597 * means that any future flags osb_flags must be protected by spinlock
598 * too! */
599static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
600				      unsigned long flag)
601{
602	spin_lock(&osb->osb_lock);
603	osb->osb_flags |= flag;
604	spin_unlock(&osb->osb_lock);
605}
606
607static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
608				     int hard)
609{
610	spin_lock(&osb->osb_lock);
611	osb->osb_flags &= ~(OCFS2_OSB_SOFT_RO|OCFS2_OSB_HARD_RO);
612	if (hard)
613		osb->osb_flags |= OCFS2_OSB_HARD_RO;
614	else
615		osb->osb_flags |= OCFS2_OSB_SOFT_RO;
616	spin_unlock(&osb->osb_lock);
617}
618
619static inline int ocfs2_is_hard_readonly(struct ocfs2_super *osb)
620{
621	int ret;
622
623	spin_lock(&osb->osb_lock);
624	ret = osb->osb_flags & OCFS2_OSB_HARD_RO;
625	spin_unlock(&osb->osb_lock);
626
627	return ret;
628}
629
630static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
631{
632	int ret;
633
634	spin_lock(&osb->osb_lock);
635	ret = osb->osb_flags & OCFS2_OSB_SOFT_RO;
636	spin_unlock(&osb->osb_lock);
637
638	return ret;
639}
640
641static inline int ocfs2_clusterinfo_valid(struct ocfs2_super *osb)
642{
643	return (osb->s_feature_incompat &
644		(OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK |
645		 OCFS2_FEATURE_INCOMPAT_CLUSTERINFO));
646}
647
648static inline int ocfs2_userspace_stack(struct ocfs2_super *osb)
649{
650	if (ocfs2_clusterinfo_valid(osb) &&
651	    memcmp(osb->osb_cluster_stack, OCFS2_CLASSIC_CLUSTER_STACK,
652		   OCFS2_STACK_LABEL_LEN))
653		return 1;
654	return 0;
655}
656
657static inline int ocfs2_o2cb_stack(struct ocfs2_super *osb)
658{
659	if (ocfs2_clusterinfo_valid(osb) &&
660	    !memcmp(osb->osb_cluster_stack, OCFS2_CLASSIC_CLUSTER_STACK,
661		   OCFS2_STACK_LABEL_LEN))
662		return 1;
663	return 0;
664}
665
666static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
667{
668	return ocfs2_o2cb_stack(osb) &&
669		(osb->osb_stackflags & OCFS2_CLUSTER_O2CB_GLOBAL_HEARTBEAT);
670}
671
672static inline int ocfs2_mount_local(struct ocfs2_super *osb)
673{
674	return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
675}
676
677static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
678{
679	return (osb->s_feature_incompat &
680		OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP);
681}
682
683
684#define OCFS2_IS_VALID_DINODE(ptr)					\
685	(!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE))
686
687#define OCFS2_IS_VALID_EXTENT_BLOCK(ptr)				\
688	(!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE))
689
690#define OCFS2_IS_VALID_GROUP_DESC(ptr)					\
691	(!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE))
692
693
694#define OCFS2_IS_VALID_XATTR_BLOCK(ptr)					\
695	(!strcmp((ptr)->xb_signature, OCFS2_XATTR_BLOCK_SIGNATURE))
696
697#define OCFS2_IS_VALID_DIR_TRAILER(ptr)					\
698	(!strcmp((ptr)->db_signature, OCFS2_DIR_TRAILER_SIGNATURE))
699
700#define OCFS2_IS_VALID_DX_ROOT(ptr)					\
701	(!strcmp((ptr)->dr_signature, OCFS2_DX_ROOT_SIGNATURE))
702
703#define OCFS2_IS_VALID_DX_LEAF(ptr)					\
704	(!strcmp((ptr)->dl_signature, OCFS2_DX_LEAF_SIGNATURE))
705
706#define OCFS2_IS_VALID_REFCOUNT_BLOCK(ptr)				\
707	(!strcmp((ptr)->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE))
708
709static inline unsigned long ino_from_blkno(struct super_block *sb,
710					   u64 blkno)
711{
712	return (unsigned long)(blkno & (u64)ULONG_MAX);
713}
714
715static inline u64 ocfs2_clusters_to_blocks(struct super_block *sb,
716					   u32 clusters)
717{
718	int c_to_b_bits = OCFS2_SB(sb)->s_clustersize_bits -
719		sb->s_blocksize_bits;
720
721	return (u64)clusters << c_to_b_bits;
722}
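/*
 * Editorial note (not part of the kernel header): worked example with
 * assumed geometry -- 4 KB blocks (s_blocksize_bits = 12) and 64 KB
 * clusters (s_clustersize_bits = 16) give c_to_b_bits = 4, so
 * 3 clusters map to 3 << 4 = 48 blocks.
 */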
723
724static inline u32 ocfs2_clusters_for_blocks(struct super_block *sb,
725		u64 blocks)
726{
727	int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
728			sb->s_blocksize_bits;
729
730	blocks += (1 << b_to_c_bits) - 1;
731	return (u32)(blocks >> b_to_c_bits);
732}
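/*
 * Editorial note (not part of the kernel header): unlike the plain
 * shift in ocfs2_blocks_to_clusters() below, this rounds up.  With the
 * same assumed geometry (4 KB blocks, 64 KB clusters), 17 blocks become
 * (17 + 15) >> 4 = 2 clusters.
 */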
733
734static inline u32 ocfs2_blocks_to_clusters(struct super_block *sb,
735					   u64 blocks)
736{
737	int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
738		sb->s_blocksize_bits;
739
740	return (u32)(blocks >> b_to_c_bits);
741}
742
743static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb,
744						    u64 bytes)
745{
746	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
747	unsigned int clusters;
748
749	bytes += OCFS2_SB(sb)->s_clustersize - 1;
750	/* OCFS2 just cannot have enough clusters to overflow this */
751	clusters = (unsigned int)(bytes >> cl_bits);
752
753	return clusters;
754}
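/*
 * Editorial note (not part of the kernel header): this rounds up, so
 * with an assumed 64 KB cluster size a single byte still costs one
 * cluster and 65537 bytes cost two; ocfs2_bytes_to_clusters() below
 * truncates instead.
 */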
755
756static inline unsigned int ocfs2_bytes_to_clusters(struct super_block *sb,
757		u64 bytes)
758{
759	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
760	unsigned int clusters;
761
762	clusters = (unsigned int)(bytes >> cl_bits);
763	return clusters;
764}
765
766static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb,
767					 u64 bytes)
768{
769	bytes += sb->s_blocksize - 1;
770	return bytes >> sb->s_blocksize_bits;
771}
772
773static inline u64 ocfs2_clusters_to_bytes(struct super_block *sb,
774					  u32 clusters)
775{
776	return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits;
777}
778
779static inline u64 ocfs2_block_to_cluster_start(struct super_block *sb,
780					       u64 blocks)
781{
782	int bits = OCFS2_SB(sb)->s_clustersize_bits - sb->s_blocksize_bits;
783	unsigned int clusters;
784
785	clusters = ocfs2_blocks_to_clusters(sb, blocks);
786	return (u64)clusters << bits;
787}
788
789static inline u64 ocfs2_align_bytes_to_clusters(struct super_block *sb,
790						u64 bytes)
791{
792	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
793	unsigned int clusters;
794
795	clusters = ocfs2_clusters_for_bytes(sb, bytes);
796	return (u64)clusters << cl_bits;
797}
798
799static inline u64 ocfs2_align_bytes_to_blocks(struct super_block *sb,
800					      u64 bytes)
801{
802	u64 blocks;
803
804        blocks = ocfs2_blocks_for_bytes(sb, bytes);
805	return blocks << sb->s_blocksize_bits;
806}
807
808static inline unsigned long ocfs2_align_bytes_to_sectors(u64 bytes)
809{
810	return (unsigned long)((bytes + 511) >> 9);
811}
812
813static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
814							unsigned long pg_index)
815{
816	u32 clusters = pg_index;
817	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
818
819	if (unlikely(PAGE_SHIFT > cbits))
820		clusters = pg_index << (PAGE_SHIFT - cbits);
821	else if (PAGE_SHIFT < cbits)
822		clusters = pg_index >> (cbits - PAGE_SHIFT);
823
824	return clusters;
825}
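/*
 * Editorial note (not part of the kernel header): with the common case
 * of 4 KB pages (PAGE_SHIFT = 12) and an assumed 64 KB cluster size
 * (cbits = 16), page index 32 falls in cluster 32 >> 4 = 2.  The
 * PAGE_SHIFT > cbits branch only matters on architectures whose page
 * size exceeds the 4 KB minimum cluster size (e.g. 64 KB pages).
 */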
826
827/*
828 * Find the 1st page index which covers the given clusters.
829 */
830static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
831							u32 clusters)
832{
833	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
834        pgoff_t index = clusters;
835
836	if (PAGE_SHIFT > cbits) {
837		index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
838	} else if (PAGE_SHIFT < cbits) {
839		index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
840	}
841
842	return index;
843}
844
845static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
846{
847	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
848	unsigned int pages_per_cluster = 1;
849
850	if (PAGE_SHIFT < cbits)
851		pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
852
853	return pages_per_cluster;
854}
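/*
 * Editorial note (not part of the kernel header): with 4 KB pages and
 * an assumed 64 KB cluster size this returns 1 << (16 - 12) = 16 pages
 * per cluster; when the cluster size does not exceed the page size the
 * answer stays 1.
 */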
855
856static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
857						       unsigned int megs)
858{
859	BUILD_BUG_ON(OCFS2_MAX_CLUSTERSIZE > 1048576);
860
861	return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
862}
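/*
 * Editorial note (not part of the kernel header): with an assumed 4 KB
 * cluster size this is megs << (20 - 12), so 4 MB corresponds to 1024
 * clusters; ocfs2_clusters_to_megabytes() below is the truncating
 * inverse.
 */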
863
864static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
865						       unsigned int clusters)
866{
867	return clusters >> (20 - OCFS2_SB(sb)->s_clustersize_bits);
868}
869
870static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
871{
872	__set_bit_le(bit, bitmap);
873}
874#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
875
876static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
877{
878	__clear_bit_le(bit, bitmap);
879}
880#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
881
882#define ocfs2_test_bit test_bit_le
883#define ocfs2_find_next_zero_bit find_next_zero_bit_le
884#define ocfs2_find_next_bit find_next_bit_le
885
886static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
887{
888#if BITS_PER_LONG == 64
889	*bit += ((unsigned long) addr & 7UL) << 3;
890	addr = (void *) ((unsigned long) addr & ~7UL);
891#elif BITS_PER_LONG == 32
892	*bit += ((unsigned long) addr & 3UL) << 3;
893	addr = (void *) ((unsigned long) addr & ~3UL);
894#else
895#error "how many bits you are?!"
896#endif
897	return addr;
898}
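/*
 * Editorial note (not part of the kernel header): worked example on a
 * 64-bit build -- for an address 5 bytes past an 8-byte boundary the
 * bit offset grows by 5 << 3 = 40 and the address is rounded down to
 * that boundary, so the little-endian bit helpers above always see a
 * long-aligned base.
 */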
899
900static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
901{
902	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
903	ocfs2_set_bit(bit, bitmap);
904}
905
906static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
907{
908	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
909	ocfs2_clear_bit(bit, bitmap);
910}
911
912static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
913{
914	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
915	return ocfs2_test_bit(bit, bitmap);
916}
917
918static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
919							int start)
920{
921	int fix = 0, ret, tmpmax;
922	bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
923	tmpmax = max + fix;
924	start += fix;
925
926	ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
927	if (ret > max)
928		return max;
929	return ret;
930}
931
932#endif  /* OCFS2_H */
933