v3.1
 
/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/* Bad block numbers are stored sorted in a single page.
 * 64bits is used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512bytes sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	AutoDetected	7		/* added by auto-detect */
#define Blocked		8		/* An error occurred but has not yet
					 * been acknowledged by the metadata
					 * handler, so don't allow writes
					 * until it is cleared */
#define WriteErrorSeen	9		/* A write error has been seen on this
					 * device
					 */
#define FaultRecorded	10		/* Intermediate state for clearing
					 * Blocked.  The Fault is/will-be
					 * recorded in the metadata, but that
					 * metadata hasn't been stored safely
					 * on disk yet.
					 */
#define BlockedBadBlocks 11		/* A writer is blocked because they
					 * found an unacknowledged bad-block.
					 * This can safely be cleared at any
					 * time, and the writer will re-check.
					 * It may be set at any time, and at
					 * worst the writer will timeout and
					 * re-check.  So setting it as
					 * accurately as possible is good, but
					 * not absolutely critical.
					 */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t	recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state; /* handle for 'state'
					   * sysfs entry */

	struct badblocks {
		int	count;		/* count of bad blocks */
		int	unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none
					 */
		int	shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled.*/
		u64	*page;		/* badblock list */
		int	changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};

#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))

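/* Illustrative sketch (not part of the original header): how one 64-bit
 * bad-block entry round-trips through the BB_* macros above.  The start
 * sector lives in bits 9-62 (54 bits), the length minus one in the low
 * 9 bits (hence BB_MAX_LEN == 512), and the 'acknowledged' flag in bit 63.
 */
#if 0	/* example only, not compiled */
	u64 bb = BB_MAKE(4096, 8, 1);		/* 8 bad sectors at 4096, acked */
	sector_t first = BB_OFFSET(bb);		/* == 4096 */
	int len = BB_LEN(bb);			/* == 8 (stored as 7) */
	int acked = BB_ACK(bb);			/* == 1 */
#endif
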
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
			      int acknowledged);
extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
extern void md_ack_all_badblocks(struct badblocks *bb);

struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3    /* First use of array, needs initialization */

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	int				ready; /* See when safe to pass
						* IO requests down */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set*/
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors; /* exported array size */
	int				external_size; /* size managed
							* externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	atomic_t			plug_cnt;	/* If device is expecting
							 * more bios soon.
							 */
	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt; /* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct sysfs_dirent		*sysfs_action;  /* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t			write_lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap                   *bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
}

static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	sysfs_remove_link(&mddev->kobj, nm);
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)

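/* Illustrative sketch (not part of the original header): typical use of
 * rdev_for_each() in a hypothetical helper that counts in-sync members.
 */
#if 0	/* example only, not compiled */
static int count_in_sync(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;
	int n = 0;

	rdev_for_each(rdev, tmp, mddev)
		if (test_bit(In_sync, &rdev->flags))
			n++;
	return n;
}
#endif
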
typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long           flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd) 		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd) 			\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
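
/* Illustrative sketch (not part of the original header): the macro must be
 * entered with 'lock' held; it drops the lock around schedule() and retakes
 * it before re-testing 'condition'.  The fourth argument is a statement run
 * each iteration after the lock is dropped.  A hypothetical caller:
 */
#if 0	/* example only, not compiled */
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    atomic_read(&mddev->pending_writes) == 0,
			    mddev->write_lock,
			    md_wakeup_thread(mddev->thread));
	spin_unlock_irq(&mddev->write_lock);
#endif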

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern int md_rdev_init(mdk_rdev_t *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);

#endif /* _MD_MD_H */
v6.8
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)

/*
 * The struct embedded in rdev is used to serialize IO.
 */
struct serial_in_rdev {
	struct rb_root_cached serial_rb;
	spinlock_t serial_lock;
	wait_queue_head_t serial_io_wait;
};

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512bytes sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */
	struct bdev_handle *bdev_handle;	/* Handle from open for bdev */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t	last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */

	struct serial_in_rdev *serial;  /* used for raid1 io serialization */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					   * sysfs entry */
	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_unack_badblocks;
	/* handle for 'bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_badblocks;
	struct badblocks badblocks;

	struct {
		short offset;	/* Offset from superblock to start of PPL.
				 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	ExternalBbl,            /* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	CollisionCheck,		/*
				 * check if there is collision between raid1
				 * serial bios.
				 */
};
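
/* Illustrative sketch (not part of the original header): rdev->flags is a
 * plain bit set indexed by the enum above and manipulated with the generic
 * bitops, e.g. the three device states described in md_rdev's comment:
 */
#if 0	/* example only, not compiled */
	set_bit(WriteMostly, &rdev->flags);
	if (test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags))
		; /* the "not working" state */
	clear_bit(Blocked, &rdev->flags);
#endif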

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;

/**
 * enum mddev_flags - md device flags.
 * @MD_ARRAY_FIRST_USE: First use of array, needs initialization.
 * @MD_CLOSING: If set, we are closing the array, do not open it then.
 * @MD_JOURNAL_CLEAN: A raid with journal is already clean.
 * @MD_HAS_JOURNAL: The raid array has journal feature set.
 * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only; this node has already taken
 *			       the resync lock and needs to release it.
 * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as
 *			    calls to md_error() will never cause the array to
 *			    become failed.
 * @MD_HAS_PPL:  The raid array has PPL feature set.
 * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
 * @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
 *		   the array is ready yet.
 * @MD_BROKEN: This is used to stop writes and mark array as failed.
 * @MD_DELETED: This device is being deleted
 *
 * change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
 */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,
	MD_CLOSING,
	MD_JOURNAL_CLEAN,
	MD_HAS_JOURNAL,
	MD_CLUSTER_RESYNC_LOCKED,
	MD_FAILFAST_SUPPORTED,
	MD_HAS_PPL,
	MD_HAS_MULTIPLE_PPLS,
	MD_NOT_READY,
	MD_BROKEN,
	MD_DELETED,
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

#define NR_SERIAL_INFOS		8
/* record current range of serialize IOs */
struct serial_info {
	struct rb_node node;
	sector_t start;		/* start sector of rb node */
	sector_t last;		/* end sector of rb node */
	sector_t _subtree_last; /* highest sector in subtree of rb node */
};

/*
 * mddev->curr_resync stores the current sector of the resync but
 * also has some overloaded values.
 */
enum {
	/* No resync in progress */
	MD_RESYNC_NONE = 0,
	/* Yielded to allow another conflicting resync to commence */
	MD_RESYNC_YIELDED = 1,
	/* Delayed to check that there is no conflict with another sync */
	MD_RESYNC_DELAYED = 2,
	/* Any value greater than or equal to this is in an active resync */
	MD_RESYNC_ACTIVE = 3,
};
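
/* Illustrative sketch (not part of the original header): since the first
 * values overload mddev->curr_resync, code asking "is a resync actually
 * running?" compares against MD_RESYNC_ACTIVE instead of testing non-zero:
 */
#if 0	/* example only, not compiled */
	if (mddev->curr_resync >= MD_RESYNC_ACTIVE)
		; /* curr_resync is a real sector position */
#endif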

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				suspended;
	struct mutex			suspend_mutex;
	struct percpu_ref		active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set*/
	int				chunk_sectors;
	time64_t			ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors; /* exported array size */
	int				external_size; /* size managed
							* externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread __rcu		*thread;	/* management thread */
	struct md_thread __rcu		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt; /* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with disk->open_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> disk->open_mutex
	 *  disk->open_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */
	struct kernfs_node		*sysfs_completed;	/* handle for 'sync_completed' */
	struct kernfs_node		*sysfs_degraded;	/* handle for 'degraded' */
	struct kernfs_node		*sysfs_level;		/* handle for 'level' */

	/* used for delayed sysfs removal */
	struct work_struct del_work;
	/* used for register new sync thread */
	struct work_struct sync_work;

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
							* default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
		char                    cluster_name[64]; /* Name of the cluster */
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	const struct attribute_group	*to_remove;

	struct bio_set			bio_set;
	struct bio_set			sync_set; /* for sync operations like
						   * metadata and bitmap writes
						   */
	struct bio_set			io_clone_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed
						* flush was started.
						*/
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	mempool_t *serial_info_pool;
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */
	unsigned int			noio_flag; /* for memalloc scope API */

	/*
	 * Temporarily store rdev that will be finally removed when
	 * reconfig_mutex is unlocked, protected by reconfig_mutex.
	 */
	struct list_head		deleting;

	/* Used to synchronize idle and frozen for action_store() */
	struct mutex			sync_mutex;
	/* The sequence number for sync thread */
	atomic_t sync_seq;

	bool	has_superblocks:1;
	bool	fail_last_dev:1;
	bool	serialize_policy:1;
};

enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
};

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
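
/* Illustrative sketch (not part of the original header): the __must_check
 * return value forces callers to handle an interrupted lock attempt:
 */
#if 0	/* example only, not compiled */
	int err = mddev_lock(mddev);
	if (err)
		return err;
	/* ... reconfigure under reconfig_mutex ... */
	mddev_unlock(mddev);
#endif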

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
	md_sync_acct(bio->bi_bdev, nr_sectors);
}

struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up work that does NOT require md_thread. Tasks that
	 * require md_thread should go into start()
	 */
	int (*run)(struct mddev *mddev);
	/* start up work that requires md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	void (*update_reshape_pos) (struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce) (struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern const struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)

struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

struct md_io_clone {
	struct mddev	*mddev;
	struct bio	*orig_bio;
	unsigned long	start_time;
	struct bio	bio_clone;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp);
extern void md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
				      struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline bool is_rdev_broken(struct md_rdev *rdev)
{
	return !disk_live(rdev->bdev->bd_disk);
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}

static inline int mddev_suspend_and_lock(struct mddev *mddev)
{
	int ret;

	ret = mddev_suspend(mddev, true);
	if (ret)
		return ret;

	ret = mddev_lock(mddev);
	if (ret)
		mddev_resume(mddev);

	return ret;
}

static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev)
{
	mddev_suspend(mddev, false);
	mutex_lock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock_and_resume(struct mddev *mddev)
{
	mddev_unlock(mddev);
	mddev_resume(mddev);
}
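
/* Illustrative sketch (not part of the original header): the helpers above
 * are intended as a bracketing pair around reconfiguration, e.g. in a
 * hypothetical sysfs store path:
 */
#if 0	/* example only, not compiled */
	int err = mddev_suspend_and_lock(mddev);
	if (err)
		return err;
	/* ... change configuration while IO is suspended ... */
	mddev_unlock_and_resume(mddev);
#endif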

struct mdu_array_info_s;
struct mdu_disk_info_s;

extern int mdp_major;
extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);

extern const struct block_device_operations md_fops;

#endif /* _MD_MD_H */