v5.9
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3   md.h : kernel internal structure of the Linux MD driver
  4          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
  5
  6*/
  7
  8#ifndef _MD_MD_H
  9#define _MD_MD_H
 10
 11#include <linux/blkdev.h>
 12#include <linux/backing-dev.h>
 13#include <linux/badblocks.h>
 14#include <linux/kobject.h>
 15#include <linux/list.h>
 16#include <linux/mm.h>
 17#include <linux/mutex.h>
 18#include <linux/timer.h>
 19#include <linux/wait.h>
 20#include <linux/workqueue.h>
 21#include "md-cluster.h"
 22
 23#define MaxSector (~(sector_t)0)
 24
 25/*
 26 * These flags should really be called "NO_RETRY" rather than
 27 * "FAILFAST" because they don't make any promise about time lapse,
 28 * only about the number of retries, which will be zero.
 29 * REQ_FAILFAST_DRIVER is not included because
 30 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 31 * seems to suggest that the errors it avoids retrying should usually
 32 * be retried.
 33 */
 34#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
 35
 36/*
 37 * The struct embedded in rdev is used to serialize IO.
 38 */
 39struct serial_in_rdev {
 40	struct rb_root_cached serial_rb;
 41	spinlock_t serial_lock;
 42	wait_queue_head_t serial_io_wait;
 43};
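
   A hedged illustration (not part of md.h): the three fields above form a
   self-contained serializer, so setting one up is just the usual kernel
   primitives. The helper name below is hypothetical; the real
   initialization lives in md.c.

	static void example_serial_init(struct serial_in_rdev *serial)
	{
		serial->serial_rb = RB_ROOT_CACHED;	/* empty cached rbtree */
		spin_lock_init(&serial->serial_lock);
		init_waitqueue_head(&serial->serial_io_wait);
	}
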
 44
 45/*
 46 * MD's 'extended' device
 47 */
 48struct md_rdev {
 49	struct list_head same_set;	/* RAID devices within the same set */
 50
 51	sector_t sectors;		/* Device size (in 512bytes sectors) */
 52	struct mddev *mddev;		/* RAID array if running */
 53	int last_events;		/* IO event timestamp */
 54
 55	/*
 56	 * If meta_bdev is non-NULL, it means that a separate device is
 57	 * being used to store the metadata (superblock/bitmap) which
 58	 * would otherwise be contained on the same device as the data (bdev).
 59	 */
 60	struct block_device *meta_bdev;
 61	struct block_device *bdev;	/* block device handle */
 62
 63	struct page	*sb_page, *bb_page;
 64	int		sb_loaded;
 65	__u64		sb_events;
 66	sector_t	data_offset;	/* start of data in array */
 67	sector_t	new_data_offset;/* only relevant while reshaping */
 68	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
 69	int		sb_size;	/* bytes in the superblock */
 70	int		preferred_minor;	/* autorun support */
 71
 72	struct kobject	kobj;
 73
 74	/* A device can be in one of three states based on two flags:
 75	 * Not working:   faulty==1 in_sync==0
 76	 * Fully working: faulty==0 in_sync==1
 77	 * Working, but not
 78	 * in sync with array
 79	 *                faulty==0 in_sync==0
 80	 *
 81	 * It can never have faulty==1, in_sync==1
 82	 * This reduces the burden of testing multiple flags in many cases
 83	 */
 84
 85	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
 86	wait_queue_head_t blocked_wait;
 87
 88	int desc_nr;			/* descriptor index in the superblock */
 89	int raid_disk;			/* role of device in array */
 90	int new_raid_disk;		/* role that the device will have in
 91					 * the array after a level-change completes.
 92					 */
 93	int saved_raid_disk;		/* role that device used to have in the
 94					 * array and could again if we did a partial
 95					 * resync from the bitmap
 96					 */
 97	union {
 98		sector_t recovery_offset;/* If this device has been partially
 99					 * recovered, this is where we were
100					 * up to.
101					 */
102		sector_t journal_tail;	/* If this device is a journal device,
103					 * this is the journal tail (journal
104					 * recovery start point)
105					 */
106	};
107
108	atomic_t	nr_pending;	/* number of pending requests.
109					 * only maintained for arrays that
110					 * support hot removal
111					 */
112	atomic_t	read_errors;	/* number of consecutive read errors that
113					 * we have tried to ignore.
114					 */
115	time64_t	last_read_error;	/* monotonic time since our
116						 * last read error
117						 */
118	atomic_t	corrected_errors; /* number of corrected read errors,
119					   * for reporting to userspace and storing
120					   * in superblock.
121					   */
122
123	struct serial_in_rdev *serial;  /* used for raid1 io serialization */
124
125	struct work_struct del_work;	/* used for delayed sysfs removal */
126
127	struct kernfs_node *sysfs_state; /* handle for 'state'
128					   * sysfs entry */
129	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
130	struct kernfs_node *sysfs_unack_badblocks;
131	/* handle for 'bad_blocks' sysfs dentry */
132	struct kernfs_node *sysfs_badblocks;
133	struct badblocks badblocks;
134
135	struct {
136		short offset;	/* Offset from superblock to start of PPL.
137				 * Not used by external metadata. */
138		unsigned int size;	/* Size in sectors of the PPL space */
139		sector_t sector;	/* First sector of the PPL space */
140	} ppl;
141};
142enum flag_bits {
143	Faulty,			/* device is known to have a fault */
144	In_sync,		/* device is in_sync with rest of array */
145	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
146				 * bitmap-based recovery to get fully in sync.
147				 * The bit is only meaningful before device
148				 * has been passed to pers->hot_add_disk.
149				 */
150	WriteMostly,		/* Avoid reading if at all possible */
151	AutoDetected,		/* added by auto-detect */
152	Blocked,		/* An error occurred but has not yet
153				 * been acknowledged by the metadata
154				 * handler, so don't allow writes
155				 * until it is cleared */
156	WriteErrorSeen,		/* A write error has been seen on this
157				 * device
158				 */
159	FaultRecorded,		/* Intermediate state for clearing
160				 * Blocked.  The Fault is/will-be
161				 * recorded in the metadata, but that
162				 * metadata hasn't been stored safely
163				 * on disk yet.
164				 */
165	BlockedBadBlocks,	/* A writer is blocked because they
166				 * found an unacknowledged bad-block.
167				 * This can safely be cleared at any
168				 * time, and the writer will re-check.
169				 * It may be set at any time, and at
170				 * worst the writer will timeout and
171				 * re-check.  So setting it as
172				 * accurately as possible is good, but
173				 * not absolutely critical.
174				 */
175	WantReplacement,	/* This device is a candidate to be
176				 * hot-replaced, either because it has
177				 * reported some faults, or because
178				 * of explicit request.
179				 */
180	Replacement,		/* This device is a replacement for
181				 * a want_replacement device with same
182				 * raid_disk number.
183				 */
184	Candidate,		/* For clustered environments only:
185				 * This device is seen locally but not
186				 * by the whole cluster
187				 */
188	Journal,		/* This device is used as journal for
189				 * raid-5/6.
190				 * Usually, this device should be faster
191				 * than other devices in the array
192				 */
193	ClusterRemove,
194	RemoveSynchronized,	/* synchronize_rcu() was called after
195				 * this device was known to be faulty,
196				 * so it is safe to remove without
197				 * another synchronize_rcu() call.
198				 */
199	ExternalBbl,            /* External metadata provides bad
200				 * block management for a disk
201				 */
202	FailFast,		/* Minimal retries should be attempted on
203				 * this device, so use REQ_FAILFAST_DEV.
204				 * Also don't try to repair failed reads.
 205				 * It is expected that no bad block log
206				 * is present.
207				 */
208	LastDev,		/* Seems to be the last working dev as
209				 * it didn't fail, so don't use FailFast
210				 * any more for metadata
211				 */
212	CollisionCheck,		/*
 213				 * check if there is a collision between
 214				 * raid1 serialized bios.
215				 */
216};
217
218static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
219			      sector_t *first_bad, int *bad_sectors)
220{
221	if (unlikely(rdev->badblocks.count)) {
222		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
223					sectors,
224					first_bad, bad_sectors);
225		if (rv)
226			*first_bad -= rdev->data_offset;
227		return rv;
228	}
229	return 0;
230}
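
   A hedged usage sketch (the example_* name is hypothetical): callers pass a
   sector relative to the data area and get first_bad back in the same
   coordinate space, since is_badblock() translates to and from the on-disk
   offset itself.

	static bool example_range_is_clean(struct md_rdev *rdev,
					   sector_t sector, int sectors)
	{
		sector_t first_bad;
		int bad_sectors;

		/* a non-zero return means the range overlaps recorded bad blocks */
		return is_badblock(rdev, sector, sectors,
				   &first_bad, &bad_sectors) == 0;
	}
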
231extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
232			      int is_new);
233extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
234				int is_new);
235struct md_cluster_info;
236
237/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
238enum mddev_flags {
239	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
240	MD_CLOSING,		/* If set, we are closing the array, do not open
241				 * it then */
242	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
243	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
244	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
245				   * already took resync lock, need to
246				   * release the lock */
247	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
248				 * supported as calls to md_error() will
249				 * never cause the array to become failed.
250				 */
251	MD_HAS_PPL,		/* The raid array has PPL feature set */
252	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
253	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
254				 * the metadata without taking reconfig_mutex.
255				 */
256	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
257				 * without explicitly holding reconfig_mutex.
258				 */
259	MD_NOT_READY,		/* do_md_run() is active, so 'array_state'
260				 * must not report that array is ready yet
261				 */
262	MD_BROKEN,              /* This is used in RAID-0/LINEAR only, to stop
263				 * I/O in case an array member is gone/failed.
264				 */
265};
266
267enum mddev_sb_flags {
268	MD_SB_CHANGE_DEVS,		/* Some device status has changed */
269	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
270	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
271	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
272};
273
274#define NR_SERIAL_INFOS		8
275/* record current range of serialize IOs */
276struct serial_info {
277	struct rb_node node;
278	sector_t start;		/* start sector of rb node */
279	sector_t last;		/* end sector of rb node */
280	sector_t _subtree_last; /* highest sector in subtree of rb node */
281};
282
283struct mddev {
284	void				*private;
285	struct md_personality		*pers;
286	dev_t				unit;
287	int				md_minor;
288	struct list_head		disks;
289	unsigned long			flags;
290	unsigned long			sb_flags;
291
292	int				suspended;
293	atomic_t			active_io;
294	int				ro;
295	int				sysfs_active; /* set when sysfs deletes
296						       * are happening, so run/
297						       * takeover/stop are not safe
298						       */
299	struct gendisk			*gendisk;
300
301	struct kobject			kobj;
302	int				hold_active;
303#define	UNTIL_IOCTL	1
304#define	UNTIL_STOP	2
305
306	/* Superblock information */
307	int				major_version,
308					minor_version,
309					patch_version;
310	int				persistent;
311	int				external;	/* metadata is
312							 * managed externally */
313	char				metadata_type[17]; /* externally set*/
314	int				chunk_sectors;
315	time64_t			ctime, utime;
316	int				level, layout;
317	char				clevel[16];
318	int				raid_disks;
319	int				max_disks;
320	sector_t			dev_sectors;	/* used size of
321							 * component devices */
322	sector_t			array_sectors; /* exported array size */
323	int				external_size; /* size managed
324							* externally */
325	__u64				events;
326	/* If the last 'event' was simply a clean->dirty transition, and
327	 * we didn't write it to the spares, then it is safe and simple
328	 * to just decrement the event count on a dirty->clean transition.
329	 * So we record that possibility here.
330	 */
331	int				can_decrease_events;
332
333	char				uuid[16];
334
335	/* If the array is being reshaped, we need to record the
336	 * new shape and an indication of where we are up to.
337	 * This is written to the superblock.
338	 * If reshape_position is MaxSector, then no reshape is happening (yet).
339	 */
340	sector_t			reshape_position;
341	int				delta_disks, new_level, new_layout;
342	int				new_chunk_sectors;
343	int				reshape_backwards;
344
345	struct md_thread		*thread;	/* management thread */
346	struct md_thread		*sync_thread;	/* doing resync or reconstruct */
347
348	/* 'last_sync_action' is initialized to "none".  It is set when a
349	 * sync operation (i.e "data-check", "requested-resync", "resync",
350	 * "recovery", or "reshape") is started.  It holds this value even
351	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
352	 * or finished).  It is overwritten when a new sync operation is begun.
353	 */
354	char				*last_sync_action;
355	sector_t			curr_resync;	/* last block scheduled */
356	/* As resync requests can complete out of order, we cannot easily track
357	 * how much resync has been completed.  So we occasionally pause until
358	 * everything completes, then set curr_resync_completed to curr_resync.
359	 * As such it may be well behind the real resync mark, but it is a value
360	 * we are certain of.
361	 */
362	sector_t			curr_resync_completed;
363	unsigned long			resync_mark;	/* a recent timestamp */
364	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
365	sector_t			curr_mark_cnt; /* blocks scheduled now */
366
367	sector_t			resync_max_sectors; /* may be set by personality */
368
369	atomic64_t			resync_mismatches; /* count of sectors where
370							    * parity/replica mismatch found
371							    */
372
373	/* allow user-space to request suspension of IO to regions of the array */
374	sector_t			suspend_lo;
375	sector_t			suspend_hi;
376	/* if zero, use the system-wide default */
377	int				sync_speed_min;
378	int				sync_speed_max;
379
380	/* resync even though the same disks are shared among md-devices */
381	int				parallel_resync;
382
383	int				ok_start_degraded;
384
385	unsigned long			recovery;
386	/* If a RAID personality determines that recovery (of a particular
387	 * device) will fail due to a read error on the source device, it
388	 * takes a copy of this number and does not attempt recovery again
389	 * until this number changes.
390	 */
391	int				recovery_disabled;
392
 393	int				in_sync;	/* known to not need resync */
394	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
395	 * that we are never stopping an array while it is open.
396	 * 'reconfig_mutex' protects all other reconfiguration.
397	 * These locks are separate due to conflicting interactions
398	 * with bdev->bd_mutex.
399	 * Lock ordering is:
400	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
401	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
402	 */
403	struct mutex			open_mutex;
404	struct mutex			reconfig_mutex;
405	atomic_t			active;		/* general refcount */
406	atomic_t			openers;	/* number of active opens */
407
408	int				changed;	/* True if we might need to
409							 * reread partition info */
410	int				degraded;	/* whether md should consider
411							 * adding a spare
412							 */
413
414	atomic_t			recovery_active; /* blocks scheduled, but not written */
415	wait_queue_head_t		recovery_wait;
416	sector_t			recovery_cp;
417	sector_t			resync_min;	/* user requested sync
418							 * starts here */
419	sector_t			resync_max;	/* resync should pause
420							 * when it gets here */
421
422	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
423							 * file in sysfs.
424							 */
425	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */
426	struct kernfs_node		*sysfs_completed;	/*handle for 'sync_completed' */
427	struct kernfs_node		*sysfs_degraded;	/*handle for 'degraded' */
428	struct kernfs_node		*sysfs_level;		/*handle for 'level' */
429
430	struct work_struct del_work;	/* used for delayed sysfs removal */
431
432	/* "lock" protects:
433	 *   flush_bio transition from NULL to !NULL
434	 *   rdev superblocks, events
435	 *   clearing MD_CHANGE_*
436	 *   in_sync - and related safemode and MD_CHANGE changes
437	 *   pers (also protected by reconfig_mutex and pending IO).
438	 *   clearing ->bitmap
439	 *   clearing ->bitmap_info.file
440	 *   changing ->resync_{min,max}
441	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
442	 */
443	spinlock_t			lock;
444	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
445	atomic_t			pending_writes;	/* number of active superblock writes */
446
447	unsigned int			safemode;	/* if set, update "clean" superblock
448							 * when no writes pending.
449							 */
450	unsigned int			safemode_delay;
451	struct timer_list		safemode_timer;
452	struct percpu_ref		writes_pending;
453	int				sync_checkers;	/* # of threads checking writes_pending */
454	struct request_queue		*queue;	/* for plugging ... */
455
456	struct bitmap			*bitmap; /* the bitmap for the device */
457	struct {
458		struct file		*file; /* the bitmap file */
459		loff_t			offset; /* offset from superblock of
460						 * start of bitmap. May be
461						 * negative, but not '0'
462						 * For external metadata, offset
463						 * from start of device.
464						 */
465		unsigned long		space; /* space available at this offset */
466		loff_t			default_offset; /* this is the offset to use when
467							 * hot-adding a bitmap.  It should
468							 * eventually be settable by sysfs.
469							 */
470		unsigned long		default_space; /* space available at
471							* default offset */
472		struct mutex		mutex;
473		unsigned long		chunksize;
474		unsigned long		daemon_sleep; /* how many jiffies between updates? */
475		unsigned long		max_write_behind; /* write-behind mode */
476		int			external;
477		int			nodes; /* Maximum number of nodes in the cluster */
478		char                    cluster_name[64]; /* Name of the cluster */
479	} bitmap_info;
480
481	atomic_t			max_corr_read_errors; /* max read retries */
482	struct list_head		all_mddevs;
483
484	struct attribute_group		*to_remove;
485
486	struct bio_set			bio_set;
487	struct bio_set			sync_set; /* for sync operations like
488						   * metadata and bitmap writes
489						   */
490	mempool_t			md_io_pool;
491
492	/* Generic flush handling.
493	 * The last to finish preflush schedules a worker to submit
494	 * the rest of the request (without the REQ_PREFLUSH flag).
495	 */
496	struct bio *flush_bio;
497	atomic_t flush_pending;
498	ktime_t start_flush, last_flush; /* last_flush is when the last completed
499					  * flush was started.
500					  */
501	struct work_struct flush_work;
502	struct work_struct event_work;	/* used by dm to report failure event */
503	mempool_t *serial_info_pool;
504	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
505	struct md_cluster_info		*cluster_info;
506	unsigned int			good_device_nr;	/* good device num within cluster raid */
507	unsigned int			noio_flag; /* for memalloc scope API */
508
509	bool	has_superblocks:1;
510	bool	fail_last_dev:1;
511	bool	serialize_policy:1;
512};
513
514enum recovery_flags {
515	/*
 516	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
517	 */
518	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
519	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
520	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
521	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
522	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
523	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
524	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
525	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
526	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
527	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
 528	MD_RECOVERY_ERROR,	/* sync action interrupted because of an I/O error */
529	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
530	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
531};
532
533static inline int __must_check mddev_lock(struct mddev *mddev)
534{
535	return mutex_lock_interruptible(&mddev->reconfig_mutex);
536}
537
538/* Sometimes we need to take the lock in a situation where
539 * failure due to interrupts is not acceptable.
540 */
541static inline void mddev_lock_nointr(struct mddev *mddev)
542{
543	mutex_lock(&mddev->reconfig_mutex);
544}
545
546static inline int mddev_trylock(struct mddev *mddev)
547{
548	return mutex_trylock(&mddev->reconfig_mutex);
549}
550extern void mddev_unlock(struct mddev *mddev);
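
   A hedged sketch of the expected calling pattern: mddev_lock() is
   interruptible and can fail with -EINTR, which is why it is marked
   __must_check. The function name is hypothetical.

	static int example_reconfigure(struct mddev *mddev)
	{
		int err = mddev_lock(mddev);	/* may return -EINTR */

		if (err)
			return err;
		/* ... reconfiguration protected by reconfig_mutex ... */
		mddev_unlock(mddev);
		return 0;
	}
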
551
552static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
553{
554	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
555}
556
557static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
558{
559	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
560}
561
562struct md_personality
563{
564	char *name;
565	int level;
566	struct list_head list;
567	struct module *owner;
568	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
569	/*
 570	 * start-up work that does NOT require md_thread; tasks that
 571	 * require md_thread should go into start()
572	 */
573	int (*run)(struct mddev *mddev);
 574	/* start-up work that requires md threads */
575	int (*start)(struct mddev *mddev);
576	void (*free)(struct mddev *mddev, void *priv);
577	void (*status)(struct seq_file *seq, struct mddev *mddev);
578	/* error_handler must set ->faulty and clear ->in_sync
579	 * if appropriate, and should abort recovery if needed
580	 */
581	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
582	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
583	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
584	int (*spare_active) (struct mddev *mddev);
585	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
586	int (*resize) (struct mddev *mddev, sector_t sectors);
587	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
588	int (*check_reshape) (struct mddev *mddev);
589	int (*start_reshape) (struct mddev *mddev);
590	void (*finish_reshape) (struct mddev *mddev);
591	void (*update_reshape_pos) (struct mddev *mddev);
592	/* quiesce suspends or resumes internal processing.
593	 * 1 - stop new actions and wait for action io to complete
594	 * 0 - return to normal behaviour
595	 */
596	void (*quiesce) (struct mddev *mddev, int quiesce);
597	/* takeover is used to transition an array from one
598	 * personality to another.  The new personality must be able
599	 * to handle the data in the current layout.
600	 * e.g. 2drive raid1 -> 2drive raid5
601	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
602	 * If the takeover succeeds, a new 'private' structure is returned.
603	 * This needs to be installed and then ->run used to activate the
604	 * array.
605	 */
606	void *(*takeover) (struct mddev *mddev);
607	/* Changes the consistency policy of an active array. */
608	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
609};
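
   A hedged, minimal sketch of how a personality is typically declared
   (compare raid0.c and friends); every example_* symbol is hypothetical and
   the stubs do nothing useful.

	static bool example_make_request(struct mddev *mddev, struct bio *bio)
	{
		bio_io_error(bio);	/* placeholder: fail all I/O */
		return true;		/* bio was consumed */
	}

	static int example_run(struct mddev *mddev)
	{
		return -EINVAL;		/* refuse to assemble */
	}

	static void example_free(struct mddev *mddev, void *priv)
	{
	}

	static struct md_personality example_personality = {
		.name		= "example",
		.level		= -1000,	/* hypothetical level number */
		.owner		= THIS_MODULE,
		.make_request	= example_make_request,
		.run		= example_run,
		.free		= example_free,
	};
	/* registered from module init via register_md_personality() */
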
610
611struct md_sysfs_entry {
612	struct attribute attr;
613	ssize_t (*show)(struct mddev *, char *);
614	ssize_t (*store)(struct mddev *, const char *, size_t);
615};
616extern struct attribute_group md_bitmap_group;
617
618static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
619{
620	if (sd)
621		return sysfs_get_dirent(sd, name);
622	return sd;
623}
624static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
625{
626	if (sd)
627		sysfs_notify_dirent(sd);
628}
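
   Hedged usage note: the _safe variants tolerate a NULL kernfs node, so
   callers can poke a sysfs attribute without checking whether it was ever
   created, e.g.:

	sysfs_notify_dirent_safe(rdev->sysfs_state);	/* wake 'state' pollers */
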
629
630static inline char * mdname (struct mddev * mddev)
631{
632	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
633}
634
635static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
636{
637	char nm[20];
638	if (!test_bit(Replacement, &rdev->flags) &&
639	    !test_bit(Journal, &rdev->flags) &&
640	    mddev->kobj.sd) {
641		sprintf(nm, "rd%d", rdev->raid_disk);
642		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
643	} else
644		return 0;
645}
646
647static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
648{
649	char nm[20];
650	if (!test_bit(Replacement, &rdev->flags) &&
651	    !test_bit(Journal, &rdev->flags) &&
652	    mddev->kobj.sd) {
653		sprintf(nm, "rd%d", rdev->raid_disk);
654		sysfs_remove_link(&mddev->kobj, nm);
655	}
656}
657
658/*
 659 * Iterates through an rdev list. It's safe to remove the
 660 * current 'rdev'; don't touch 'tmp' though.
661 */
662#define rdev_for_each_list(rdev, tmp, head)				\
663	list_for_each_entry_safe(rdev, tmp, head, same_set)
664
665/*
666 * iterates through the 'same array disks' ringlist
667 */
668#define rdev_for_each(rdev, mddev)				\
669	list_for_each_entry(rdev, &((mddev)->disks), same_set)
670
671#define rdev_for_each_safe(rdev, tmp, mddev)				\
672	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
673
674#define rdev_for_each_rcu(rdev, mddev)				\
675	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
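
   A hedged iteration sketch (example_* is hypothetical): rdev_for_each()
   expects reconfig_mutex to be held, while rdev_for_each_rcu() must run
   under rcu_read_lock().

	static int example_count_in_sync(struct mddev *mddev)
	{
		struct md_rdev *rdev;
		int cnt = 0;

		rdev_for_each(rdev, mddev)
			if (test_bit(In_sync, &rdev->flags))
				cnt++;
		return cnt;
	}
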
676
677struct md_thread {
678	void			(*run) (struct md_thread *thread);
679	struct mddev		*mddev;
680	wait_queue_head_t	wqueue;
681	unsigned long		flags;
682	struct task_struct	*tsk;
683	unsigned long		timeout;
684	void			*private;
685};
686
687#define THREAD_WAKEUP  0
688
689static inline void safe_put_page(struct page *p)
690{
691	if (p) put_page(p);
692}
693
694extern int register_md_personality(struct md_personality *p);
695extern int unregister_md_personality(struct md_personality *p);
696extern int register_md_cluster_operations(struct md_cluster_operations *ops,
697		struct module *module);
698extern int unregister_md_cluster_operations(void);
699extern int md_setup_cluster(struct mddev *mddev, int nodes);
700extern void md_cluster_stop(struct mddev *mddev);
701extern struct md_thread *md_register_thread(
702	void (*run)(struct md_thread *thread),
703	struct mddev *mddev,
704	const char *name);
705extern void md_unregister_thread(struct md_thread **threadp);
706extern void md_wakeup_thread(struct md_thread *thread);
707extern void md_check_recovery(struct mddev *mddev);
708extern void md_reap_sync_thread(struct mddev *mddev);
709extern int mddev_init_writes_pending(struct mddev *mddev);
710extern bool md_write_start(struct mddev *mddev, struct bio *bi);
711extern void md_write_inc(struct mddev *mddev, struct bio *bi);
712extern void md_write_end(struct mddev *mddev);
713extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
714extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
715extern void md_finish_reshape(struct mddev *mddev);
716
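   A hedged sketch of the md_thread lifecycle declared above (the handler
   name is hypothetical): a personality registers a daemon, kicks it with
   md_wakeup_thread(), and tears it down with md_unregister_thread().

	static void exampled(struct md_thread *thread)
	{
		/* runs whenever the thread is woken or its timeout expires */
		md_check_recovery(thread->mddev);
	}

	/* in ->run():  mddev->thread = md_register_thread(exampled, mddev, "example");
	 * on events:   md_wakeup_thread(mddev->thread);
	 * on teardown: md_unregister_thread(&mddev->thread);
	 */
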
717extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
718extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
719			   sector_t sector, int size, struct page *page);
720extern int md_super_wait(struct mddev *mddev);
721extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
722			struct page *page, int op, int op_flags,
723			bool metadata_op);
724extern void md_do_sync(struct md_thread *thread);
725extern void md_new_event(struct mddev *mddev);
726extern void md_allow_write(struct mddev *mddev);
727extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
728extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
729extern int md_check_no_bitmap(struct mddev *mddev);
730extern int md_integrity_register(struct mddev *mddev);
731extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
732extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
733
734extern void mddev_init(struct mddev *mddev);
735extern int md_run(struct mddev *mddev);
736extern int md_start(struct mddev *mddev);
737extern void md_stop(struct mddev *mddev);
738extern void md_stop_writes(struct mddev *mddev);
739extern int md_rdev_init(struct md_rdev *rdev);
740extern void md_rdev_clear(struct md_rdev *rdev);
741
742extern void md_handle_request(struct mddev *mddev, struct bio *bio);
743extern void mddev_suspend(struct mddev *mddev);
744extern void mddev_resume(struct mddev *mddev);
745extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
746				   struct mddev *mddev);
747
748extern void md_reload_sb(struct mddev *mddev, int raid_disk);
749extern void md_update_sb(struct mddev *mddev, int force);
750extern void md_kick_rdev_from_array(struct md_rdev * rdev);
751extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
752				     bool is_suspend);
753extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
754				      bool is_suspend);
755struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
756struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
757
758static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
759{
760	int flags = rdev->bdev->bd_disk->flags;
761
762	if (!(flags & GENHD_FL_UP)) {
763		if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
764			pr_warn("md: %s: %s array has a missing/failed member\n",
765				mdname(rdev->mddev), md_type);
766		return true;
767	}
768	return false;
769}
770
771static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
772{
773	int faulty = test_bit(Faulty, &rdev->flags);
774	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
775		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
776		md_wakeup_thread(mddev->thread);
777	}
778}
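
   Hedged usage note: rdev_dec_pending() is the completion-side half of a
   pair; submitters take the reference first, along these lines:

	atomic_inc(&rdev->nr_pending);	/* before issuing I/O to rdev */
	/* ... submit the bio ...; later, from its end_io callback: */
	rdev_dec_pending(rdev, mddev);
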
779
780extern struct md_cluster_operations *md_cluster_ops;
781static inline int mddev_is_clustered(struct mddev *mddev)
782{
783	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
784}
785
786/* clear unsupported mddev_flags */
787static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
788	unsigned long unsupported_flags)
789{
790	mddev->flags &= ~unsupported_flags;
791}
792
793static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
794{
795	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
796	    !bio->bi_disk->queue->limits.max_write_same_sectors)
797		mddev->queue->limits.max_write_same_sectors = 0;
798}
799
800static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
801{
802	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
803	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
804		mddev->queue->limits.max_write_zeroes_sectors = 0;
805}
806
807struct mdu_array_info_s;
808struct mdu_disk_info_s;
809
810extern int mdp_major;
811void md_autostart_arrays(int part);
812int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
813int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
814int do_md_run(struct mddev *mddev);
815
816extern const struct block_device_operations md_fops;
817
818#endif /* _MD_MD_H */
v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3   md.h : kernel internal structure of the Linux MD driver
  4          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
  5
  6*/
  7
  8#ifndef _MD_MD_H
  9#define _MD_MD_H
 10
 11#include <linux/blkdev.h>
 12#include <linux/backing-dev.h>
 13#include <linux/badblocks.h>
 14#include <linux/kobject.h>
 15#include <linux/list.h>
 16#include <linux/mm.h>
 17#include <linux/mutex.h>
 18#include <linux/timer.h>
 19#include <linux/wait.h>
 20#include <linux/workqueue.h>
 21#include "md-cluster.h"
 22
 23#define MaxSector (~(sector_t)0)
 24
 25/*
 26 * These flags should really be called "NO_RETRY" rather than
 27 * "FAILFAST" because they don't make any promise about time lapse,
 28 * only about the number of retries, which will be zero.
 29 * REQ_FAILFAST_DRIVER is not included because
 30 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 31 * seems to suggest that the errors it avoids retrying should usually
 32 * be retried.
 33 */
 34#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
 35/*
 36 * MD's 'extended' device
 37 */
 38struct md_rdev {
 39	struct list_head same_set;	/* RAID devices within the same set */
 40
 41	sector_t sectors;		/* Device size (in 512bytes sectors) */
 42	struct mddev *mddev;		/* RAID array if running */
 43	int last_events;		/* IO event timestamp */
 44
 45	/*
 46	 * If meta_bdev is non-NULL, it means that a separate device is
 47	 * being used to store the metadata (superblock/bitmap) which
 48	 * would otherwise be contained on the same device as the data (bdev).
 49	 */
 50	struct block_device *meta_bdev;
 51	struct block_device *bdev;	/* block device handle */
 52
 53	struct page	*sb_page, *bb_page;
 54	int		sb_loaded;
 55	__u64		sb_events;
 56	sector_t	data_offset;	/* start of data in array */
 57	sector_t	new_data_offset;/* only relevant while reshaping */
 58	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
 59	int		sb_size;	/* bytes in the superblock */
 60	int		preferred_minor;	/* autorun support */
 61
 62	struct kobject	kobj;
 63
 64	/* A device can be in one of three states based on two flags:
 65	 * Not working:   faulty==1 in_sync==0
 66	 * Fully working: faulty==0 in_sync==1
 67	 * Working, but not
 68	 * in sync with array
 69	 *                faulty==0 in_sync==0
 70	 *
 71	 * It can never have faulty==1, in_sync==1
 72	 * This reduces the burden of testing multiple flags in many cases
 73	 */
 74
 75	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
 76	wait_queue_head_t blocked_wait;
 77
 78	int desc_nr;			/* descriptor index in the superblock */
 79	int raid_disk;			/* role of device in array */
 80	int new_raid_disk;		/* role that the device will have in
 81					 * the array after a level-change completes.
 82					 */
 83	int saved_raid_disk;		/* role that device used to have in the
 84					 * array and could again if we did a partial
 85					 * resync from the bitmap
 86					 */
 87	union {
 88		sector_t recovery_offset;/* If this device has been partially
 89					 * recovered, this is where we were
 90					 * up to.
 91					 */
 92		sector_t journal_tail;	/* If this device is a journal device,
 93					 * this is the journal tail (journal
 94					 * recovery start point)
 95					 */
 96	};
 97
 98	atomic_t	nr_pending;	/* number of pending requests.
 99					 * only maintained for arrays that
100					 * support hot removal
101					 */
102	atomic_t	read_errors;	/* number of consecutive read errors that
103					 * we have tried to ignore.
104					 */
105	time64_t	last_read_error;	/* monotonic time since our
106						 * last read error
107						 */
108	atomic_t	corrected_errors; /* number of corrected read errors,
109					   * for reporting to userspace and storing
110					   * in superblock.
111					   */
112
113	/*
 114	 * Members used to check for collisions between write-behind IOs.
115	 */
116	struct list_head wb_list;
117	spinlock_t wb_list_lock;
118	wait_queue_head_t wb_io_wait;
119
120	struct work_struct del_work;	/* used for delayed sysfs removal */
121
122	struct kernfs_node *sysfs_state; /* handle for 'state'
123					   * sysfs entry */
124
125	struct badblocks badblocks;
126
127	struct {
128		short offset;	/* Offset from superblock to start of PPL.
129				 * Not used by external metadata. */
130		unsigned int size;	/* Size in sectors of the PPL space */
131		sector_t sector;	/* First sector of the PPL space */
132	} ppl;
133};
134enum flag_bits {
135	Faulty,			/* device is known to have a fault */
136	In_sync,		/* device is in_sync with rest of array */
137	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
138				 * bitmap-based recovery to get fully in sync.
139				 * The bit is only meaningful before device
140				 * has been passed to pers->hot_add_disk.
141				 */
142	WriteMostly,		/* Avoid reading if at all possible */
143	AutoDetected,		/* added by auto-detect */
144	Blocked,		/* An error occurred but has not yet
145				 * been acknowledged by the metadata
146				 * handler, so don't allow writes
147				 * until it is cleared */
148	WriteErrorSeen,		/* A write error has been seen on this
149				 * device
150				 */
151	FaultRecorded,		/* Intermediate state for clearing
152				 * Blocked.  The Fault is/will-be
153				 * recorded in the metadata, but that
154				 * metadata hasn't been stored safely
155				 * on disk yet.
156				 */
157	BlockedBadBlocks,	/* A writer is blocked because they
158				 * found an unacknowledged bad-block.
159				 * This can safely be cleared at any
160				 * time, and the writer will re-check.
161				 * It may be set at any time, and at
162				 * worst the writer will timeout and
163				 * re-check.  So setting it as
164				 * accurately as possible is good, but
165				 * not absolutely critical.
166				 */
167	WantReplacement,	/* This device is a candidate to be
168				 * hot-replaced, either because it has
169				 * reported some faults, or because
170				 * of explicit request.
171				 */
172	Replacement,		/* This device is a replacement for
173				 * a want_replacement device with same
174				 * raid_disk number.
175				 */
176	Candidate,		/* For clustered environments only:
177				 * This device is seen locally but not
178				 * by the whole cluster
179				 */
180	Journal,		/* This device is used as journal for
181				 * raid-5/6.
182				 * Usually, this device should be faster
183				 * than other devices in the array
184				 */
185	ClusterRemove,
186	RemoveSynchronized,	/* synchronize_rcu() was called after
187				 * this device was known to be faulty,
188				 * so it is safe to remove without
189				 * another synchronize_rcu() call.
190				 */
191	ExternalBbl,            /* External metadata provides bad
192				 * block management for a disk
193				 */
194	FailFast,		/* Minimal retries should be attempted on
195				 * this device, so use REQ_FAILFAST_DEV.
196				 * Also don't try to repair failed reads.
 197				 * It is expected that no bad block log
198				 * is present.
199				 */
200	LastDev,		/* Seems to be the last working dev as
201				 * it didn't fail, so don't use FailFast
202				 * any more for metadata
203				 */
204	WBCollisionCheck,	/*
205				 * multiqueue device should check if there
 206				 * is a collision between write-behind bios.
207				 */
208};
209
210static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
211			      sector_t *first_bad, int *bad_sectors)
212{
213	if (unlikely(rdev->badblocks.count)) {
214		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
215					sectors,
216					first_bad, bad_sectors);
217		if (rv)
218			*first_bad -= rdev->data_offset;
219		return rv;
220	}
221	return 0;
222}
223extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
224			      int is_new);
225extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
226				int is_new);
227struct md_cluster_info;
228
229/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
230enum mddev_flags {
231	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
232	MD_CLOSING,		/* If set, we are closing the array, do not open
233				 * it then */
234	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
235	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
236	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
237				   * already took resync lock, need to
238				   * release the lock */
239	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
240				 * supported as calls to md_error() will
241				 * never cause the array to become failed.
242				 */
243	MD_HAS_PPL,		/* The raid array has PPL feature set */
244	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
245	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
246				 * the metadata without taking reconfig_mutex.
247				 */
248	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
249				 * without explicitly holding reconfig_mutex.
250				 */
251	MD_NOT_READY,		/* do_md_run() is active, so 'array_state'
252				 * must not report that array is ready yet
253				 */
254	MD_BROKEN,              /* This is used in RAID-0/LINEAR only, to stop
255				 * I/O in case an array member is gone/failed.
256				 */
257};
258
259enum mddev_sb_flags {
260	MD_SB_CHANGE_DEVS,		/* Some device status has changed */
261	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
262	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
263	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
264};
265
266#define NR_WB_INFOS	8
267/* record current range of write behind IOs */
268struct wb_info {
269	sector_t lo;
270	sector_t hi;
271	struct list_head list;
272};
273
274struct mddev {
275	void				*private;
276	struct md_personality		*pers;
277	dev_t				unit;
278	int				md_minor;
279	struct list_head		disks;
280	unsigned long			flags;
281	unsigned long			sb_flags;
282
283	int				suspended;
284	atomic_t			active_io;
285	int				ro;
286	int				sysfs_active; /* set when sysfs deletes
287						       * are happening, so run/
288						       * takeover/stop are not safe
289						       */
290	struct gendisk			*gendisk;
291
292	struct kobject			kobj;
293	int				hold_active;
294#define	UNTIL_IOCTL	1
295#define	UNTIL_STOP	2
296
297	/* Superblock information */
298	int				major_version,
299					minor_version,
300					patch_version;
301	int				persistent;
302	int				external;	/* metadata is
303							 * managed externally */
304	char				metadata_type[17]; /* externally set*/
305	int				chunk_sectors;
306	time64_t			ctime, utime;
307	int				level, layout;
308	char				clevel[16];
309	int				raid_disks;
310	int				max_disks;
311	sector_t			dev_sectors;	/* used size of
312							 * component devices */
313	sector_t			array_sectors; /* exported array size */
314	int				external_size; /* size managed
315							* externally */
316	__u64				events;
317	/* If the last 'event' was simply a clean->dirty transition, and
318	 * we didn't write it to the spares, then it is safe and simple
319	 * to just decrement the event count on a dirty->clean transition.
320	 * So we record that possibility here.
321	 */
322	int				can_decrease_events;
323
324	char				uuid[16];
325
326	/* If the array is being reshaped, we need to record the
327	 * new shape and an indication of where we are up to.
328	 * This is written to the superblock.
329	 * If reshape_position is MaxSector, then no reshape is happening (yet).
330	 */
331	sector_t			reshape_position;
332	int				delta_disks, new_level, new_layout;
333	int				new_chunk_sectors;
334	int				reshape_backwards;
335
336	struct md_thread		*thread;	/* management thread */
337	struct md_thread		*sync_thread;	/* doing resync or reconstruct */
338
339	/* 'last_sync_action' is initialized to "none".  It is set when a
340	 * sync operation (i.e "data-check", "requested-resync", "resync",
341	 * "recovery", or "reshape") is started.  It holds this value even
342	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
343	 * or finished).  It is overwritten when a new sync operation is begun.
344	 */
345	char				*last_sync_action;
346	sector_t			curr_resync;	/* last block scheduled */
347	/* As resync requests can complete out of order, we cannot easily track
348	 * how much resync has been completed.  So we occasionally pause until
349	 * everything completes, then set curr_resync_completed to curr_resync.
350	 * As such it may be well behind the real resync mark, but it is a value
351	 * we are certain of.
352	 */
353	sector_t			curr_resync_completed;
354	unsigned long			resync_mark;	/* a recent timestamp */
355	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
356	sector_t			curr_mark_cnt; /* blocks scheduled now */
357
358	sector_t			resync_max_sectors; /* may be set by personality */
359
360	atomic64_t			resync_mismatches; /* count of sectors where
361							    * parity/replica mismatch found
362							    */
363
364	/* allow user-space to request suspension of IO to regions of the array */
365	sector_t			suspend_lo;
366	sector_t			suspend_hi;
367	/* if zero, use the system-wide default */
368	int				sync_speed_min;
369	int				sync_speed_max;
370
371	/* resync even though the same disks are shared among md-devices */
372	int				parallel_resync;
373
374	int				ok_start_degraded;
375
376	unsigned long			recovery;
377	/* If a RAID personality determines that recovery (of a particular
378	 * device) will fail due to a read error on the source device, it
379	 * takes a copy of this number and does not attempt recovery again
380	 * until this number changes.
381	 */
382	int				recovery_disabled;
383
 384	int				in_sync;	/* known to not need resync */
385	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
386	 * that we are never stopping an array while it is open.
387	 * 'reconfig_mutex' protects all other reconfiguration.
388	 * These locks are separate due to conflicting interactions
389	 * with bdev->bd_mutex.
390	 * Lock ordering is:
391	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
392	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
393	 */
394	struct mutex			open_mutex;
395	struct mutex			reconfig_mutex;
396	atomic_t			active;		/* general refcount */
397	atomic_t			openers;	/* number of active opens */
398
399	int				changed;	/* True if we might need to
400							 * reread partition info */
401	int				degraded;	/* whether md should consider
402							 * adding a spare
403							 */
404
405	atomic_t			recovery_active; /* blocks scheduled, but not written */
406	wait_queue_head_t		recovery_wait;
407	sector_t			recovery_cp;
408	sector_t			resync_min;	/* user requested sync
409							 * starts here */
410	sector_t			resync_max;	/* resync should pause
411							 * when it gets here */
412
413	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
414							 * file in sysfs.
415							 */
416	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */
417
418	struct work_struct del_work;	/* used for delayed sysfs removal */
419
420	/* "lock" protects:
421	 *   flush_bio transition from NULL to !NULL
422	 *   rdev superblocks, events
423	 *   clearing MD_CHANGE_*
424	 *   in_sync - and related safemode and MD_CHANGE changes
425	 *   pers (also protected by reconfig_mutex and pending IO).
426	 *   clearing ->bitmap
427	 *   clearing ->bitmap_info.file
428	 *   changing ->resync_{min,max}
429	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
430	 */
431	spinlock_t			lock;
432	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
433	atomic_t			pending_writes;	/* number of active superblock writes */
434
435	unsigned int			safemode;	/* if set, update "clean" superblock
436							 * when no writes pending.
437							 */
438	unsigned int			safemode_delay;
439	struct timer_list		safemode_timer;
440	struct percpu_ref		writes_pending;
441	int				sync_checkers;	/* # of threads checking writes_pending */
442	struct request_queue		*queue;	/* for plugging ... */
443
444	struct bitmap			*bitmap; /* the bitmap for the device */
445	struct {
446		struct file		*file; /* the bitmap file */
447		loff_t			offset; /* offset from superblock of
448						 * start of bitmap. May be
449						 * negative, but not '0'
450						 * For external metadata, offset
451						 * from start of device.
452						 */
453		unsigned long		space; /* space available at this offset */
454		loff_t			default_offset; /* this is the offset to use when
455							 * hot-adding a bitmap.  It should
456							 * eventually be settable by sysfs.
457							 */
458		unsigned long		default_space; /* space available at
459							* default offset */
460		struct mutex		mutex;
461		unsigned long		chunksize;
462		unsigned long		daemon_sleep; /* how many jiffies between updates? */
463		unsigned long		max_write_behind; /* write-behind mode */
464		int			external;
465		int			nodes; /* Maximum number of nodes in the cluster */
466		char                    cluster_name[64]; /* Name of the cluster */
467	} bitmap_info;
468
469	atomic_t			max_corr_read_errors; /* max read retries */
470	struct list_head		all_mddevs;
471
472	struct attribute_group		*to_remove;
473
474	struct bio_set			bio_set;
475	struct bio_set			sync_set; /* for sync operations like
476						   * metadata and bitmap writes
477						   */
478
479	/* Generic flush handling.
480	 * The last to finish preflush schedules a worker to submit
481	 * the rest of the request (without the REQ_PREFLUSH flag).
482	 */
483	struct bio *flush_bio;
484	atomic_t flush_pending;
485	ktime_t start_flush, last_flush; /* last_flush is when the last completed
486					  * flush was started.
487					  */
488	struct work_struct flush_work;
489	struct work_struct event_work;	/* used by dm to report failure event */
490	mempool_t *wb_info_pool;
491	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
492	struct md_cluster_info		*cluster_info;
493	unsigned int			good_device_nr;	/* good device num within cluster raid */
494
495	bool	has_superblocks:1;
496	bool	fail_last_dev:1;
497};
498
499enum recovery_flags {
500	/*
 501	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
502	 */
503	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
504	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
505	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
506	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
507	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
508	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
509	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
510	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
511	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
512	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
 513	MD_RECOVERY_ERROR,	/* sync action interrupted because of an I/O error */
514	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
515	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
516};
517
518static inline int __must_check mddev_lock(struct mddev *mddev)
519{
520	return mutex_lock_interruptible(&mddev->reconfig_mutex);
521}
522
523/* Sometimes we need to take the lock in a situation where
524 * failure due to interrupts is not acceptable.
525 */
526static inline void mddev_lock_nointr(struct mddev *mddev)
527{
528	mutex_lock(&mddev->reconfig_mutex);
529}
530
531static inline int mddev_trylock(struct mddev *mddev)
532{
533	return mutex_trylock(&mddev->reconfig_mutex);
534}
535extern void mddev_unlock(struct mddev *mddev);
536
537static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
538{
539	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
540}
541
542static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
543{
544	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
545}
546
547struct md_personality
548{
549	char *name;
550	int level;
551	struct list_head list;
552	struct module *owner;
553	bool (*make_request)(struct mddev *mddev, struct bio *bio);
554	/*
 555	 * start-up work that does NOT require md_thread; tasks that
 556	 * require md_thread should go into start()
557	 */
558	int (*run)(struct mddev *mddev);
559	/* start up works that require md threads */
560	int (*start)(struct mddev *mddev);
561	void (*free)(struct mddev *mddev, void *priv);
562	void (*status)(struct seq_file *seq, struct mddev *mddev);
563	/* error_handler must set ->faulty and clear ->in_sync
564	 * if appropriate, and should abort recovery if needed
565	 */
566	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
567	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
568	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
569	int (*spare_active) (struct mddev *mddev);
570	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
571	int (*resize) (struct mddev *mddev, sector_t sectors);
572	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
573	int (*check_reshape) (struct mddev *mddev);
574	int (*start_reshape) (struct mddev *mddev);
575	void (*finish_reshape) (struct mddev *mddev);
576	void (*update_reshape_pos) (struct mddev *mddev);
577	/* quiesce suspends or resumes internal processing.
578	 * 1 - stop new actions and wait for action io to complete
579	 * 0 - return to normal behaviour
580	 */
581	void (*quiesce) (struct mddev *mddev, int quiesce);
582	/* takeover is used to transition an array from one
583	 * personality to another.  The new personality must be able
584	 * to handle the data in the current layout.
585	 * e.g. 2drive raid1 -> 2drive raid5
586	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
587	 * If the takeover succeeds, a new 'private' structure is returned.
588	 * This needs to be installed and then ->run used to activate the
589	 * array.
590	 */
591	void *(*takeover) (struct mddev *mddev);
592	/* congested implements bdi.congested_fn().
593	 * Will not be called while array is 'suspended' */
594	int (*congested)(struct mddev *mddev, int bits);
595	/* Changes the consistency policy of an active array. */
596	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
597};
598
599struct md_sysfs_entry {
600	struct attribute attr;
601	ssize_t (*show)(struct mddev *, char *);
602	ssize_t (*store)(struct mddev *, const char *, size_t);
603};
604extern struct attribute_group md_bitmap_group;
605
606static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
607{
608	if (sd)
609		return sysfs_get_dirent(sd, name);
610	return sd;
611}
612static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
613{
614	if (sd)
615		sysfs_notify_dirent(sd);
616}
617
618static inline char * mdname (struct mddev * mddev)
619{
620	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
621}
622
623static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
624{
625	char nm[20];
626	if (!test_bit(Replacement, &rdev->flags) &&
627	    !test_bit(Journal, &rdev->flags) &&
628	    mddev->kobj.sd) {
629		sprintf(nm, "rd%d", rdev->raid_disk);
630		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
631	} else
632		return 0;
633}
634
635static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
636{
637	char nm[20];
638	if (!test_bit(Replacement, &rdev->flags) &&
639	    !test_bit(Journal, &rdev->flags) &&
640	    mddev->kobj.sd) {
641		sprintf(nm, "rd%d", rdev->raid_disk);
642		sysfs_remove_link(&mddev->kobj, nm);
643	}
644}
645
646/*
 647 * Iterates through an rdev list. It's safe to remove the
 648 * current 'rdev'; don't touch 'tmp' though.
649 */
650#define rdev_for_each_list(rdev, tmp, head)				\
651	list_for_each_entry_safe(rdev, tmp, head, same_set)
652
653/*
654 * iterates through the 'same array disks' ringlist
655 */
656#define rdev_for_each(rdev, mddev)				\
657	list_for_each_entry(rdev, &((mddev)->disks), same_set)
658
659#define rdev_for_each_safe(rdev, tmp, mddev)				\
660	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
661
662#define rdev_for_each_rcu(rdev, mddev)				\
663	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
664
665struct md_thread {
666	void			(*run) (struct md_thread *thread);
667	struct mddev		*mddev;
668	wait_queue_head_t	wqueue;
669	unsigned long		flags;
670	struct task_struct	*tsk;
671	unsigned long		timeout;
672	void			*private;
673};
674
675#define THREAD_WAKEUP  0
676
677static inline void safe_put_page(struct page *p)
678{
679	if (p) put_page(p);
680}
681
682extern int register_md_personality(struct md_personality *p);
683extern int unregister_md_personality(struct md_personality *p);
684extern int register_md_cluster_operations(struct md_cluster_operations *ops,
685		struct module *module);
686extern int unregister_md_cluster_operations(void);
687extern int md_setup_cluster(struct mddev *mddev, int nodes);
688extern void md_cluster_stop(struct mddev *mddev);
689extern struct md_thread *md_register_thread(
690	void (*run)(struct md_thread *thread),
691	struct mddev *mddev,
692	const char *name);
693extern void md_unregister_thread(struct md_thread **threadp);
694extern void md_wakeup_thread(struct md_thread *thread);
695extern void md_check_recovery(struct mddev *mddev);
696extern void md_reap_sync_thread(struct mddev *mddev);
697extern int mddev_init_writes_pending(struct mddev *mddev);
698extern bool md_write_start(struct mddev *mddev, struct bio *bi);
699extern void md_write_inc(struct mddev *mddev, struct bio *bi);
700extern void md_write_end(struct mddev *mddev);
701extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
702extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
703extern void md_finish_reshape(struct mddev *mddev);
704
705extern int mddev_congested(struct mddev *mddev, int bits);
706extern void md_flush_request(struct mddev *mddev, struct bio *bio);
707extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
708			   sector_t sector, int size, struct page *page);
709extern int md_super_wait(struct mddev *mddev);
710extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
711			struct page *page, int op, int op_flags,
712			bool metadata_op);
713extern void md_do_sync(struct md_thread *thread);
714extern void md_new_event(struct mddev *mddev);
715extern void md_allow_write(struct mddev *mddev);
716extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
717extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
718extern int md_check_no_bitmap(struct mddev *mddev);
719extern int md_integrity_register(struct mddev *mddev);
720extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
721extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
722
723extern void mddev_init(struct mddev *mddev);
724extern int md_run(struct mddev *mddev);
725extern int md_start(struct mddev *mddev);
726extern void md_stop(struct mddev *mddev);
727extern void md_stop_writes(struct mddev *mddev);
728extern int md_rdev_init(struct md_rdev *rdev);
729extern void md_rdev_clear(struct md_rdev *rdev);
730
731extern void md_handle_request(struct mddev *mddev, struct bio *bio);
732extern void mddev_suspend(struct mddev *mddev);
733extern void mddev_resume(struct mddev *mddev);
734extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
735				   struct mddev *mddev);
736
737extern void md_reload_sb(struct mddev *mddev, int raid_disk);
738extern void md_update_sb(struct mddev *mddev, int force);
739extern void md_kick_rdev_from_array(struct md_rdev * rdev);
740extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
741				 bool is_suspend);
742struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
743struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
744
745static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
746{
747	int flags = rdev->bdev->bd_disk->flags;
748
749	if (!(flags & GENHD_FL_UP)) {
750		if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
751			pr_warn("md: %s: %s array has a missing/failed member\n",
752				mdname(rdev->mddev), md_type);
753		return true;
754	}
755	return false;
756}
757
758static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
759{
760	int faulty = test_bit(Faulty, &rdev->flags);
761	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
762		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
763		md_wakeup_thread(mddev->thread);
764	}
765}
766
767extern struct md_cluster_operations *md_cluster_ops;
768static inline int mddev_is_clustered(struct mddev *mddev)
769{
770	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
771}
772
773/* clear unsupported mddev_flags */
774static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
775	unsigned long unsupported_flags)
776{
777	mddev->flags &= ~unsupported_flags;
778}
779
780static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
781{
782	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
783	    !bio->bi_disk->queue->limits.max_write_same_sectors)
784		mddev->queue->limits.max_write_same_sectors = 0;
785}
786
787static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
788{
789	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
790	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
791		mddev->queue->limits.max_write_zeroes_sectors = 0;
792}
793#endif /* _MD_MD_H */