/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/* Bad block numbers are stored sorted in a single page.
 * 64bits is used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512bytes sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	AutoDetected	7		/* added by auto-detect */
#define Blocked		8		/* An error occurred but has not yet
					 * been acknowledged by the metadata
					 * handler, so don't allow writes
					 * until it is cleared */
#define WriteErrorSeen	9		/* A write error has been seen on this
					 * device
					 */
#define FaultRecorded	10		/* Intermediate state for clearing
					 * Blocked.  The Fault is/will-be
					 * recorded in the metadata, but that
					 * metadata hasn't been stored safely
					 * on disk yet.
					 */
#define BlockedBadBlocks 11		/* A writer is blocked because they
					 * found an unacknowledged bad-block.
					 * This can safely be cleared at any
					 * time, and the writer will re-check.
					 * It may be set at any time, and at
					 * worst the writer will timeout and
					 * re-check.  So setting it as
					 * accurately as possible is good, but
					 * not absolutely critical.
					 */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t recovery_offset;	/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state;	/* handle for 'state'
						 * sysfs entry */

	struct badblocks {
		int count;		/* count of bad blocks */
		int unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none
					 */
		int shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled.*/
		u64 *page;		/* badblock list */
		int changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};

#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
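/*
 * Illustrative sketch (not part of the driver): packing and unpacking a
 * bad-block entry with the macros above; the numbers are arbitrary.
 *
 *	u64 entry = BB_MAKE((u64)4096, 8, 1);	// 8 sectors at sector 4096, acked
 *	sector_t start = BB_OFFSET(entry);	// == 4096
 *	int len = BB_LEN(entry);		// == 8
 *	int acked = BB_ACK(entry);		// == 1
 *
 * BB_MAKE stores 'len - 1' in the low 9 bits, so a single entry can
 * describe at most BB_MAX_LEN (512) sectors.
 */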

extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
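/*
 * Usage sketch: before issuing IO at array sector 's', a personality can
 * check the rdev's bad-block list.  A non-zero return means the range
 * overlaps a known bad block; 'first_bad' and 'bad_sectors' then describe
 * the first bad range hit, relative to the start of the data area.
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, s, sectors, &first_bad, &bad_sectors)) {
 *		// e.g. trim the request to end at 'first_bad', or fail it
 *	}
 */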
extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
			      int acknowledged);
extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
extern void md_ack_all_badblocks(struct badblocks *bb);

struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN	1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	int				ready; /* See when safe to pass
						* IO requests down */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set*/
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	atomic_t			plug_cnt;	/* If device is expecting
							 * more bios soon.
							 */
	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct sysfs_dirent		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t			write_lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};


static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};
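/*
 * Sketch of how a personality plugs in (the example_* names are
 * hypothetical placeholders, not part of md): fill in the hooks and
 * register from module init with register_md_personality(), declared
 * further down in this header.
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= -1000,	// hypothetical level number
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *	};
 *
 *	register_md_personality(&example_personality);
 */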


struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
}

static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	sysfs_remove_link(&mddev->kobj, nm);
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)					\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
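/*
 * Usage sketch: walking the component devices of an array.  The 'tmp'
 * cursor is what makes it safe to remove the current 'rdev' inside the
 * loop.
 *
 *	mdk_rdev_t *rdev, *tmp;
 *
 *	rdev_for_each(rdev, tmp, mddev) {
 *		if (test_bit(Faulty, &rdev->flags))
 *			// e.g. count or detach the faulty device
 *	}
 */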

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd) 		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd) 			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
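/*
 * Usage sketch (schematic, after the flush handling in md.c): wait for
 * 'condition' while temporarily dropping a spinlock that must be held
 * on entry and is held again on return.  'cmd' runs after the lock is
 * dropped, before schedule(); it may be empty.
 *
 *	spin_lock_irq(&mddev->write_lock);
 *	wait_event_lock_irq(mddev->sb_wait,
 *			    !mddev->flush_bio,
 *			    mddev->write_lock, );
 *	...
 *	spin_unlock_irq(&mddev->write_lock);
 */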

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern int md_rdev_init(mdk_rdev_t *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);
#endif /* _MD_MD_H */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512bytes sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t last_read_error;	/* monotonic time since our
					 * last read error
					 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					  * sysfs entry */

	struct badblocks badblocks;

	struct {
		short offset;		/* Offset from superblock to start of PPL.
					 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	RemoveSynchronized,	/* synchronize_rcu() was called after
				 * this device was known to be faulty,
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
};
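/*
 * rdev->flags is manipulated with the standard atomic bitops, using the
 * enum values above as bit numbers, e.g. (sketch):
 *
 *	if (test_bit(In_sync, &rdev->flags) &&
 *	    !test_bit(Faulty, &rdev->flags))
 *		// device is fully working
 *
 *	set_bit(WriteMostly, &rdev->flags);
 */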

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array, do not open
				 * it then */
	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
				   * already took resync lock, need to
				   * release the lock */
	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
				 * supported as calls to md_error() will
				 * never cause the array to become failed.
				 */
	MD_HAS_PPL,		/* The raid array has PPL feature set */
	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
				 * the metadata without taking reconfig_mutex.
				 */
	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
				 * without explicitly holding reconfig_mutex.
				 */
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set*/
	int				chunk_sectors;
	time64_t			ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */
	struct request_queue		*queue;	/* for plugging ... */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
							* default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;
	struct bio_set			*sync_set; /* for sync operations like
						    * metadata and bitmap writes
						    */

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */

	bool	has_superblocks:1;
};

enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
};

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
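
/*
 * Typical reconfiguration pattern (sketch): interruptible callers take
 * the lock with mddev_lock() and must check its return value; paths
 * that cannot tolerate failure use mddev_lock_nointr().
 *
 *	int err = mddev_lock(mddev);
 *	if (err)
 *		return err;
 *	// ... reconfigure the array ...
 *	mddev_unlock(mddev);
 */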

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}

struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	bool (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up works that do NOT require md_thread. tasks that
	 * require md_thread should go into start()
	 */
	int (*run)(struct mddev *mddev);
	/* start up works that require md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce) (struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
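
/*
 * Sketch: the _rcu iterator must run under rcu_read_lock(); this is the
 * pattern used by md_find_rdev_nr_rcu(), declared below.
 *
 *	rcu_read_lock();
 *	rdev_for_each_rcu(rdev, mddev)
 *		if (rdev->desc_nr == nr) {
 *			// found it; take a reference or copy what's needed
 *			break;
 *		}
 *	rcu_read_unlock();
 */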

struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
	    !bio->bi_disk->queue->limits.max_write_same_sectors)
		mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}
#endif /* _MD_MD_H */