/* md/raid10 private header — two revisions concatenated below. */
1#ifndef _RAID10_H
2#define _RAID10_H
3
typedef struct mirror_info mirror_info_t;

/* Per-device state for one member ("mirror") of the RAID10 array. */
struct mirror_info {
	mdk_rdev_t *rdev;	/* the underlying md member device */
	sector_t head_position;	/* last known head position; presumably used
				 * for read balancing — confirm in raid10.c
				 */
	int recovery_disabled;	/* matches
				 * mddev->recovery_disabled
				 * when we shouldn't try
				 * recovering this device.
				 */
};
15
typedef struct r10bio_s r10bio_t;

/* Per-array private data ("conf") for the RAID10 personality. */
struct r10_private_data_s {
	mddev_t *mddev;		/* the md array this conf belongs to */
	mirror_info_t *mirrors;	/* per-device state, one entry per raid disk */
	int raid_disks;		/* number of member devices */
	spinlock_t device_lock;	/* NOTE(review): presumably protects updates
				 * to the mirrors/rdev pointers — confirm
				 * against raid10.c
				 */

	/* geometry */
	int near_copies;	/* number of copies laid out raid0 style */
	int far_copies;		/* number of copies laid out
				 * at large strides across drives
				 */
	int far_offset;		/* far_copies are offset by 1 stripe
				 * instead of many
				 */
	int copies;		/* near_copies * far_copies.
				 * must be <= raid_disks
				 */
	sector_t stride;	/* distance between far copies.
				 * This is size / far_copies unless
				 * far_offset, in which case it is
				 * 1 stripe.
				 */

	sector_t dev_sectors;	/* temp copy of mddev->dev_sectors */

	int chunk_shift;	/* shift from chunks to sectors */
	sector_t chunk_mask;	/* presumably (1 << chunk_shift) - 1 —
				 * confirm in raid10.c */

	struct list_head retry_list;	/* r10bios queued for retry —
					 * TODO confirm (raid10d) */
	/* queue pending writes and submit them on unplug */
	struct bio_list pending_bio_list;


	/* NOTE(review): the counters below implement the resync barrier
	 * (see wait_barrier); exact semantics should be confirmed against
	 * raid10.c, the field names mirror the raid1 implementation.
	 */
	spinlock_t resync_lock;
	int nr_pending;
	int nr_waiting;
	int nr_queued;
	int barrier;
	sector_t next_resync;
	int fullsync;		/* set to 1 if a full sync is needed,
				 * (fresh device added).
				 * Cleared when a sync completes.
				 */

	wait_queue_head_t wait_barrier;

	mempool_t *r10bio_pool;	/* pool of r10bio descriptors */
	mempool_t *r10buf_pool;	/* pool of r10bios with attached buffers —
				 * TODO confirm (resync/recovery) */
	struct page *tmppage;	/* scratch page — TODO confirm usage */

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s *thread;
};

typedef struct r10_private_data_s conf_t;
75
76/*
77 * this is our 'private' RAID10 bio.
78 *
79 * it contains information about what kind of IO operations were started
80 * for this RAID10 operation, and about their status:
81 */
82
83struct r10bio_s {
84 atomic_t remaining; /* 'have we finished' count,
85 * used from IRQ handlers
86 */
87 sector_t sector; /* virtual sector number */
88 int sectors;
89 unsigned long state;
90 mddev_t *mddev;
91 /*
92 * original bio going to /dev/mdx
93 */
94 struct bio *master_bio;
95 /*
96 * if the IO is in READ direction, then this is where we read
97 */
98 int read_slot;
99
100 struct list_head retry_list;
101 /*
102 * if the IO is in WRITE direction, then multiple bios are used,
103 * one for each copy.
104 * When resyncing we also use one for each copy.
105 * When reconstructing, we use 2 bios, one for read, one for write.
106 * We choose the number when they are allocated.
107 */
108 struct {
109 struct bio *bio;
110 sector_t addr;
111 int devnum;
112 } devs[0];
113};
114
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for NULL and the IO_BLOCKED/IO_MADE_GOOD sentinel values.
 * The argument is parenthesized so that non-trivial expressions
 * expand with the intended precedence (the original expanded the
 * bare argument, a macro-expansion hazard).
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)

/* bits for r10bio.state */
#define R10BIO_Uptodate 0
#define R10BIO_IsSync 1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded 3
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
#define R10BIO_ReadError 4
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
#define R10BIO_MadeGood 5
#define R10BIO_WriteError 6
143#endif
1#ifndef _RAID10_H
2#define _RAID10_H
3
/* Per-device state for one slot of the RAID10 array. */
struct raid10_info {
	struct md_rdev *rdev, *replacement;	/* active member device and,
						 * if non-NULL, its hot
						 * replacement device
						 */
	sector_t head_position;			/* last known head position;
						 * presumably used for read
						 * balancing — confirm in
						 * raid10.c
						 */
	int recovery_disabled;	/* matches
				 * mddev->recovery_disabled
				 * when we shouldn't try
				 * recovering this device.
				 */
};
13
/* Per-array private data ("conf") for the RAID10 personality. */
struct r10conf {
	struct mddev *mddev;		/* the md array we belong to */
	struct raid10_info *mirrors;	/* per-device state */
	struct raid10_info *mirrors_new, *mirrors_old;	/* staging copies,
					 * presumably swapped in/out across a
					 * reshape — TODO confirm */
	spinlock_t device_lock;		/* NOTE(review): presumably protects
					 * device/rdev updates — confirm in
					 * raid10.c */

	/* geometry */
	struct geom {
		int raid_disks;
		int near_copies;  /* number of copies laid out
				   * raid0 style */
		int far_copies;   /* number of copies laid out
				   * at large strides across drives
				   */
		int far_offset;	  /* far_copies are offset by 1
				   * stripe instead of many
				   */
		sector_t stride;  /* distance between far copies.
				   * This is size / far_copies unless
				   * far_offset, in which case it is
				   * 1 stripe.
				   */
		int far_set_size; /* The number of devices in a set,
				   * where a 'set' are devices that
				   * contain far/offset copies of
				   * each other.
				   */
		int chunk_shift;  /* shift from chunks to sectors */
		sector_t chunk_mask;
	} prev, geo;	/* presumably 'geo' is the current geometry and
			 * 'prev' the pre-reshape one — TODO confirm */
	int copies;	/* near_copies * far_copies.
			 * must be <= raid_disks
			 */

	sector_t dev_sectors;	/* temp copy of
				 * mddev->dev_sectors */
	sector_t reshape_progress;
	sector_t reshape_safe;
	unsigned long reshape_checkpoint;
	sector_t offset_diff;

	struct list_head retry_list;
	/* A separate list of r10bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head bio_end_io_list;

	/* queue pending writes and submit them on unplug */
	struct bio_list pending_bio_list;
	int pending_count;

	/* NOTE(review): the fields below implement the resync barrier
	 * (see wait_barrier); exact semantics should be confirmed against
	 * raid10.c.
	 */
	spinlock_t resync_lock;
	atomic_t nr_pending;
	int nr_waiting;
	int nr_queued;
	int barrier;
	int array_freeze_pending;
	sector_t next_resync;
	int fullsync;		/* set to 1 if a full sync is needed,
				 * (fresh device added).
				 * Cleared when a sync completes.
				 */
	int have_replacement;	/* There is at least one
				 * replacement device.
				 */
	wait_queue_head_t wait_barrier;

	mempool_t *r10bio_pool;	/* pool of r10bio descriptors */
	mempool_t *r10buf_pool;	/* pool of r10bios with attached buffers —
				 * TODO confirm (resync/recovery) */
	struct page *tmppage;	/* scratch page — TODO confirm usage */

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread *thread;
};
91
92/*
93 * this is our 'private' RAID10 bio.
94 *
95 * it contains information about what kind of IO operations were started
96 * for this RAID10 operation, and about their status:
97 */
98
99struct r10bio {
100 atomic_t remaining; /* 'have we finished' count,
101 * used from IRQ handlers
102 */
103 sector_t sector; /* virtual sector number */
104 int sectors;
105 unsigned long state;
106 struct mddev *mddev;
107 /*
108 * original bio going to /dev/mdx
109 */
110 struct bio *master_bio;
111 /*
112 * if the IO is in READ direction, then this is where we read
113 */
114 int read_slot;
115
116 struct list_head retry_list;
117 /*
118 * if the IO is in WRITE direction, then multiple bios are used,
119 * one for each copy.
120 * When resyncing we also use one for each copy.
121 * When reconstructing, we use 2 bios, one for read, one for write.
122 * We choose the number when they are allocated.
123 * We sometimes need an extra bio to write to the replacement.
124 */
125 struct r10dev {
126 struct bio *bio;
127 union {
128 struct bio *repl_bio; /* used for resync and
129 * writes */
130 struct md_rdev *rdev; /* used for reads
131 * (read_slot >= 0) */
132 };
133 sector_t addr;
134 int devnum;
135 } devs[0];
136};
137
138/* bits for r10bio.state */
139enum r10bio_state {
140 R10BIO_Uptodate,
141 R10BIO_IsSync,
142 R10BIO_IsRecover,
143 R10BIO_IsReshape,
144 R10BIO_Degraded,
145/* Set ReadError on bios that experience a read error
146 * so that raid10d knows what to do with them.
147 */
148 R10BIO_ReadError,
149/* If a write for this request means we can clear some
150 * known-bad-block records, we set this flag.
151 */
152 R10BIO_MadeGood,
153 R10BIO_WriteError,
154/* During a reshape we might be performing IO on the
155 * 'previous' part of the array, in which case this
156 * flag is set
157 */
158 R10BIO_Previous,
159/* failfast devices did receive failfast requests. */
160 R10BIO_FailFast,
161};
162#endif