#ifndef _RAID10_H
#define _RAID10_H

struct mirror_info {
	struct md_rdev	*rdev, *replacement;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct mirror_info	*mirrors;
	struct mirror_info	*mirrors_new, *mirrors_old;
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;	/* number of copies laid out
						 * raid0 style */
		int		far_copies;	/* number of copies laid out
						 * at large strides across drives
						 */
		int		far_offset;	/* far_copies are offset by 1
						 * stripe instead of many
						 */
		sector_t	stride;		/* distance between far copies.
						 * This is size / far_copies unless
						 * far_offset, in which case it is
						 * 1 stripe.
						 */
		int		chunk_shift;	/* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;
	int			copies;		/* near_copies * far_copies.
						 * must be <= raid_disks
						 */

	sector_t		dev_sectors;	/* temp copy of
						 * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
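/*
 * Illustrative sketch, not part of the original header: a simplified
 * model of how the geometry above turns a virtual (array) sector into
 * the device and device-relative sector of the first copy, in the
 * spirit of raid10_find_phys() in raid10.c.  The helper name is
 * hypothetical and all error handling is omitted.
 */
static inline void r10_example_first_copy(struct geom *geo, sector_t virt,
					  int *dev, sector_t *dev_sector)
{
	sector_t chunk = virt >> geo->chunk_shift;	/* which chunk */
	sector_t offset = virt & geo->chunk_mask;	/* offset inside it */
	sector_t stripe;

	chunk *= geo->near_copies;		/* near copies use adjacent chunks */
	stripe = chunk;
	*dev = sector_div(stripe, geo->raid_disks);	/* device holding copy 0 */
	if (geo->far_offset)
		stripe *= geo->far_copies;	/* offset layout: copies interleave per stripe */

	*dev_sector = offset + (stripe << geo->chunk_shift);
}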
/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	sector_t		sector;		/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio; /* used for resync and
						    * writes */
			struct md_rdev	*rdev;	   /* used for reads
						    * (read_slot >= 0) */
		};
		sector_t	addr;
		int		devnum;
	} devs[0];
};
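/*
 * Illustrative sketch, not part of the original header: each r10bio
 * carries one struct r10dev per copy in the trailing devs[] array, so
 * allocations must size it explicitly from conf->copies.  The r10bio
 * mempool set up in raid10.c is sized the same way; this helper uses a
 * plain kzalloc() and its name is hypothetical.
 */
static inline struct r10bio *r10_example_alloc_r10bio(struct r10conf *conf,
						      gfp_t gfp)
{
	/* header plus one r10dev slot per copy (near_copies * far_copies) */
	return kzalloc(sizeof(struct r10bio) +
		       conf->copies * sizeof(struct r10dev),
		       gfp);
}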
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
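/*
 * Illustrative sketch, not part of the original header: because
 * IO_BLOCKED and IO_MADE_GOOD are encoded as small non-pointer values,
 * completion and cleanup paths must check BIO_SPECIAL() before treating
 * devs[i].bio as a real bio.  The helper name is hypothetical.
 */
static inline void r10_example_put_bios(struct r10bio *r10_bio, int copies)
{
	int i;

	for (i = 0; i < copies; i++) {
		struct bio *bio = r10_bio->devs[i].bio;

		/* skip empty slots and the IO_BLOCKED/IO_MADE_GOOD markers */
		if (bio && !BIO_SPECIAL(bio))
			bio_put(bio);
	}
}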
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
};
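/*
 * Illustrative sketch, not part of the original header: state is a bit
 * mask indexed by the enum above and is manipulated with the usual
 * atomic bitops.  The helper names are hypothetical.
 */
static inline void r10_example_note_read_error(struct r10bio *r10_bio)
{
	/* record the failure so that raid10d knows what to do with it */
	set_bit(R10BIO_ReadError, &r10_bio->state);
}

static inline int r10_example_is_uptodate(struct r10bio *r10_bio)
{
	/* query the success bit set by the completion paths */
	return test_bit(R10BIO_Uptodate, &r10_bio->state);
}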
#endif
#ifndef _RAID10_H
#define _RAID10_H

struct raid10_info {
	struct md_rdev	*rdev, *replacement;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct raid10_info	*mirrors;
	struct raid10_info	*mirrors_new, *mirrors_old;
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;	/* number of copies laid out
						 * raid0 style */
		int		far_copies;	/* number of copies laid out
						 * at large strides across drives
						 */
		int		far_offset;	/* far_copies are offset by 1
						 * stripe instead of many
						 */
		sector_t	stride;		/* distance between far copies.
						 * This is size / far_copies unless
						 * far_offset, in which case it is
						 * 1 stripe.
						 */
		int		far_set_size;	/* The number of devices in a set,
						 * where a 'set' are devices that
						 * contain far/offset copies of
						 * each other.
						 */
		int		chunk_shift;	/* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;
	int			copies;		/* near_copies * far_copies.
						 * must be <= raid_disks
						 */

	sector_t		dev_sectors;	/* temp copy of
						 * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
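/*
 * Illustrative sketch, not part of the original header: with
 * far_set_size, far/offset copies rotate only within a 'set' of devices
 * rather than across the whole array.  A simplified model of stepping
 * from one far copy's device to the next, loosely following
 * raid10_find_phys() in raid10.c; the helper name is hypothetical.
 */
static inline int r10_example_next_far_dev(struct geom *geo, int dev)
{
	int set = dev / geo->far_set_size;	/* which set this device is in */

	dev += geo->near_copies;		/* step past the near copies */
	dev %= geo->far_set_size;		/* wrap within the set ... */
	return dev + set * geo->far_set_size;	/* ... not across all raid_disks */
}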
/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	sector_t		sector;		/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio; /* used for resync and
						    * writes */
			struct md_rdev	*rdev;	   /* used for reads
						    * (read_slot >= 0) */
		};
		sector_t	addr;
		int		devnum;
	} devs[0];
};
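/*
 * Illustrative sketch, not part of the original header: the union in
 * struct r10dev is discriminated by the request direction.  For reads
 * (read_slot >= 0) only devs[read_slot] is active and .rdev records the
 * device being read; writes and resync use .bio (and .repl_bio for a
 * replacement) in every active slot.  The helper name is hypothetical.
 */
static inline struct md_rdev *r10_example_read_rdev(struct r10bio *r10_bio)
{
	int slot = r10_bio->read_slot;

	/* only meaningful for READ requests */
	return slot >= 0 ? r10_bio->devs[slot].rdev : NULL;
}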
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
};

extern int md_raid10_congested(struct mddev *mddev, int bits);
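/*
 * Illustrative sketch, not part of the original header:
 * md_raid10_congested() is exported so that stacking drivers (for
 * example dm-raid) can forward congestion queries to the raid10
 * personality.  The wrapper below is hypothetical.
 */
static inline int r10_example_congested(struct mddev *mddev, int bdi_bits)
{
	/* non-zero means the underlying raid10 array is congested */
	return md_raid10_congested(mddev, bdi_bits);
}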
#endif