#ifndef _RAID10_H
#define _RAID10_H

typedef struct mirror_info mirror_info_t;

struct mirror_info {
	mdk_rdev_t *rdev;
	sector_t head_position;
	int recovery_disabled;	/* matches
				 * mddev->recovery_disabled
				 * when we shouldn't try
				 * recovering this device.
				 */
};

typedef struct r10bio_s r10bio_t;

struct r10_private_data_s {
	mddev_t *mddev;
	mirror_info_t *mirrors;
	int raid_disks;
	spinlock_t device_lock;

	/* geometry */
	int near_copies;	/* number of copies laid out raid0 style */
	int far_copies;		/* number of copies laid out
				 * at large strides across drives
				 */
	int far_offset;		/* far_copies are offset by 1 stripe
				 * instead of many
				 */
	int copies;		/* near_copies * far_copies.
				 * must be <= raid_disks
				 */
	sector_t stride;	/* distance between far copies.
				 * This is size / far_copies unless
				 * far_offset, in which case it is
				 * 1 stripe.
				 */

	sector_t dev_sectors;	/* temp copy of mddev->dev_sectors */

	int chunk_shift;	/* shift from chunks to sectors */
	sector_t chunk_mask;

	struct list_head retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list pending_bio_list;

	spinlock_t resync_lock;
	int nr_pending;
	int nr_waiting;
	int nr_queued;
	int barrier;
	sector_t next_resync;
	int fullsync;		/* set to 1 if a full sync is needed,
				 * (fresh device added).
				 * Cleared when a sync completes.
				 */

	wait_queue_head_t wait_barrier;

	mempool_t *r10bio_pool;
	mempool_t *r10buf_pool;
	struct page *tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s *thread;
};

typedef struct r10_private_data_s conf_t;
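
/*
 * Illustrative sketch only (not part of the original header; the helper
 * name is hypothetical): how the geometry fields above map a virtual
 * array sector to the device and offset of its first copy, simplified
 * to the far_offset == 0 case.  Near copies sit on adjacent devices at
 * the same offset; each far copy repeats the pattern 'stride' sectors
 * further on, shifted by near_copies devices.
 */
static inline sector_t example_raid10_first_copy(conf_t *conf, sector_t virt,
						 int *dev)
{
	sector_t chunk = virt >> conf->chunk_shift;
	sector_t offset = virt & conf->chunk_mask;
	sector_t stripe;

	chunk *= conf->near_copies;
	stripe = chunk;
	*dev = sector_div(stripe, conf->raid_disks);	/* device of copy 0 */

	/* copy k (k < near_copies) lives on device (*dev + k) % raid_disks
	 * at this sector; each far copy adds conf->stride sectors and
	 * near_copies devices to that.
	 */
	return (stripe << conf->chunk_shift) + offset;
}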

/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio_s {
	atomic_t remaining;	/* 'have we finished' count,
				 * used from IRQ handlers
				 */
	sector_t sector;	/* virtual sector number */
	int sectors;
	unsigned long state;
	mddev_t *mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio *master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int read_slot;

	struct list_head retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 */
	struct {
		struct bio *bio;
		sector_t addr;
		int devnum;
	} devs[0];
};
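
/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * because devs[] is a flexible array member, each r10bio is allocated
 * with exactly one slot per copy, e.g. from a mempool allocation
 * callback along these lines.
 */
static inline void *example_r10bio_alloc(gfp_t gfp_flags, void *data)
{
	conf_t *conf = data;
	/* room for conf->copies per-device slots after the fixed header */
	int size = offsetof(struct r10bio_s, devs[conf->copies]);

	return kzalloc(size, gfp_flags);
}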

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio*)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

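/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * BIO_SPECIAL() lets completion paths tell the IO_BLOCKED/IO_MADE_GOOD
 * markers apart from real bio pointers before dereferencing them.
 */
static inline void example_put_slot_bio(r10bio_t *r10_bio, int slot)
{
	struct bio *bio = r10_bio->devs[slot].bio;

	if (bio && !BIO_SPECIAL(bio))
		bio_put(bio);	/* a real bio: drop our reference */
	/* the special markers are dealt with later, from process context */
}
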
/* bits for r10bio.state */
#define R10BIO_Uptodate	0
#define R10BIO_IsSync	1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded 3
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
#define R10BIO_ReadError 4
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
#define R10BIO_MadeGood 5
#define R10BIO_WriteError 6
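
/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * the R10BIO_* values are bit numbers within r10bio->state and are
 * manipulated with the atomic bitops, for example:
 */
static inline int example_note_read_error(r10bio_t *r10_bio)
{
	set_bit(R10BIO_ReadError, &r10_bio->state);	/* raid10d will retry it */
	return test_bit(R10BIO_Uptodate, &r10_bio->state);	/* did any copy succeed? */
}
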
#endif

#ifndef _RAID10_H
#define _RAID10_H

struct mirror_info {
	struct md_rdev *rdev, *replacement;
	sector_t head_position;
	int recovery_disabled;	/* matches
				 * mddev->recovery_disabled
				 * when we shouldn't try
				 * recovering this device.
				 */
};

struct r10conf {
	struct mddev *mddev;
	struct mirror_info *mirrors;
	struct mirror_info *mirrors_new, *mirrors_old;
	spinlock_t device_lock;

	/* geometry */
	struct geom {
		int raid_disks;
		int near_copies;	/* number of copies laid out
					 * raid0 style */
		int far_copies;		/* number of copies laid out
					 * at large strides across drives
					 */
		int far_offset;		/* far_copies are offset by 1
					 * stripe instead of many
					 */
		sector_t stride;	/* distance between far copies.
					 * This is size / far_copies unless
					 * far_offset, in which case it is
					 * 1 stripe.
					 */
		int chunk_shift;	/* shift from chunks to sectors */
		sector_t chunk_mask;
	} prev, geo;
	int copies;		/* near_copies * far_copies.
				 * must be <= raid_disks
				 */

	sector_t dev_sectors;	/* temp copy of
				 * mddev->dev_sectors */
	sector_t reshape_progress;
	sector_t reshape_safe;
	unsigned long reshape_checkpoint;
	sector_t offset_diff;

	struct list_head retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list pending_bio_list;
	int pending_count;

	spinlock_t resync_lock;
	int nr_pending;
	int nr_waiting;
	int nr_queued;
	int barrier;
	sector_t next_resync;
	int fullsync;		/* set to 1 if a full sync is needed,
				 * (fresh device added).
				 * Cleared when a sync completes.
				 */
	int have_replacement;	/* There is at least one
				 * replacement device.
				 */
	wait_queue_head_t wait_barrier;

	mempool_t *r10bio_pool;
	mempool_t *r10buf_pool;
	struct page *tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread *thread;
};

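/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * writes queued on pending_bio_list under device_lock are drained later
 * (for example from the raid10d thread) and handed to the block layer.
 */
static inline void example_flush_pending_writes(struct r10conf *conf)
{
	struct bio *bio;

	spin_lock_irq(&conf->device_lock);
	bio = bio_list_get(&conf->pending_bio_list);	/* take the whole list */
	conf->pending_count = 0;
	spin_unlock_irq(&conf->device_lock);

	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		generic_make_request(bio);	/* submit the queued write */
		bio = next;
	}
}
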
/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t remaining;	/* 'have we finished' count,
				 * used from IRQ handlers
				 */
	sector_t sector;	/* virtual sector number */
	int sectors;
	unsigned long state;
	struct mddev *mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio *master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int read_slot;

	struct list_head retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct r10dev {
		struct bio *bio;
		union {
			struct bio *repl_bio;	/* used for resync and
						 * writes */
			struct md_rdev *rdev;	/* used for reads
						 * (read_slot >= 0) */
		};
		sector_t addr;
		int devnum;
	} devs[0];
};
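
/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * when a mirror slot also has a replacement device, the same write is
 * issued twice, once through devs[n].bio and once through the
 * devs[n].repl_bio member of the union above.
 */
static inline void example_submit_write_copy(struct r10bio *r10_bio, int n)
{
	generic_make_request(r10_bio->devs[n].bio);
	if (r10_bio->devs[n].repl_bio)
		generic_make_request(r10_bio->devs[n].repl_bio);
}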

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio*)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
};
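
/*
 * Illustrative sketch only (assumption, helper name is hypothetical):
 * while a reshape is in progress the array carries two layouts, 'prev'
 * and 'geo'.  Requests that were mapped with the old layout carry the
 * R10BIO_Previous flag, so later code can pick the matching geometry.
 */
static inline struct geom *example_pick_geometry(struct r10conf *conf,
						 struct r10bio *r10_bio)
{
	return test_bit(R10BIO_Previous, &r10_bio->state)
		? &conf->prev : &conf->geo;
}
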
#endif