/* raid10.h — data structures for the md RAID10 personality
 * (two historical versions of this header appear concatenated below)
 */
1#ifndef _RAID10_H
2#define _RAID10_H
3
4typedef struct mirror_info mirror_info_t;
5
6struct mirror_info {
7 mdk_rdev_t *rdev;
8 sector_t head_position;
9 int recovery_disabled; /* matches
10 * mddev->recovery_disabled
11 * when we shouldn't try
12 * recovering this device.
13 */
14};
15
16typedef struct r10bio_s r10bio_t;
17
18struct r10_private_data_s {
19 mddev_t *mddev;
20 mirror_info_t *mirrors;
21 int raid_disks;
22 spinlock_t device_lock;
23
24 /* geometry */
25 int near_copies; /* number of copies laid out raid0 style */
26 int far_copies; /* number of copies laid out
27 * at large strides across drives
28 */
29 int far_offset; /* far_copies are offset by 1 stripe
30 * instead of many
31 */
32 int copies; /* near_copies * far_copies.
33 * must be <= raid_disks
34 */
35 sector_t stride; /* distance between far copies.
36 * This is size / far_copies unless
37 * far_offset, in which case it is
38 * 1 stripe.
39 */
40
41 sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
42
43 int chunk_shift; /* shift from chunks to sectors */
44 sector_t chunk_mask;
45
46 struct list_head retry_list;
47 /* queue pending writes and submit them on unplug */
48 struct bio_list pending_bio_list;
49
50
51 spinlock_t resync_lock;
52 int nr_pending;
53 int nr_waiting;
54 int nr_queued;
55 int barrier;
56 sector_t next_resync;
57 int fullsync; /* set to 1 if a full sync is needed,
58 * (fresh device added).
59 * Cleared when a sync completes.
60 */
61
62 wait_queue_head_t wait_barrier;
63
64 mempool_t *r10bio_pool;
65 mempool_t *r10buf_pool;
66 struct page *tmppage;
67
68 /* When taking over an array from a different personality, we store
69 * the new thread here until we fully activate the array.
70 */
71 struct mdk_thread_s *thread;
72};
73
74typedef struct r10_private_data_s conf_t;
75
76/*
77 * this is our 'private' RAID10 bio.
78 *
79 * it contains information about what kind of IO operations were started
80 * for this RAID10 operation, and about their status:
81 */
82
83struct r10bio_s {
84 atomic_t remaining; /* 'have we finished' count,
85 * used from IRQ handlers
86 */
87 sector_t sector; /* virtual sector number */
88 int sectors;
89 unsigned long state;
90 mddev_t *mddev;
91 /*
92 * original bio going to /dev/mdx
93 */
94 struct bio *master_bio;
95 /*
96 * if the IO is in READ direction, then this is where we read
97 */
98 int read_slot;
99
100 struct list_head retry_list;
101 /*
102 * if the IO is in WRITE direction, then multiple bios are used,
103 * one for each copy.
104 * When resyncing we also use one for each copy.
105 * When reconstructing, we use 2 bios, one for read, one for write.
106 * We choose the number when they are allocated.
107 */
108 struct {
109 struct bio *bio;
110 sector_t addr;
111 int devnum;
112 } devs[0];
113};
114
115/* when we get a read error on a read-only array, we redirect to another
116 * device without failing the first device, or trying to over-write to
117 * correct the read error. To keep track of bad blocks on a per-bio
118 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
119 */
120#define IO_BLOCKED ((struct bio*)1)
121/* When we successfully write to a known bad-block, we need to remove the
122 * bad-block marking which must be done from process context. So we record
123 * the success by setting devs[n].bio to IO_MADE_GOOD
124 */
125#define IO_MADE_GOOD ((struct bio *)2)
126
127#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
128
129/* bits for r10bio.state */
130#define R10BIO_Uptodate 0
131#define R10BIO_IsSync 1
132#define R10BIO_IsRecover 2
133#define R10BIO_Degraded 3
134/* Set ReadError on bios that experience a read error
135 * so that raid10d knows what to do with them.
136 */
137#define R10BIO_ReadError 4
138/* If a write for this request means we can clear some
139 * known-bad-block records, we set this flag.
140 */
141#define R10BIO_MadeGood 5
142#define R10BIO_WriteError 6
143#endif
1#ifndef _RAID10_H
2#define _RAID10_H
3
4struct raid10_info {
5 struct md_rdev *rdev, *replacement;
6 sector_t head_position;
7 int recovery_disabled; /* matches
8 * mddev->recovery_disabled
9 * when we shouldn't try
10 * recovering this device.
11 */
12};
13
14struct r10conf {
15 struct mddev *mddev;
16 struct raid10_info *mirrors;
17 struct raid10_info *mirrors_new, *mirrors_old;
18 spinlock_t device_lock;
19
20 /* geometry */
21 struct geom {
22 int raid_disks;
23 int near_copies; /* number of copies laid out
24 * raid0 style */
25 int far_copies; /* number of copies laid out
26 * at large strides across drives
27 */
28 int far_offset; /* far_copies are offset by 1
29 * stripe instead of many
30 */
31 sector_t stride; /* distance between far copies.
32 * This is size / far_copies unless
33 * far_offset, in which case it is
34 * 1 stripe.
35 */
36 int far_set_size; /* The number of devices in a set,
37 * where a 'set' are devices that
38 * contain far/offset copies of
39 * each other.
40 */
41 int chunk_shift; /* shift from chunks to sectors */
42 sector_t chunk_mask;
43 } prev, geo;
44 int copies; /* near_copies * far_copies.
45 * must be <= raid_disks
46 */
47
48 sector_t dev_sectors; /* temp copy of
49 * mddev->dev_sectors */
50 sector_t reshape_progress;
51 sector_t reshape_safe;
52 unsigned long reshape_checkpoint;
53 sector_t offset_diff;
54
55 struct list_head retry_list;
56 /* queue pending writes and submit them on unplug */
57 struct bio_list pending_bio_list;
58 int pending_count;
59
60 spinlock_t resync_lock;
61 int nr_pending;
62 int nr_waiting;
63 int nr_queued;
64 int barrier;
65 sector_t next_resync;
66 int fullsync; /* set to 1 if a full sync is needed,
67 * (fresh device added).
68 * Cleared when a sync completes.
69 */
70 int have_replacement; /* There is at least one
71 * replacement device.
72 */
73 wait_queue_head_t wait_barrier;
74
75 mempool_t *r10bio_pool;
76 mempool_t *r10buf_pool;
77 struct page *tmppage;
78
79 /* When taking over an array from a different personality, we store
80 * the new thread here until we fully activate the array.
81 */
82 struct md_thread *thread;
83};
84
85/*
86 * this is our 'private' RAID10 bio.
87 *
88 * it contains information about what kind of IO operations were started
89 * for this RAID10 operation, and about their status:
90 */
91
92struct r10bio {
93 atomic_t remaining; /* 'have we finished' count,
94 * used from IRQ handlers
95 */
96 sector_t sector; /* virtual sector number */
97 int sectors;
98 unsigned long state;
99 struct mddev *mddev;
100 /*
101 * original bio going to /dev/mdx
102 */
103 struct bio *master_bio;
104 /*
105 * if the IO is in READ direction, then this is where we read
106 */
107 int read_slot;
108
109 struct list_head retry_list;
110 /*
111 * if the IO is in WRITE direction, then multiple bios are used,
112 * one for each copy.
113 * When resyncing we also use one for each copy.
114 * When reconstructing, we use 2 bios, one for read, one for write.
115 * We choose the number when they are allocated.
116 * We sometimes need an extra bio to write to the replacement.
117 */
118 struct r10dev {
119 struct bio *bio;
120 union {
121 struct bio *repl_bio; /* used for resync and
122 * writes */
123 struct md_rdev *rdev; /* used for reads
124 * (read_slot >= 0) */
125 };
126 sector_t addr;
127 int devnum;
128 } devs[0];
129};
130
131/* bits for r10bio.state */
132enum r10bio_state {
133 R10BIO_Uptodate,
134 R10BIO_IsSync,
135 R10BIO_IsRecover,
136 R10BIO_IsReshape,
137 R10BIO_Degraded,
138/* Set ReadError on bios that experience a read error
139 * so that raid10d knows what to do with them.
140 */
141 R10BIO_ReadError,
142/* If a write for this request means we can clear some
143 * known-bad-block records, we set this flag.
144 */
145 R10BIO_MadeGood,
146 R10BIO_WriteError,
147/* During a reshape we might be performing IO on the
148 * 'previous' part of the array, in which case this
149 * flag is set
150 */
151 R10BIO_Previous,
152};
153
154extern int md_raid10_congested(struct mddev *mddev, int bits);
155
156#endif