/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per device. Each buffer can be in
 * one of a number of states stored in "flags". Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 * R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 * We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 * A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 * Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 * We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 * Empty -> Want - on read or write to get old data for parity calc
 * Empty -> Dirty - on compute_parity to satisfy write/sync request.
 * Empty -> Clean - on compute_block when computing a block for failed drive
 * Want -> Empty - on failed read
 * Want -> Clean - on successful completion of read request
 * Dirty -> Clean - on successful completion of write request
 * Dirty -> Clean - on failed write
 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 * Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states. That
 * is if one drive has failed and there is a spare being rebuilt. We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare. A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed that, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called. This may happen in the end_request routine only
 * if the buffer has just successfully been read. end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer. Other threads may do this only if they first check
 * that the Uptodate bit is set. Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written). Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos. The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time. It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither). The "inactive_list" contains stripes which are not
 * currently being used for any request. They can freely be reused
 * for another stripe. The "handle_list" contains stripes that need
 * to be handled in some way. Both of these are fifo queues. Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number. Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front. All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 * - stripes have a reference counter. If count==0, they are on a list.
 * - If a stripe might need handling, STRIPE_HANDLE is set.
 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 * handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 * activate an unhashed/inactive stripe (get_active_stripe())
 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 * activate a hashed, possibly active stripe (get_active_stripe())
 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 * attach a request to an active stripe (add_stripe_bh())
 * lockdev attach-buffer unlockdev
 * handle a stripe (handle_stripe())
 * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 * (lockdev check-buffers unlockdev) ..
 * change-state ..
 * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 * release an active stripe (release_stripe())
 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 * read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count. raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 * so we prevent parity dependent operations like writes and compute_blocks
 * from starting while a check is in progress. Some dma engines can perform
 * the check without damaging the parity block, in these cases the parity
 * block is re-marked up to date (assuming the check was successful) and is
 * not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 * blocks, and mark them as not up to date. This causes new read requests
 * to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 * that block as if it is up to date. raid5_run_ops guarantees that any
 * operation that is dependent on the compute block result is initiated after
 * the compute block completes.
 */
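
/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * driver): the informal Empty/Want/Dirty/Clean names above expressed as
 * a pure function of the two flag bits.
 */
static inline const char *r5_buf_state_name(int uptodate, int locked)
{
	if (uptodate)
		return locked ? "Dirty" : "Clean";
	return locked ? "Want" : "Empty";
}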

/*
 * Operations state - intermediate states that are visible outside of
 * STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon. Simple operations like biofill and
 * compute, which only have an _idle and a _run state, are instead indicated
 * with sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - check operation is running
 * @check_state_run_q - q-only parity check is running
 * @check_state_run_pq - pq dual parity check is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

#define DEFAULT_STRIPE_SIZE	4096
struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states	reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head; /* protected by stripe lock */
	spinlock_t		batch_lock; /* only header's lock is useful */
	struct list_head	batch_list; /* protected by head's batch lock*/

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start; /* first meta block on the journal */
	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page; /* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 * @request - async service request flags for raid_run_ops
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int	nr_pages;	/* page array size */
	int	stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		unsigned int	offset;		/* offset of the page */
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
		unsigned short	write_hint;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the readerror */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
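
/*
 * Illustrative sketch (hypothetical helper, not in the driver): these
 * flags live in the dev->flags word and are tested with the usual
 * bitop helpers, e.g. a buffer in the "Clean" state from the comment
 * at the top of this file:
 */
static inline int r5dev_is_clean(struct r5dev *dev)
{
	return test_bit(R5_UPTODATE, &dev->flags) &&
	       !test_bit(R5_LOCKED, &dev->flags);
}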

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
				 * see more detail in the raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
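
/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * mask above is meant to be tested against sh->state in one go, e.g.:
 */
static inline int stripe_expand_sync_pending(struct stripe_head *sh)
{
	return (sh->state & STRIPE_EXPAND_SYNC_FLAGS) != 0;
}
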
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase. Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case) we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */
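
/*
 * Illustrative sketch (simplified, not the real handle_stripe() logic):
 * the delayed/preread decision described above.
 */
static inline void r5_delay_if_no_preread(struct stripe_head *sh)
{
	if (!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
		/* defer this stripe until an unplug promotes the queue */
		set_bit(STRIPE_DELAYED, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
	}
}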

/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 * There are three safe ways to access disk_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 * is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 * lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if it
 * has been incremented, the pointer is put back in .rdev.
 */

struct disk_info {
	struct md_rdev	*rdev, *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};

/*
 * Stripe cache
 */

#define NR_STRIPES		256

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#endif

#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode has identical behavior to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released. This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long	stripe_size;
	unsigned int	stripe_shift;
	unsigned long	stripe_sectors;
#endif

	/* reshape_progress is the leading edge of a 'reshape'
	 * It has value MaxSector when no reshape is happening
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape. We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices. May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	loprio_list; /* low priority stripes */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		void		*scribble;  /* space for constructing buffer
					     * lists and performing address
					     * conversions
					     */
		int		scribble_obj_size;
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};

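/*
 * Illustrative sketch (simplified, not the real raid5_release_stripe()):
 * the "release an active stripe" transition documented at the top of
 * this file. The real code also takes the per-bucket hash lock and
 * handles stripe batching, the r5c lists and the released_stripes
 * llist; this only shows the refcount/STRIPE_HANDLE decision.
 */
static inline void r5_release_stripe_sketch(struct r5conf *conf,
					    struct stripe_head *sh)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru,
				      &conf->inactive_list[sh->hash_lock_index]);
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
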
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define RAID5_STRIPE_SIZE(conf)		STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)		((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap. There may be several bios per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}

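/*
 * Illustrative usage sketch (hypothetical helper, not in the driver):
 * walk the toread list of one stripe+device with r5_next_bio(), which
 * ends the walk at the first bio extending past this device.
 */
static inline int r5_count_toread(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *bio;
	int n = 0;

	for (bio = dev->toread; bio;
	     bio = r5_next_bio(conf, bio, dev->sector))
		n++;
	return n;
}
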
/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms. These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
	       (layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return offset of the corresponding page for r5dev.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}

/*
 * Return corresponding page address for r5dev.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
#endif

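#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Illustrative sketch (hypothetical helper): combine the two accessors
 * above to resolve the data buffer of dev[i] when several stripe_heads
 * share one higher-order page. Assumes page_address() is usable here,
 * i.e. the stripe pages are not highmem.
 */
static inline void *raid5_dev_data_sketch(struct stripe_head *sh, int disk_idx)
{
	return page_address(raid5_get_dev_page(sh, disk_idx)) +
		raid5_get_page_offset(sh, disk_idx);
}
#endif
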
extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif