drivers/md/raid5.h (Linux v3.1)
  1#ifndef _RAID5_H
  2#define _RAID5_H
  3
  4#include <linux/raid/xor.h>
  5#include <linux/dmaengine.h>
  6
  7/*
  8 *
  9 * Each stripe contains one buffer per device.  Each buffer can be in
 10 * one of a number of states stored in "flags".  Changes between
 11 * these states happen *almost* exclusively under the protection of the
 12 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 13 * these are not protected by STRIPE_ACTIVE.
 14 *
 15 * The flag bits that are used to represent these states are:
 16 *   R5_UPTODATE and R5_LOCKED
 17 *
 18 * State Empty == !UPTODATE, !LOCK
 19 *        We have no data, and there is no active request
 20 * State Want == !UPTODATE, LOCK
 21 *        A read request is being submitted for this block
 22 * State Dirty == UPTODATE, LOCK
 23 *        Some new data is in this buffer, and it is being written out
 24 * State Clean == UPTODATE, !LOCK
 25 *        We have valid data which is the same as on disc
 26 *
 27 * The possible state transitions are:
 28 *
 29 *  Empty -> Want   - on read or write to get old data for  parity calc
 30 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
 31 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 32 *  Want  -> Empty  - on failed read
 33 *  Want  -> Clean  - on successful completion of read request
 34 *  Dirty -> Clean  - on successful completion of write request
 35 *  Dirty -> Clean  - on failed write
 36 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 37 *
 38 * The Want->Empty, Want->Clean, Dirty->Clean transitions
 39 * all happen in b_end_io at interrupt time.
 40 * Each sets the Uptodate bit before releasing the Lock bit.
 41 * This leaves one multi-stage transition:
 42 *    Want->Dirty->Clean
 43 * This is safe because thinking that a Clean buffer is actually dirty
 44 * will at worst delay some action, and the stripe will be scheduled
 45 * for attention after the transition is complete.
 46 *
 47 * There is one possibility that is not covered by these states.  That
 48 * is if one drive has failed and there is a spare being rebuilt.  We
 49 * can't distinguish between a clean block that has been generated
 50 * from parity calculations, and a clean block that has been
 51 * successfully written to the spare ( or to parity when resyncing).
 52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 53 * is set whenever a write is scheduled to the spare, or to the parity
 54 * disc if there is no spare.  A sync request clears this bit, and
 55 * when we find it set with no buffers locked, we know the sync is
 56 * complete.
 57 *
 58 * Buffers for the md device that arrive via make_request are attached
 59 * to the appropriate stripe in one of two lists linked on b_reqnext.
 60 * One list (bh_read) for read requests, one (bh_write) for write.
 61 * There should never be more than one buffer on the two lists
 62 * together, but we are not guaranteed that, so we allow for more.
 63 *
 64 * If a buffer is on the read list when the associated cache buffer is
 65 * Uptodate, the data is copied into the read buffer and its b_end_io
 66 * routine is called.  This may happen in the end_request routine only
 67 * if the buffer has just successfully been read.  end_request should
 68 * remove the buffers from the list and then set the Uptodate bit on
 69 * the buffer.  Other threads may do this only if they first check
 70 * that the Uptodate bit is set.  Once they have checked that they may
 71 * take buffers off the read queue.
 72 *
 73 * When a buffer on the write list is committed for write it is copied
 74 * into the cache buffer, which is then marked dirty, and moved onto a
 75 * third list, the written list (bh_written).  Once both the parity
 76 * block and the cached buffer are successfully written, any buffer on
 77 * a written list can be returned with b_end_io.
 78 *
 79 * The write list and read list both act as fifos.  The read list,
 80 * write list and written list are protected by the device_lock.
 81 * The device_lock is only for list manipulations and will only be
 82 * held for a very short time.  It can be claimed from interrupts.
 83 *
 84 *
 85 * Stripes in the stripe cache can be on one of two lists (or on
 86 * neither).  The "inactive_list" contains stripes which are not
 87 * currently being used for any request.  They can freely be reused
 88 * for another stripe.  The "handle_list" contains stripes that need
 89 * to be handled in some way.  Both of these are fifo queues.  Each
 90 * stripe is also (potentially) linked to a hash bucket in the hash
 91 * table so that it can be found by sector number.  Stripes that are
 92 * not hashed must be on the inactive_list, and will normally be at
 93 * the front.  All stripes start life this way.
 94 *
 95 * The inactive_list, handle_list and hash bucket lists are all protected by the
 96 * device_lock.
 97 *  - stripes have a reference counter. If count==0, they are on a list.
 98 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 99 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
100 *    handle_list else inactive_list
101 *
102 * This, combined with the fact that STRIPE_HANDLE is only ever
103 * cleared while a stripe has a non-zero count, means that if the
104 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
105 * handle_list, and if refcount is 0 and STRIPE_HANDLE is not set, then
106 * the stripe is on inactive_list.
107 *
108 * The possible transitions are:
109 *  activate an unhashed/inactive stripe (get_active_stripe())
110 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
111 *  activate a hashed, possibly active stripe (get_active_stripe())
112 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
113 *  attach a request to an active stripe (add_stripe_bh())
114 *     lockdev attach-buffer unlockdev
115 *  handle a stripe (handle_stripe())
116 *     setSTRIPE_ACTIVE,  clrSTRIPE_HANDLE ...
117 *		(lockdev check-buffers unlockdev) ..
118 *		change-state ..
119 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
120 *  release an active stripe (release_stripe())
121 *     lockdev if (!--cnt) { if  STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
122 *
123 * The refcount counts each thread that has activated the stripe,
124 * plus raid5d if it is handling it, plus one for each active request
125 * on a cached buffer, and plus one if the stripe is undergoing stripe
126 * operations.
127 *
128 * The stripe operations are:
129 * -copying data between the stripe cache and user application buffers
130 * -computing blocks to save a disk access, or to recover a missing block
131 * -updating the parity on a write operation (reconstruct write and
132 *  read-modify-write)
133 * -checking parity correctness
134 * -running i/o to disk
135 * These operations are carried out by raid5_run_ops which uses the async_tx
136 * api to (optionally) offload operations to dedicated hardware engines.
137 * When requesting an operation handle_stripe sets the pending bit for the
138 * operation and increments the count.  raid5_run_ops is then run whenever
139 * the count is non-zero.
140 * There are some critical dependencies between the operations that prevent some
141 * from being requested while another is in flight.
142 * 1/ Parity check operations destroy the in-cache version of the parity block,
143 *    so we prevent parity dependent operations like writes and compute_blocks
144 *    from starting while a check is in progress.  Some dma engines can perform
145 *    the check without damaging the parity block, in these cases the parity
146 *    block is re-marked up to date (assuming the check was successful) and is
147 *    not re-read from disk.
148 * 2/ When a write operation is requested we immediately lock the affected
149 *    blocks, and mark them as not up to date.  This causes new read requests
150 *    to be held off, as well as parity checks and compute block operations.
151 * 3/ Once a compute block operation has been requested handle_stripe treats
152 *    that block as if it is up to date.  raid5_run_ops guarantees that any
153 *    operation that is dependent on the compute block result is initiated after
154 *    the compute block completes.
155 */
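The buffer state machine above is small enough to model directly. Below is a minimal user-space sketch (not kernel code; the demo_* names and bit values are hypothetical) showing that each state is a pure function of the two flag bits, including the transient Dirty step of the Want->Dirty->Clean transition:

#include <stdio.h>

#define DEMO_UPTODATE	(1u << 0)
#define DEMO_LOCKED	(1u << 1)

static const char *demo_buffer_state(unsigned int flags)
{
	switch (flags & (DEMO_UPTODATE | DEMO_LOCKED)) {
	case 0:					return "Empty";
	case DEMO_LOCKED:			return "Want";
	case DEMO_UPTODATE | DEMO_LOCKED:	return "Dirty";
	default:				return "Clean";	/* UPTODATE, !LOCKED */
	}
}

int main(void)
{
	unsigned int flags = DEMO_LOCKED;	/* Want: a read is in flight */

	flags |= DEMO_UPTODATE;			/* completion sets Uptodate first... */
	printf("%s\n", demo_buffer_state(flags));	/* Dirty (transient) */
	flags &= ~DEMO_LOCKED;			/* ...then releases the lock */
	printf("%s\n", demo_buffer_state(flags));	/* Clean */
	return 0;
}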
156
157/*
158 * Operations state - intermediate states that are visible outside of 
159 *   STRIPE_ACTIVE.
160 * In general _idle indicates nothing is running, _run indicates a data
161 * processing operation is active, and _result means the data processing result
162 * is stable and can be acted upon.  For simple operations like biofill and
163 * compute that only have an _idle and _run state they are indicated with
164 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
165 */
166/**
167 * enum check_states - handles syncing / repairing a stripe
168 * @check_state_idle - check operations are quiesced
169 * @check_state_run - check operation is running
170 * @check_state_check_result - set outside lock when check result is valid
171 * @check_state_compute_run - check failed and we are repairing
172 * @check_state_compute_result - set outside lock when compute result is valid
173 */
174enum check_states {
175	check_state_idle = 0,
176	check_state_run, /* xor parity check */
177	check_state_run_q, /* q-parity check */
178	check_state_run_pq, /* pq dual parity check */
179	check_state_check_result,
180	check_state_compute_run, /* parity repair */
181	check_state_compute_result,
182};
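A hedged sketch of the repair path these states encode: a check that finds good parity quiesces, while a failed check schedules a repair via the compute states. demo_after_check() is hypothetical; the real transitions are driven from the parity-check handling in raid5.c:

static enum check_states demo_after_check(int parity_ok)
{
	return parity_ok ? check_state_idle : check_state_compute_run;
}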
183
184/**
185 * enum reconstruct_states - handles writing or expanding a stripe
186 */
187enum reconstruct_states {
188	reconstruct_state_idle = 0,
189	reconstruct_state_prexor_drain_run,	/* prexor-write */
190	reconstruct_state_drain_run,		/* write */
191	reconstruct_state_run,			/* expand */
192	reconstruct_state_prexor_drain_result,
193	reconstruct_state_drain_result,
194	reconstruct_state_result,
195};
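A sketch of how a write selects among these states, following the per-member comments: read-modify-write takes the prexor-drain path, reconstruct-write the plain drain path, and expansion the bare run path. demo_schedule_reconstruct() and its arguments are hypothetical; the real choice is made in raid5.c:

static enum reconstruct_states demo_schedule_reconstruct(int expand, int rmw)
{
	if (expand)
		return reconstruct_state_run;			/* expand */
	return rmw ? reconstruct_state_prexor_drain_run		/* prexor-write */
		   : reconstruct_state_drain_run;		/* write */
}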
196
197struct stripe_head {
198	struct hlist_node	hash;
199	struct list_head	lru;	      /* inactive_list or handle_list */
200	struct raid5_private_data *raid_conf;
201	short			generation;	/* increments with every
202						 * reshape */
203	sector_t		sector;		/* sector of this row */
204	short			pd_idx;		/* parity disk index */
205	short			qd_idx;		/* 'Q' disk index for raid6 */
206	short			ddf_layout;/* use DDF ordering to calculate Q */
207	unsigned long		state;		/* state flags */
208	atomic_t		count;	      /* nr of active thread/requests */
209	int			bm_seq;	/* sequence number for bitmap flushes */
210	int			disks;		/* disks in stripe */
211	enum check_states	check_state;
212	enum reconstruct_states reconstruct_state;
213	/**
214	 * struct stripe_operations
215	 * @target - STRIPE_OP_COMPUTE_BLK target
216	 * @target2 - 2nd compute target in the raid6 case
217	 * @zero_sum_result - P and Q verification flags
218	 * @request - async service request flags for raid_run_ops
219	 */
220	struct stripe_operations {
221		int 		     target, target2;
222		enum sum_check_flags zero_sum_result;
223		#ifdef CONFIG_MULTICORE_RAID456
224		unsigned long	     request;
225		wait_queue_head_t    wait_for_ops;
226		#endif
227	} ops;
228	struct r5dev {
229		struct bio	req;
230		struct bio_vec	vec;
231		struct page	*page;
232		struct bio	*toread, *read, *towrite, *written;
233		sector_t	sector;			/* sector of this page */
234		unsigned long	flags;
235	} dev[1]; /* allocated with extra space depending on RAID geometry */
236};
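dev[1] is a C89-style flexible trailing array: a single allocation carries one struct r5dev per member disk. A sketch of the size computation, assuming a hypothetical demo_stripe_size() helper (the slab cache created in raid5.c is sized along these lines):

static size_t demo_stripe_size(int devs)
{
	/* the struct already embeds one r5dev, so add devs - 1 more */
	return sizeof(struct stripe_head) +
	       (devs - 1) * sizeof(struct r5dev);
}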
237
238/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
239 *     for handle_stripe.
240 */
241struct stripe_head_state {
242	int syncing, expanding, expanded;
243	int locked, uptodate, to_read, to_write, failed, written;
244	int to_fill, compute, req_compute, non_overwrite;
245	int failed_num[2];
246	int p_failed, q_failed;
247	int dec_preread_active;
248	unsigned long ops_request;
249
250	struct bio *return_bi;
251	mdk_rdev_t *blocked_rdev;
252	int handle_bad_blocks;
253};
254
255/* Flags */
256#define	R5_UPTODATE	0	/* page contains current data */
257#define	R5_LOCKED	1	/* IO has been submitted on "req" */
258#define	R5_OVERWRITE	2	/* towrite covers whole page */
259/* and some that are internal to handle_stripe */
260#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
261#define	R5_Wantread	4	/* want to schedule a read */
262#define	R5_Wantwrite	5
263#define	R5_Overlap	7	/* There is a pending overlapping request on this block */
264#define	R5_ReadError	8	/* seen a read error here recently */
265#define	R5_ReWrite	9	/* have tried to over-write the readerror */
266
267#define	R5_Expanded	10	/* This block now has post-expand data */
268#define	R5_Wantcompute	11	/* compute_block in progress treat as
269				 * uptodate
270				 */
271#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
272				 * filling
273				 */
274#define	R5_Wantdrain	13	/* dev->towrite needs to be drained */
275#define	R5_WantFUA	14	/* Write should be FUA */
276#define	R5_WriteError	15	/* got a write error - need to record it */
277#define	R5_MadeGood	16	/* A bad block has been fixed by writing to it*/
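These values are bit numbers, not masks: each indexes into dev->flags and is manipulated with the kernel's atomic bitops. A sketch of scheduling a read on one device (demo_want_read() is hypothetical; the flag pairing follows the comments above):

static void demo_want_read(struct r5dev *dev)
{
	set_bit(R5_LOCKED, &dev->flags);	/* IO will be submitted on "req" */
	set_bit(R5_Wantread, &dev->flags);	/* want to schedule a read */
}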
278/*
279 * Write method
280 */
281#define RECONSTRUCT_WRITE	1
282#define READ_MODIFY_WRITE	2
283/* not a write method, but a compute_parity mode */
284#define	CHECK_PARITY		3
285/* Additional compute_parity mode -- updates the parity w/o LOCKING */
286#define UPDATE_PARITY		4
287
288/*
289 * Stripe state
290 */
291enum {
292	STRIPE_ACTIVE,
293	STRIPE_HANDLE,
294	STRIPE_SYNC_REQUESTED,
295	STRIPE_SYNCING,
296	STRIPE_INSYNC,
297	STRIPE_PREREAD_ACTIVE,
298	STRIPE_DELAYED,
299	STRIPE_DEGRADED,
300	STRIPE_BIT_DELAY,
301	STRIPE_EXPANDING,
302	STRIPE_EXPAND_SOURCE,
303	STRIPE_EXPAND_READY,
304	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
305	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
306	STRIPE_BIOFILL_RUN,
307	STRIPE_COMPUTE_RUN,
308	STRIPE_OPS_REQ_PENDING,
309};
310
311/*
312 * Operation request flags
313 */
314#define STRIPE_OP_BIOFILL	0
315#define STRIPE_OP_COMPUTE_BLK	1
316#define STRIPE_OP_PREXOR	2
317#define STRIPE_OP_BIODRAIN	3
318#define STRIPE_OP_RECONSTRUCT	4
319#define STRIPE_OP_CHECK	5
320
321/*
322 * Plugging:
323 *
324 * To improve write throughput, we need to delay the handling of some
325 * stripes until there has been a chance that several write requests
326 * for the one stripe have all been collected.
327 * In particular, any write request that would require pre-reading
328 * is put on a "delayed" queue until there are no stripes currently
329 * in a pre-read phase.  Further, if the "delayed" queue is empty when
330 * a stripe is put on it then we "plug" the queue and do not process it
331 * until an unplug call is made (i.e., the unplug_io_fn() is called).
332 *
333 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
334 * it to the count of prereading stripes.
335 * When write is initiated, or the stripe refcnt == 0 (just in case) we
336 * clear the PREREAD_ACTIVE flag and decrement the count
337 * Whenever the 'handle' queue is empty and the device is not plugged, we
338 * move any stripes from delayed to handle and clear the DELAYED flag and set
339 * PREREAD_ACTIVE.
340 * In stripe_handle, if we find pre-reading is necessary, we do it if
341 * PREREAD_ACTIVE is set, else we set DELAYED, which will send it to the delayed queue.
342 * HANDLE gets cleared if stripe_handle leaves nothing locked.
343 */
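A sketch of the promotion step described above, loosely modelled on raid5_activate_delayed() in raid5.c (the demo_* name is hypothetical, and locking plus the preread threshold test are omitted): each delayed stripe loses DELAYED, gains PREREAD_ACTIVE, and joins the list of stripes ready for handling:

static void demo_activate_delayed(struct list_head *delayed_list,
				  struct list_head *hold_list,
				  atomic_t *preread_active_stripes)
{
	while (!list_empty(delayed_list)) {
		struct stripe_head *sh = list_entry(delayed_list->next,
						    struct stripe_head, lru);

		list_del_init(&sh->lru);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			atomic_inc(preread_active_stripes);
		list_add_tail(&sh->lru, hold_list);
	}
}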
344
345
346struct disk_info {
347	mdk_rdev_t	*rdev;
348};
349
350struct raid5_private_data {
351	struct hlist_head	*stripe_hashtbl;
352	mddev_t			*mddev;
353	struct disk_info	*spare;
354	int			chunk_sectors;
355	int			level, algorithm;
356	int			max_degraded;
357	int			raid_disks;
358	int			max_nr_stripes;
359
360	/* reshape_progress is the leading edge of a 'reshape'
361	 * It has value MaxSector when no reshape is happening
362	 * If delta_disks < 0, it is the last sector we started work on,
363 * else it is the next sector to work on.
364	 */
365	sector_t		reshape_progress;
366	/* reshape_safe is the trailing edge of a reshape.  We know that
367	 * before (or after) this address, all reshape has completed.
368	 */
369	sector_t		reshape_safe;
370	int			previous_raid_disks;
371	int			prev_chunk_sectors;
372	int			prev_algo;
373	short			generation; /* increments with every reshape */
374	unsigned long		reshape_checkpoint; /* Time we last updated
375						     * metadata */
376
377	struct list_head	handle_list; /* stripes needing handling */
378	struct list_head	hold_list; /* preread ready stripes */
379	struct list_head	delayed_list; /* stripes that have plugged requests */
380	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
381	struct bio		*retry_read_aligned; /* currently retrying aligned bios   */
382	struct bio		*retry_read_aligned_list; /* aligned bios retry list  */
383	atomic_t		preread_active_stripes; /* stripes with scheduled io */
384	atomic_t		active_aligned_reads;
385	atomic_t		pending_full_writes; /* full write backlog */
386	int			bypass_count; /* bypassed prereads */
387	int			bypass_threshold; /* preread nice */
388	struct list_head	*last_hold; /* detect hold_list promotions */
389
390	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
391	/* unfortunately we need two cache names as we temporarily have
392	 * two caches.
393	 */
394	int			active_name;
395	char			cache_name[2][32];
396	struct kmem_cache		*slab_cache; /* for allocating stripes */
397
398	int			seq_flush, seq_write;
399	int			quiesce;
400
401	int			fullsync;  /* set to 1 if a full sync is needed,
402					    * (fresh device added).
403					    * Cleared when a sync completes.
404					    */
405	int			recovery_disabled;
406	/* per cpu variables */
407	struct raid5_percpu {
408		struct page	*spare_page; /* Used when checking P/Q in raid6 */
409		void		*scribble;   /* space for constructing buffer
410					      * lists and performing address
411					      * conversions
412					      */
413	} __percpu *percpu;
414	size_t			scribble_len; /* size of scribble region must be
415					       * associated with conf to handle
416					       * cpu hotplug while reshaping
417					       */
418#ifdef CONFIG_HOTPLUG_CPU
419	struct notifier_block	cpu_notify;
420#endif
421
422	/*
423	 * Free stripes pool
424	 */
425	atomic_t		active_stripes;
426	struct list_head	inactive_list;
427	wait_queue_head_t	wait_for_stripe;
428	wait_queue_head_t	wait_for_overlap;
429	int			inactive_blocked;	/* release of inactive stripes blocked,
430							 * waiting for 25% to be free
431							 */
432	int			pool_size; /* number of disks in stripeheads in pool */
433	spinlock_t		device_lock;
434	struct disk_info	*disks;
435
436	/* When taking over an array from a different personality, we store
437	 * the new thread here until we fully activate the array.
438	 */
439	struct mdk_thread_s	*thread;
440};
441
442typedef struct raid5_private_data raid5_conf_t;
443
444/*
445 * Our supported algorithms
446 */
447#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
448#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
449#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
450#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */
451
452/* Define non-rotating (raid4) algorithms.  These allow
453 * conversion of raid4 to raid5.
454 */
455#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
456#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */
457
458/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
459 * Firstly, the exact positioning of the parity block is slightly
460 * different between the 'LEFT_*' modes of md and the "_N_*" modes
461 * of DDF.
462 * Secondly, the order of data blocks over which the Q syndrome is computed
463 * is different.
464 * Consequently we have different layouts for DDF/raid6 than md/raid6.
465 * These layouts are from the DDFv1.2 spec.
466 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
467 * leaves RLQ=3 as 'Vendor Specific'
468 */
469
470#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
471#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
472#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */
473
474
475/* For every RAID5 algorithm we define a RAID6 algorithm
476 * with exactly the same layout for data and parity, and
477 * with the Q block always on the last device (N-1).
478 * This allows trivial conversion from RAID5 to RAID6
479 */
480#define ALGORITHM_LEFT_ASYMMETRIC_6	16
481#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
482#define ALGORITHM_LEFT_SYMMETRIC_6	18
483#define ALGORITHM_RIGHT_SYMMETRIC_6	19
484#define ALGORITHM_PARITY_0_6		20
485#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
486
487static inline int algorithm_valid_raid5(int layout)
488{
489	return (layout >= 0) &&
490		(layout <= 5);
491}
492static inline int algorithm_valid_raid6(int layout)
493{
494	return (layout >= 0 && layout <= 5)
495		||
496		(layout >= 8 && layout <= 10)
497		||
498		(layout >= 16 && layout <= 20);
499}
500
501static inline int algorithm_is_DDF(int layout)
502{
503	return layout >= 8 && layout <= 10;
504}
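A sketch of how these predicates gate array setup: a requested layout is rejected up front when it is not valid for the target level. demo_layout_ok() is hypothetical; raid5.c performs the equivalent check when the array is assembled:

static inline int demo_layout_ok(int level, int layout)
{
	return level == 6 ? algorithm_valid_raid6(layout)
			  : algorithm_valid_raid5(layout);
}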
505
506extern int md_raid5_congested(mddev_t *mddev, int bits);
507extern void md_raid5_kick_device(raid5_conf_t *conf);
508extern int raid5_set_cache_size(mddev_t *mddev, int size);
509#endif

drivers/md/raid5.h (Linux v4.6)
  1#ifndef _RAID5_H
  2#define _RAID5_H
  3
  4#include <linux/raid/xor.h>
  5#include <linux/dmaengine.h>
  6
  7/*
  8 *
  9 * Each stripe contains one buffer per device.  Each buffer can be in
 10 * one of a number of states stored in "flags".  Changes between
 11 * these states happen *almost* exclusively under the protection of the
 12 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 13 * these are not protected by STRIPE_ACTIVE.
 14 *
 15 * The flag bits that are used to represent these states are:
 16 *   R5_UPTODATE and R5_LOCKED
 17 *
 18 * State Empty == !UPTODATE, !LOCK
 19 *        We have no data, and there is no active request
 20 * State Want == !UPTODATE, LOCK
 21 *        A read request is being submitted for this block
 22 * State Dirty == UPTODATE, LOCK
 23 *        Some new data is in this buffer, and it is being written out
 24 * State Clean == UPTODATE, !LOCK
 25 *        We have valid data which is the same as on disc
 26 *
 27 * The possible state transitions are:
 28 *
 29 *  Empty -> Want   - on read or write to get old data for  parity calc
 30 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 31 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 32 *  Want  -> Empty  - on failed read
 33 *  Want  -> Clean  - on successful completion of read request
 34 *  Dirty -> Clean  - on successful completion of write request
 35 *  Dirty -> Clean  - on failed write
 36 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 37 *
 38 * The Want->Empty, Want->Clean, Dirty->Clean transitions
 39 * all happen in b_end_io at interrupt time.
 40 * Each sets the Uptodate bit before releasing the Lock bit.
 41 * This leaves one multi-stage transition:
 42 *    Want->Dirty->Clean
 43 * This is safe because thinking that a Clean buffer is actually dirty
 44 * will at worst delay some action, and the stripe will be scheduled
 45 * for attention after the transition is complete.
 46 *
 47 * There is one possibility that is not covered by these states.  That
 48 * is if one drive has failed and there is a spare being rebuilt.  We
 49 * can't distinguish between a clean block that has been generated
 50 * from parity calculations, and a clean block that has been
 51 * successfully written to the spare ( or to parity when resyncing).
 52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 53 * is set whenever a write is scheduled to the spare, or to the parity
 54 * disc if there is no spare.  A sync request clears this bit, and
 55 * when we find it set with no buffers locked, we know the sync is
 56 * complete.
 57 *
 58 * Buffers for the md device that arrive via make_request are attached
 59 * to the appropriate stripe in one of two lists linked on b_reqnext.
 60 * One list (bh_read) for read requests, one (bh_write) for write.
 61 * There should never be more than one buffer on the two lists
 62 * together, but we are not guaranteed that, so we allow for more.
 63 *
 64 * If a buffer is on the read list when the associated cache buffer is
 65 * Uptodate, the data is copied into the read buffer and its b_end_io
 66 * routine is called.  This may happen in the end_request routine only
 67 * if the buffer has just successfully been read.  end_request should
 68 * remove the buffers from the list and then set the Uptodate bit on
 69 * the buffer.  Other threads may do this only if they first check
 70 * that the Uptodate bit is set.  Once they have checked that they may
 71 * take buffers off the read queue.
 72 *
 73 * When a buffer on the write list is committed for write it is copied
 74 * into the cache buffer, which is then marked dirty, and moved onto a
 75 * third list, the written list (bh_written).  Once both the parity
 76 * block and the cached buffer are successfully written, any buffer on
 77 * a written list can be returned with b_end_io.
 78 *
 79 * The write list and read list both act as fifos.  The read list,
 80 * write list and written list are protected by the device_lock.
 81 * The device_lock is only for list manipulations and will only be
 82 * held for a very short time.  It can be claimed from interrupts.
 83 *
 84 *
 85 * Stripes in the stripe cache can be on one of two lists (or on
 86 * neither).  The "inactive_list" contains stripes which are not
 87 * currently being used for any request.  They can freely be reused
 88 * for another stripe.  The "handle_list" contains stripes that need
 89 * to be handled in some way.  Both of these are fifo queues.  Each
 90 * stripe is also (potentially) linked to a hash bucket in the hash
 91 * table so that it can be found by sector number.  Stripes that are
 92 * not hashed must be on the inactive_list, and will normally be at
 93 * the front.  All stripes start life this way.
 94 *
 95 * The inactive_list, handle_list and hash bucket lists are all protected by the
 96 * device_lock.
 97 *  - stripes have a reference counter. If count==0, they are on a list.
 98 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 99 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
100 *    handle_list else inactive_list
101 *
102 * This, combined with the fact that STRIPE_HANDLE is only ever
103 * cleared while a stripe has a non-zero count, means that if the
104 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
105 * handle_list, and if refcount is 0 and STRIPE_HANDLE is not set, then
106 * the stripe is on inactive_list.
107 *
108 * The possible transitions are:
109 *  activate an unhashed/inactive stripe (get_active_stripe())
110 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
111 *  activate a hashed, possibly active stripe (get_active_stripe())
112 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
113 *  attach a request to an active stripe (add_stripe_bh())
114 *     lockdev attach-buffer unlockdev
115 *  handle a stripe (handle_stripe())
116 *     setSTRIPE_ACTIVE,  clrSTRIPE_HANDLE ...
117 *		(lockdev check-buffers unlockdev) ..
118 *		change-state ..
119 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
120 *  release an active stripe (release_stripe())
121 *     lockdev if (!--cnt) { if  STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
122 *
123 * The refcount counts each thread that has activated the stripe,
124 * plus raid5d if it is handling it, plus one for each active request
125 * on a cached buffer, and plus one if the stripe is undergoing stripe
126 * operations.
127 *
128 * The stripe operations are:
129 * -copying data between the stripe cache and user application buffers
130 * -computing blocks to save a disk access, or to recover a missing block
131 * -updating the parity on a write operation (reconstruct write and
132 *  read-modify-write)
133 * -checking parity correctness
134 * -running i/o to disk
135 * These operations are carried out by raid5_run_ops which uses the async_tx
136 * api to (optionally) offload operations to dedicated hardware engines.
137 * When requesting an operation handle_stripe sets the pending bit for the
138 * operation and increments the count.  raid5_run_ops is then run whenever
139 * the count is non-zero.
140 * There are some critical dependencies between the operations that prevent some
141 * from being requested while another is in flight.
142 * 1/ Parity check operations destroy the in-cache version of the parity block,
143 *    so we prevent parity dependent operations like writes and compute_blocks
144 *    from starting while a check is in progress.  Some dma engines can perform
145 *    the check without damaging the parity block, in these cases the parity
146 *    block is re-marked up to date (assuming the check was successful) and is
147 *    not re-read from disk.
148 * 2/ When a write operation is requested we immediately lock the affected
149 *    blocks, and mark them as not up to date.  This causes new read requests
150 *    to be held off, as well as parity checks and compute block operations.
151 * 3/ Once a compute block operation has been requested handle_stripe treats
152 *    that block as if it is up to date.  raid5_run_ops guarantees that any
153 *    operation that is dependent on the compute block result is initiated after
154 *    the compute block completes.
155 */
156
157/*
158 * Operations state - intermediate states that are visible outside of
159 *   STRIPE_ACTIVE.
160 * In general _idle indicates nothing is running, _run indicates a data
161 * processing operation is active, and _result means the data processing result
162 * is stable and can be acted upon.  For simple operations like biofill and
163 * compute that only have an _idle and _run state they are indicated with
164 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
165 */
166/**
167 * enum check_states - handles syncing / repairing a stripe
168 * @check_state_idle - check operations are quiesced
169 * @check_state_run - check operation is running
170 * @check_state_check_result - set outside lock when check result is valid
171 * @check_state_compute_run - check failed and we are repairing
172 * @check_state_compute_result - set outside lock when compute result is valid
173 */
174enum check_states {
175	check_state_idle = 0,
176	check_state_run, /* xor parity check */
177	check_state_run_q, /* q-parity check */
178	check_state_run_pq, /* pq dual parity check */
179	check_state_check_result,
180	check_state_compute_run, /* parity repair */
181	check_state_compute_result,
182};
183
184/**
185 * enum reconstruct_states - handles writing or expanding a stripe
186 */
187enum reconstruct_states {
188	reconstruct_state_idle = 0,
189	reconstruct_state_prexor_drain_run,	/* prexor-write */
190	reconstruct_state_drain_run,		/* write */
191	reconstruct_state_run,			/* expand */
192	reconstruct_state_prexor_drain_result,
193	reconstruct_state_drain_result,
194	reconstruct_state_result,
195};
196
197struct stripe_head {
198	struct hlist_node	hash;
199	struct list_head	lru;	      /* inactive_list or handle_list */
200	struct llist_node	release_list;
201	struct r5conf		*raid_conf;
202	short			generation;	/* increments with every
203						 * reshape */
204	sector_t		sector;		/* sector of this row */
205	short			pd_idx;		/* parity disk index */
206	short			qd_idx;		/* 'Q' disk index for raid6 */
207	short			ddf_layout;/* use DDF ordering to calculate Q */
208	short			hash_lock_index;
209	unsigned long		state;		/* state flags */
210	atomic_t		count;	      /* nr of active thread/requests */
211	int			bm_seq;	/* sequence number for bitmap flushes */
212	int			disks;		/* disks in stripe */
213	int			overwrite_disks; /* total overwrite disks in stripe,
214						  * this is only checked when stripe
215						  * has STRIPE_BATCH_READY
216						  */
217	enum check_states	check_state;
218	enum reconstruct_states reconstruct_state;
219	spinlock_t		stripe_lock;
220	int			cpu;
221	struct r5worker_group	*group;
222
223	struct stripe_head	*batch_head; /* protected by stripe lock */
224	spinlock_t		batch_lock; /* only header's lock is useful */
225	struct list_head	batch_list; /* protected by head's batch lock*/
226
227	struct r5l_io_unit	*log_io;
228	struct list_head	log_list;
229	/**
230	 * struct stripe_operations
231	 * @target - STRIPE_OP_COMPUTE_BLK target
232	 * @target2 - 2nd compute target in the raid6 case
233	 * @zero_sum_result - P and Q verification flags
234	 * @request - async service request flags for raid_run_ops
235	 */
236	struct stripe_operations {
237		int 		     target, target2;
238		enum sum_check_flags zero_sum_result;
239	} ops;
240	struct r5dev {
241		/* rreq and rvec are used for the replacement device when
242		 * writing data to both devices.
243		 */
244		struct bio	req, rreq;
245		struct bio_vec	vec, rvec;
246		struct page	*page, *orig_page;
247		struct bio	*toread, *read, *towrite, *written;
248		sector_t	sector;			/* sector of this page */
249		unsigned long	flags;
250		u32		log_checksum;
251	} dev[1]; /* allocated with extra space depending on RAID geometry */
252};
253
254/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
255 *     for handle_stripe.
256 */
257struct stripe_head_state {
258	/* 'syncing' means that we need to read all devices, either
259	 * to check/correct parity, or to reconstruct a missing device.
260	 * 'replacing' means we are replacing one or more drives and
261	 * the source is valid at this point so we don't need to
262	 * read all devices, just the replacement targets.
263	 */
264	int syncing, expanding, expanded, replacing;
265	int locked, uptodate, to_read, to_write, failed, written;
266	int to_fill, compute, req_compute, non_overwrite;
267	int failed_num[2];
268	int p_failed, q_failed;
269	int dec_preread_active;
270	unsigned long ops_request;
271
272	struct bio_list return_bi;
273	struct md_rdev *blocked_rdev;
274	int handle_bad_blocks;
275	int log_failed;
276};
277
278/* Flags for struct r5dev.flags */
279enum r5dev_flags {
280	R5_UPTODATE,	/* page contains current data */
281	R5_LOCKED,	/* IO has been submitted on "req" */
282	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
283	R5_OVERWRITE,	/* towrite covers whole page */
284/* and some that are internal to handle_stripe */
285	R5_Insync,	/* rdev && rdev->in_sync at start */
286	R5_Wantread,	/* want to schedule a read */
287	R5_Wantwrite,
288	R5_Overlap,	/* There is a pending overlapping request
289			 * on this block */
290	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
291	R5_ReadError,	/* seen a read error here recently */
292	R5_ReWrite,	/* have tried to over-write the readerror */
293
294	R5_Expanded,	/* This block now has post-expand data */
295	R5_Wantcompute,	/* compute_block in progress treat as
296			 * uptodate
297			 */
298	R5_Wantfill,	/* dev->toread contains a bio that needs
299			 * filling
300			 */
301	R5_Wantdrain,	/* dev->towrite needs to be drained */
302	R5_WantFUA,	/* Write should be FUA */
303	R5_SyncIO,	/* The IO is sync */
304	R5_WriteError,	/* got a write error - need to record it */
305	R5_MadeGood,	/* A bad block has been fixed by writing to it */
306	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
307	R5_MadeGoodRepl,/* A bad block on the replacement device has been
308			 * fixed by writing to it */
309	R5_NeedReplace,	/* This device has a replacement which is not
310			 * up-to-date at this stripe. */
311	R5_WantReplace, /* We need to update the replacement, we have read
312			 * data in, and now is a good time to write it out.
313			 */
314	R5_Discard,	/* Discard the stripe */
315	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
316};
317
318/*
319 * Stripe state
320 */
321enum {
322	STRIPE_ACTIVE,
323	STRIPE_HANDLE,
324	STRIPE_SYNC_REQUESTED,
325	STRIPE_SYNCING,
326	STRIPE_INSYNC,
327	STRIPE_REPLACED,
328	STRIPE_PREREAD_ACTIVE,
329	STRIPE_DELAYED,
330	STRIPE_DEGRADED,
331	STRIPE_BIT_DELAY,
332	STRIPE_EXPANDING,
333	STRIPE_EXPAND_SOURCE,
334	STRIPE_EXPAND_READY,
335	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
336	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
337	STRIPE_BIOFILL_RUN,
338	STRIPE_COMPUTE_RUN,
339	STRIPE_OPS_REQ_PENDING,
340	STRIPE_ON_UNPLUG_LIST,
341	STRIPE_DISCARD,
342	STRIPE_ON_RELEASE_LIST,
343	STRIPE_BATCH_READY,
344	STRIPE_BATCH_ERR,
345	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
346				 * to batch yet.
347				 */
348	STRIPE_LOG_TRAPPED, /* trapped into log */
349};
350
351#define STRIPE_EXPAND_SYNC_FLAGS \
352	((1 << STRIPE_EXPAND_SOURCE) |\
353	(1 << STRIPE_EXPAND_READY) |\
354	(1 << STRIPE_EXPANDING) |\
355	(1 << STRIPE_SYNC_REQUESTED))
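Unlike the individual stripe state bits, this is a multi-bit mask, so it is tested against sh->state directly rather than with test_bit(). A minimal sketch (the demo_* name is hypothetical):

static inline int demo_stripe_expand_sync(struct stripe_head *sh)
{
	return (sh->state & STRIPE_EXPAND_SYNC_FLAGS) != 0;
}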
356/*
357 * Operation request flags
358 */
359enum {
360	STRIPE_OP_BIOFILL,
361	STRIPE_OP_COMPUTE_BLK,
362	STRIPE_OP_PREXOR,
363	STRIPE_OP_BIODRAIN,
364	STRIPE_OP_RECONSTRUCT,
365	STRIPE_OP_CHECK,
366};
367
368/*
369 * RAID parity calculation preferences
370 */
371enum {
372	PARITY_DISABLE_RMW = 0,
373	PARITY_ENABLE_RMW,
374	PARITY_PREFER_RMW,
375};
376
377/*
378 * Pages requested from set_syndrome_sources()
379 */
380enum {
381	SYNDROME_SRC_ALL,
382	SYNDROME_SRC_WANT_DRAIN,
383	SYNDROME_SRC_WRITTEN,
384};
385/*
386 * Plugging:
387 *
388 * To improve write throughput, we need to delay the handling of some
389 * stripes until there has been a chance that several write requests
390 * for the one stripe have all been collected.
391 * In particular, any write request that would require pre-reading
392 * is put on a "delayed" queue until there are no stripes currently
393 * in a pre-read phase.  Further, if the "delayed" queue is empty when
394 * a stripe is put on it then we "plug" the queue and do not process it
395 * until an unplug call is made (i.e., the unplug_io_fn() is called).
396 *
397 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
398 * it to the count of prereading stripes.
399 * When write is initiated, or the stripe refcnt == 0 (just in case) we
400 * clear the PREREAD_ACTIVE flag and decrement the count
401 * Whenever the 'handle' queue is empty and the device is not plugged, we
402 * move any stripes from delayed to handle and clear the DELAYED flag and set
403 * PREREAD_ACTIVE.
404 * In stripe_handle, if we find pre-reading is necessary, we do it if
405 * PREREAD_ACTIVE is set, else we set DELAYED, which will send it to the delayed queue.
406 * HANDLE gets cleared if stripe_handle leaves nothing locked.
407 */
408
409struct disk_info {
410	struct md_rdev	*rdev, *replacement;
411};
412
413/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
414 * This is because we sometimes take all the spinlocks
415 * and creating that much locking depth can cause
416 * problems.
417 */
418#define NR_STRIPE_HASH_LOCKS 8
419#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
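A sketch of the mapping these macros imply: NR_STRIPE_HASH_LOCKS is a power of two, so a stripe's hash bucket is reduced to a lock index with the mask; the result is what sh->hash_lock_index caches. The demo_* name is hypothetical:

static inline int demo_hash_lock_index(unsigned int hash)
{
	return hash & STRIPE_HASH_LOCKS_MASK;
}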
420
421struct r5worker {
422	struct work_struct work;
423	struct r5worker_group *group;
424	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
425	bool working;
426};
427
428struct r5worker_group {
429	struct list_head handle_list;
430	struct r5conf *conf;
431	struct r5worker *workers;
432	int stripes_cnt;
433};
434
435struct r5conf {
436	struct hlist_head	*stripe_hashtbl;
437	/* only protect corresponding hash list and inactive_list */
438	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
439	struct mddev		*mddev;
440	int			chunk_sectors;
441	int			level, algorithm, rmw_level;
442	int			max_degraded;
443	int			raid_disks;
444	int			max_nr_stripes;
445	int			min_nr_stripes;
446
447	/* reshape_progress is the leading edge of a 'reshape'
448	 * It has value MaxSector when no reshape is happening
449	 * If delta_disks < 0, it is the last sector we started work on,
450 * else it is the next sector to work on.
451	 */
452	sector_t		reshape_progress;
453	/* reshape_safe is the trailing edge of a reshape.  We know that
454	 * before (or after) this address, all reshape has completed.
455	 */
456	sector_t		reshape_safe;
457	int			previous_raid_disks;
458	int			prev_chunk_sectors;
459	int			prev_algo;
460	short			generation; /* increments with every reshape */
461	seqcount_t		gen_lock;	/* lock against generation changes */
462	unsigned long		reshape_checkpoint; /* Time we last updated
463						     * metadata */
464	long long		min_offset_diff; /* minimum difference between
465						  * data_offset and
466						  * new_data_offset across all
467						  * devices.  May be negative,
468						  * but is closest to zero.
469						  */
470
471	struct list_head	handle_list; /* stripes needing handling */
472	struct list_head	hold_list; /* preread ready stripes */
473	struct list_head	delayed_list; /* stripes that have plugged requests */
474	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
475	struct bio		*retry_read_aligned; /* currently retrying aligned bios   */
476	struct bio		*retry_read_aligned_list; /* aligned bios retry list  */
477	atomic_t		preread_active_stripes; /* stripes with scheduled io */
478	atomic_t		active_aligned_reads;
479	atomic_t		pending_full_writes; /* full write backlog */
480	int			bypass_count; /* bypassed prereads */
481	int			bypass_threshold; /* preread nice */
482	int			skip_copy; /* Don't copy data from bio to stripe cache */
483	struct list_head	*last_hold; /* detect hold_list promotions */
484
485	/* bios to have bi_end_io called after metadata is synced */
486	struct bio_list		return_bi;
487
488	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
489	/* unfortunately we need two cache names as we temporarily have
490	 * two caches.
491	 */
492	int			active_name;
493	char			cache_name[2][32];
494	struct kmem_cache	*slab_cache; /* for allocating stripes */
495	struct mutex		cache_size_mutex; /* Protect changes to cache size */
496
497	int			seq_flush, seq_write;
498	int			quiesce;
499
500	int			fullsync;  /* set to 1 if a full sync is needed,
501					    * (fresh device added).
502					    * Cleared when a sync completes.
503					    */
504	int			recovery_disabled;
505	/* per cpu variables */
506	struct raid5_percpu {
507		struct page	*spare_page; /* Used when checking P/Q in raid6 */
508		struct flex_array *scribble;   /* space for constructing buffer
509					      * lists and performing address
510					      * conversions
511					      */
512	} __percpu *percpu;
513	int scribble_disks;
514	int scribble_sectors;
515#ifdef CONFIG_HOTPLUG_CPU
516	struct notifier_block	cpu_notify;
517#endif
518
519	/*
520	 * Free stripes pool
521	 */
522	atomic_t		active_stripes;
523	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];
524	atomic_t		empty_inactive_list_nr;
525	struct llist_head	released_stripes;
526	wait_queue_head_t	wait_for_quiescent;
527	wait_queue_head_t	wait_for_stripe;
528	wait_queue_head_t	wait_for_overlap;
529	unsigned long		cache_state;
530#define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
531					 * waiting for 25% to be free
532					 */
533#define R5_ALLOC_MORE		2	/* It might help to allocate another
534					 * stripe.
535					 */
536#define R5_DID_ALLOC		4	/* A stripe was allocated, don't allocate
537					 * more until at least one has been
538					 * released.  This avoids flooding
539					 * the cache.
540					 */
541	struct shrinker		shrinker;
542	int			pool_size; /* number of disks in stripeheads in pool */
543	spinlock_t		device_lock;
544	struct disk_info	*disks;
545
546	/* When taking over an array from a different personality, we store
547	 * the new thread here until we fully activate the array.
548	 */
549	struct md_thread	*thread;
550	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
551	struct r5worker_group	*worker_groups;
552	int			group_cnt;
553	int			worker_cnt_per_group;
554	struct r5l_log		*log;
555};
556
557
558/*
559 * Our supported algorithms
560 */
561#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
562#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
563#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
564#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */
565
566/* Define non-rotating (raid4) algorithms.  These allow
567 * conversion of raid4 to raid5.
568 */
569#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
570#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */
571
572/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
573 * Firstly, the exact positioning of the parity block is slightly
574 * different between the 'LEFT_*' modes of md and the "_N_*" modes
575 * of DDF.
576 * Secondly, the order of data blocks over which the Q syndrome is computed
577 * is different.
578 * Consequently we have different layouts for DDF/raid6 than md/raid6.
579 * These layouts are from the DDFv1.2 spec.
580 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
581 * leaves RLQ=3 as 'Vendor Specific'
582 */
583
584#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
585#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
586#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */
587
588/* For every RAID5 algorithm we define a RAID6 algorithm
589 * with exactly the same layout for data and parity, and
590 * with the Q block always on the last device (N-1).
591 * This allows trivial conversion from RAID5 to RAID6
592 */
593#define ALGORITHM_LEFT_ASYMMETRIC_6	16
594#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
595#define ALGORITHM_LEFT_SYMMETRIC_6	18
596#define ALGORITHM_RIGHT_SYMMETRIC_6	19
597#define ALGORITHM_PARITY_0_6		20
598#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
599
600static inline int algorithm_valid_raid5(int layout)
601{
602	return (layout >= 0) &&
603		(layout <= 5);
604}
605static inline int algorithm_valid_raid6(int layout)
606{
607	return (layout >= 0 && layout <= 5)
608		||
609		(layout >= 8 && layout <= 10)
610		||
611		(layout >= 16 && layout <= 20);
612}
613
614static inline int algorithm_is_DDF(int layout)
615{
616	return layout >= 8 && layout <= 10;
617}
618
619extern void md_raid5_kick_device(struct r5conf *conf);
620extern int raid5_set_cache_size(struct mddev *mddev, int size);
621extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
622extern void raid5_release_stripe(struct stripe_head *sh);
623extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
624				     int previous, int *dd_idx,
625				     struct stripe_head *sh);
626extern struct stripe_head *
627raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
628			int previous, int noblock, int noquiesce);
629extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
630extern void r5l_exit_log(struct r5l_log *log);
631extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
632extern void r5l_write_stripe_run(struct r5l_log *log);
633extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
634extern void r5l_stripe_write_finished(struct stripe_head *sh);
635extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
636extern void r5l_quiesce(struct r5l_log *log, int state);
637extern bool r5l_log_disk_error(struct r5conf *conf);
638#endif