v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID1_H
#define _RAID1_H

/*
 * each barrier unit size is 64MB for now
 * note: it must be larger than RESYNC_DEPTH
 */
#define BARRIER_UNIT_SECTOR_BITS	17
#define BARRIER_UNIT_SECTOR_SIZE	(1<<17)
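/*
 * Worked example: with 512-byte sectors, one barrier unit covers
 * 1 << 17 = 131072 sectors, i.e. 131072 * 512 bytes = 64MB, which is
 * where the "64MB" in the comment above comes from.
 */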
/*
 * In struct r1conf, the following members are related to I/O barrier
 * buckets,
 *	atomic_t	*nr_pending;
 *	atomic_t	*nr_waiting;
 *	atomic_t	*nr_queued;
 *	atomic_t	*barrier;
 * Each of them points to an array of atomic_t variables; each array is
 * designed to have BARRIER_BUCKETS_NR elements and to occupy a single
 * memory page. The data width of atomic_t is 4 bytes, equal to
 * 1<<(ilog2(sizeof(atomic_t))), so BARRIER_BUCKETS_NR_BITS is defined
 * as (PAGE_SHIFT - ilog2(sizeof(atomic_t))) to make sure an array of
 * atomic_t variables with BARRIER_BUCKETS_NR elements exactly
 * occupies a single memory page.
 */
#define BARRIER_BUCKETS_NR_BITS		(PAGE_SHIFT - ilog2(sizeof(atomic_t)))
#define BARRIER_BUCKETS_NR		(1<<BARRIER_BUCKETS_NR_BITS)
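/*
 * Worked example: on an architecture with 4KB pages (PAGE_SHIFT == 12)
 * and a 4-byte atomic_t, BARRIER_BUCKETS_NR_BITS = 12 - 2 = 10, so each
 * array holds BARRIER_BUCKETS_NR = 1024 counters and occupies
 * 1024 * 4 = 4096 bytes - exactly one page.
 */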

/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
 * There are three safe ways to access raid1_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery is known to be happening - i.e. in code that is
 *    called as part of performing resync/recovery.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the
 *    RCU lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and,
 * if it has been incremented, the pointer is put back in .rdev.
 */
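/*
 * A minimal sketch of access rule 3 above, loosely following the
 * pattern used by callers in raid1.c ('conf' and 'disk' are
 * hypothetical local variables here):
 *
 *	struct md_rdev *rdev;
 *
 *	rcu_read_lock();
 *	rdev = rcu_dereference(conf->mirrors[disk].rdev);
 *	if (rdev)
 *		atomic_inc(&rdev->nr_pending);
 *	rcu_read_unlock();
 */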

struct raid1_info {
	struct md_rdev	*rdev;
	sector_t	head_position;

	/* When choosing the best device for a read (read_balance()),
	 * we try to keep sequential reads on the same device.
	 */
	sector_t	next_seq_sect;
	sector_t	seq_start;
};

/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloc'd struct.
 * The 'raid_disks' here is twice the raid_disks in r1conf.
 * This allows space so that each 'real' device can have a replacement in
 * the second half of the array.
 */

struct pool_info {
	struct mddev *mddev;
	int	raid_disks;
};

struct r1conf {
	struct mddev		*mddev;
	struct raid1_info	*mirrors;	/* twice 'raid_disks' to
						 * allow for replacements.
						 */
	int			raid_disks;

	spinlock_t		device_lock;

	/* list of 'struct r1bio' that need to be processed by raid1d,
	 * whether to retry a read, write out a resync or recovery
	 * block, or anything else.
	 */
	struct list_head	retry_list;
	/* A separate list of r1bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head	bio_end_io_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO.  So when either is active, the other has to wait.
	 * See the more detailed description in raid1.c near raise_barrier().
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;
	atomic_t		nr_sync_pending;
	atomic_t		*nr_pending;
	atomic_t		*nr_waiting;
	atomic_t		*nr_queued;
	atomic_t		*barrier;
	int			array_frozen;

	/* Set to 1 if a full sync is needed (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted, as we expect a read error.
	 */
	int			recovery_disabled;

	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		r1bio_pool;
	mempool_t		r1buf_pool;

	struct bio_set		bio_split;

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;

	/* Keep track of cluster resync window to send to other
	 * nodes.
	 */
	sector_t		cluster_sync_low;
	sector_t		cluster_sync_high;

};

/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	atomic_t		behind_remaining; /* number of write-behind ios remaining
						 * in this BehindIO request
						 */
	sector_t		sector;
	int			sectors;
	unsigned long		state;
	unsigned long		start_time;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;

	/*
	 * When R1BIO_BehindIO is set, we store pages for write behind
	 * in behind_master_bio.
	 */
	struct bio		*behind_master_bio;

	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[];
	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced */
};
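/*
 * Sketch of why nothing may follow bios[]: the r1bio and its trailing
 * array are allocated as a single block sized for every device, roughly
 * along these lines (see r1bio_pool_alloc() in raid1.c for the real
 * allocation; 'raid_disks' and 'gfp' are placeholders here):
 *
 *	r1_bio = kzalloc(offsetof(struct r1bio, bios[raid_disks]), gfp);
 */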

/* bits for r1bio.state */
enum r1bio_state {
	R1BIO_Uptodate,
	R1BIO_IsSync,
	R1BIO_Degraded,
	R1BIO_BehindIO,
/* Set ReadError on bios that experience a read error so that
 * raid1d knows what to do with them.
 */
	R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when the last
 * non-write-behind device completes, provided any write was
 * successful.  Otherwise we call it when any write-behind write
 * succeeds, or with failure when the last write completes (and
 * all writes failed).
 * Record that bi_end_io was called with this flag...
 */
	R1BIO_Returned,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R1BIO_MadeGood,
	R1BIO_WriteError,
	R1BIO_FailFast,
};

static inline int sector_to_idx(sector_t sector)
{
	return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
			 BARRIER_BUCKETS_NR_BITS);
}
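/*
 * Worked example: sector 0x300000 (a 1.5GB offset with 512-byte
 * sectors) lies in barrier unit 0x300000 >> 17 = 24; hash_long() then
 * spreads that unit number across BARRIER_BUCKETS_NR buckets, so all
 * I/O within the same 64MB unit lands in the same bucket.
 */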
#endif
v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID1_H
#define _RAID1_H

/*
 * each barrier unit size is 64MB for now
 * note: it must be larger than RESYNC_DEPTH
 */
#define BARRIER_UNIT_SECTOR_BITS	17
#define BARRIER_UNIT_SECTOR_SIZE	(1<<17)
/*
 * In struct r1conf, the following members are related to I/O barrier
 * buckets,
 *	atomic_t	*nr_pending;
 *	atomic_t	*nr_waiting;
 *	atomic_t	*nr_queued;
 *	atomic_t	*barrier;
 * Each of them points to an array of atomic_t variables; each array is
 * designed to have BARRIER_BUCKETS_NR elements and to occupy a single
 * memory page. The data width of atomic_t is 4 bytes, equal to
 * 1<<(ilog2(sizeof(atomic_t))), so BARRIER_BUCKETS_NR_BITS is defined
 * as (PAGE_SHIFT - ilog2(sizeof(atomic_t))) to make sure an array of
 * atomic_t variables with BARRIER_BUCKETS_NR elements exactly
 * occupies a single memory page.
 */
#define BARRIER_BUCKETS_NR_BITS		(PAGE_SHIFT - ilog2(sizeof(atomic_t)))
#define BARRIER_BUCKETS_NR		(1<<BARRIER_BUCKETS_NR_BITS)

/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
 * There are three safe ways to access raid1_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery is known to be happening - i.e. in code that is
 *    called as part of performing resync/recovery.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the
 *    RCU lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and,
 * if it has been incremented, the pointer is put back in .rdev.
 */

struct raid1_info {
	struct md_rdev	*rdev;
	sector_t	head_position;

	/* When choosing the best device for a read (read_balance()),
	 * we try to keep sequential reads on the same device.
	 */
	sector_t	next_seq_sect;
	sector_t	seq_start;
};

/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloc'd struct.
 * The 'raid_disks' here is twice the raid_disks in r1conf.
 * This allows space so that each 'real' device can have a replacement in
 * the second half of the array.
 */

struct pool_info {
	struct mddev *mddev;
	int	raid_disks;
};

struct r1conf {
	struct mddev		*mddev;
	struct raid1_info	*mirrors;	/* twice 'raid_disks' to
						 * allow for replacements.
						 */
	int			raid_disks;

	spinlock_t		device_lock;

	/* list of 'struct r1bio' that need to be processed by raid1d,
	 * whether to retry a read, write out a resync or recovery
	 * block, or anything else.
	 */
	struct list_head	retry_list;
	/* A separate list of r1bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head	bio_end_io_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO.  So when either is active, the other has to wait.
	 * See the more detailed description in raid1.c near raise_barrier().
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;
	atomic_t		nr_sync_pending;
	atomic_t		*nr_pending;
	atomic_t		*nr_waiting;
	atomic_t		*nr_queued;
	atomic_t		*barrier;
	int			array_frozen;

	/* Set to 1 if a full sync is needed (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted, as we expect a read error.
	 */
	int			recovery_disabled;

	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		r1bio_pool;
	mempool_t		r1buf_pool;

	struct bio_set		bio_split;

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread __rcu	*thread;

	/* Keep track of cluster resync window to send to other
	 * nodes.
	 */
	sector_t		cluster_sync_low;
	sector_t		cluster_sync_high;

};

/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	atomic_t		behind_remaining; /* number of write-behind ios remaining
						 * in this BehindIO request
						 */
	sector_t		sector;
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;

	/*
	 * When R1BIO_BehindIO is set, we store pages for write behind
	 * in behind_master_bio.
	 */
	struct bio		*behind_master_bio;

	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[];
	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced */
};

/* bits for r1bio.state */
enum r1bio_state {
	R1BIO_Uptodate,
	R1BIO_IsSync,
	R1BIO_Degraded,
	R1BIO_BehindIO,
/* Set ReadError on bios that experience a read error so that
 * raid1d knows what to do with them.
 */
	R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when the last
 * non-write-behind device completes, provided any write was
 * successful.  Otherwise we call it when any write-behind write
 * succeeds, or with failure when the last write completes (and
 * all writes failed).
 * Record that bi_end_io was called with this flag...
 */
	R1BIO_Returned,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R1BIO_MadeGood,
	R1BIO_WriteError,
	R1BIO_FailFast,
};

static inline int sector_to_idx(sector_t sector)
{
	return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
			 BARRIER_BUCKETS_NR_BITS);
}
#endif