v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to setting various queue properties from drivers
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/pagemap.h>
 11#include <linux/backing-dev-defs.h>
 12#include <linux/gcd.h>
 13#include <linux/lcm.h>
 14#include <linux/jiffies.h>
 15#include <linux/gfp.h>
 16#include <linux/dma-mapping.h>
 17
 18#include "blk.h"
 19#include "blk-wbt.h"
 20
 21void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 22{
 23	q->rq_timeout = timeout;
 24}
 25EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 26
 27/**
 28 * blk_set_default_limits - reset limits to default values
 29 * @lim:  the queue_limits structure to reset
 30 *
 31 * Description:
 32 *   Returns a queue_limit struct to its default state.
 33 */
 34void blk_set_default_limits(struct queue_limits *lim)
 35{
 36	lim->max_segments = BLK_MAX_SEGMENTS;
 37	lim->max_discard_segments = 1;
 38	lim->max_integrity_segments = 0;
 39	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 40	lim->virt_boundary_mask = 0;
 41	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 42	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 43	lim->max_dev_sectors = 0;
 44	lim->chunk_sectors = 0;
 45	lim->max_write_zeroes_sectors = 0;
 46	lim->max_zone_append_sectors = 0;
 47	lim->max_discard_sectors = 0;
 48	lim->max_hw_discard_sectors = 0;
 49	lim->max_secure_erase_sectors = 0;
 50	lim->discard_granularity = 0;
 51	lim->discard_alignment = 0;
 52	lim->discard_misaligned = 0;
 53	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 54	lim->bounce = BLK_BOUNCE_NONE;
 55	lim->alignment_offset = 0;
 56	lim->io_opt = 0;
 57	lim->misaligned = 0;
 58	lim->zoned = BLK_ZONED_NONE;
 59	lim->zone_write_granularity = 0;
 60	lim->dma_alignment = 511;
 61}
 62
 63/**
 64 * blk_set_stacking_limits - set default limits for stacking devices
 65 * @lim:  the queue_limits structure to reset
 66 *
 67 * Description:
 68 *   Returns a queue_limit struct to its default state. Should be used
 69 *   by stacking drivers like DM that have no internal limits.
 70 */
 71void blk_set_stacking_limits(struct queue_limits *lim)
 72{
 73	blk_set_default_limits(lim);
 74
 75	/* Inherit limits from component devices */
 76	lim->max_segments = USHRT_MAX;
 77	lim->max_discard_segments = USHRT_MAX;
 78	lim->max_hw_sectors = UINT_MAX;
 79	lim->max_segment_size = UINT_MAX;
 80	lim->max_sectors = UINT_MAX;
 81	lim->max_dev_sectors = UINT_MAX;
 82	lim->max_write_zeroes_sectors = UINT_MAX;
 83	lim->max_zone_append_sectors = UINT_MAX;
 84}
 85EXPORT_SYMBOL(blk_set_stacking_limits);
 86
 87/**
 88 * blk_queue_bounce_limit - set bounce buffer limit for queue
 89 * @q: the request queue for the device
 90 * @bounce: bounce limit to enforce
 91 *
 92 * Description:
 93 *    Force bouncing for ISA DMA ranges or highmem.
 94 *
 95 *    DEPRECATED, don't use in new code.
 96 **/
 97void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 98{
 99	q->limits.bounce = bounce;
100}
101EXPORT_SYMBOL(blk_queue_bounce_limit);
102
103/**
104 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
105 * @q:  the request queue for the device
106 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
107 *
108 * Description:
109 *    Enables a low level driver to set a hard upper limit,
110 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
111 *    the device driver based upon the capabilities of the I/O
112 *    controller.
113 *
114 *    max_dev_sectors is a hard limit imposed by the storage device for
115 *    READ/WRITE requests. It is set by the disk driver.
116 *
117 *    max_sectors is a soft limit imposed by the block layer for
118 *    filesystem type requests.  This value can be overridden on a
119 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
120 *    The soft limit can not exceed max_hw_sectors.
121 **/
122void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
123{
124	struct queue_limits *limits = &q->limits;
125	unsigned int max_sectors;
126
127	if ((max_hw_sectors << 9) < PAGE_SIZE) {
128		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
129		printk(KERN_INFO "%s: set to minimum %d\n",
130		       __func__, max_hw_sectors);
131	}
132
133	max_hw_sectors = round_down(max_hw_sectors,
134				    limits->logical_block_size >> SECTOR_SHIFT);
135	limits->max_hw_sectors = max_hw_sectors;
136
137	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
138	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
139	max_sectors = round_down(max_sectors,
140				 limits->logical_block_size >> SECTOR_SHIFT);
141	limits->max_sectors = max_sectors;
142
143	if (!q->disk)
144		return;
145	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
146}
147EXPORT_SYMBOL(blk_queue_max_hw_sectors);
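/*
 * Editor's example (illustrative sketch, not part of this file): how a
 * hypothetical low-level driver might apply the request-size limits above
 * during probe.  The function name and values are assumptions; only the
 * blk_queue_*() helpers come from this file.
 */
static void example_apply_hw_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB (2048 * 512 B) per request */
	blk_queue_max_segments(q, 128);		/* up to 128 S/G segments */
}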
148
149/**
150 * blk_queue_chunk_sectors - set size of the chunk for this queue
151 * @q:  the request queue for the device
152 * @chunk_sectors:  chunk sectors in the usual 512b unit
153 *
154 * Description:
155 *    If a driver doesn't want IOs to cross a given chunk size, it can set
156 *    this limit and prevent merging across chunks. Note that the block layer
157 *    must accept a page worth of data at any offset. So if the crossing of
158 *    chunks is a hard limitation in the driver, it must still be prepared
159 *    to split single page bios.
160 **/
161void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
162{
163	q->limits.chunk_sectors = chunk_sectors;
164}
165EXPORT_SYMBOL(blk_queue_chunk_sectors);
166
167/**
168 * blk_queue_max_discard_sectors - set max sectors for a single discard
169 * @q:  the request queue for the device
170 * @max_discard_sectors: maximum number of sectors to discard
171 **/
172void blk_queue_max_discard_sectors(struct request_queue *q,
173		unsigned int max_discard_sectors)
174{
175	q->limits.max_hw_discard_sectors = max_discard_sectors;
176	q->limits.max_discard_sectors = max_discard_sectors;
177}
178EXPORT_SYMBOL(blk_queue_max_discard_sectors);
179
180/**
181 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
182 * @q:  the request queue for the device
183 * @max_sectors: maximum number of sectors to secure_erase
184 **/
185void blk_queue_max_secure_erase_sectors(struct request_queue *q,
186		unsigned int max_sectors)
187{
188	q->limits.max_secure_erase_sectors = max_sectors;
189}
190EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
191
192/**
193 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
194 *                                      write zeroes
195 * @q:  the request queue for the device
196 * @max_write_zeroes_sectors: maximum number of sectors to write per command
197 **/
198void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
199		unsigned int max_write_zeroes_sectors)
200{
201	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
202}
203EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
204
205/**
206 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
207 * @q:  the request queue for the device
208 * @max_zone_append_sectors: maximum number of sectors to write per command
209 **/
210void blk_queue_max_zone_append_sectors(struct request_queue *q,
211		unsigned int max_zone_append_sectors)
212{
213	unsigned int max_sectors;
214
215	if (WARN_ON(!blk_queue_is_zoned(q)))
216		return;
217
218	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
219	max_sectors = min(q->limits.chunk_sectors, max_sectors);
220
221	/*
 222	 * Warn about possible driver bugs that result in a zero max_zone_append_sectors
 223	 * limit: a zero argument, an unset chunk_sectors limit (zone size), or an
 224	 * unset max_hw_sectors limit.
225	 */
226	WARN_ON(!max_sectors);
227
228	q->limits.max_zone_append_sectors = max_sectors;
229}
230EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
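/*
 * Editor's example (illustrative sketch): a hypothetical zoned driver sets
 * the zone size via chunk_sectors and a generous max_hw_sectors before
 * declaring its zone-append limit, so the min() logic above does not clamp
 * the value unexpectedly.  The queue must already be marked zoned (see
 * disk_set_zoned() below).
 */
static void example_zone_append_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);		/* 1 MiB I/O */
	blk_queue_chunk_sectors(q, 524288);		/* 256 MiB zone size */
	blk_queue_max_zone_append_sectors(q, 1024);	/* 512 KiB zone append */
}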
231
232/**
233 * blk_queue_max_segments - set max hw segments for a request for this queue
234 * @q:  the request queue for the device
235 * @max_segments:  max number of segments
236 *
237 * Description:
238 *    Enables a low level driver to set an upper limit on the number of
239 *    hw data segments in a request.
240 **/
241void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
242{
243	if (!max_segments) {
244		max_segments = 1;
245		printk(KERN_INFO "%s: set to minimum %d\n",
246		       __func__, max_segments);
247	}
248
249	q->limits.max_segments = max_segments;
250}
251EXPORT_SYMBOL(blk_queue_max_segments);
252
253/**
254 * blk_queue_max_discard_segments - set max segments for discard requests
255 * @q:  the request queue for the device
256 * @max_segments:  max number of segments
257 *
258 * Description:
259 *    Enables a low level driver to set an upper limit on the number of
260 *    segments in a discard request.
261 **/
262void blk_queue_max_discard_segments(struct request_queue *q,
263		unsigned short max_segments)
264{
265	q->limits.max_discard_segments = max_segments;
266}
267EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
268
269/**
270 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
271 * @q:  the request queue for the device
272 * @max_size:  max size of segment in bytes
273 *
274 * Description:
275 *    Enables a low level driver to set an upper limit on the size of a
276 *    coalesced segment
277 **/
278void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
279{
280	if (max_size < PAGE_SIZE) {
281		max_size = PAGE_SIZE;
282		printk(KERN_INFO "%s: set to minimum %d\n",
283		       __func__, max_size);
284	}
285
286	/* see blk_queue_virt_boundary() for the explanation */
287	WARN_ON_ONCE(q->limits.virt_boundary_mask);
288
289	q->limits.max_segment_size = max_size;
290}
291EXPORT_SYMBOL(blk_queue_max_segment_size);
292
293/**
294 * blk_queue_logical_block_size - set logical block size for the queue
295 * @q:  the request queue for the device
296 * @size:  the logical block size, in bytes
297 *
298 * Description:
299 *   This should be set to the lowest possible block size that the
300 *   storage device can address.  The default of 512 covers most
301 *   hardware.
302 **/
303void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
304{
305	struct queue_limits *limits = &q->limits;
306
307	limits->logical_block_size = size;
308
309	if (limits->physical_block_size < size)
310		limits->physical_block_size = size;
311
312	if (limits->io_min < limits->physical_block_size)
313		limits->io_min = limits->physical_block_size;
314
315	limits->max_hw_sectors =
316		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
317	limits->max_sectors =
318		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
319}
320EXPORT_SYMBOL(blk_queue_logical_block_size);
321
322/**
323 * blk_queue_physical_block_size - set physical block size for the queue
324 * @q:  the request queue for the device
325 * @size:  the physical block size, in bytes
326 *
327 * Description:
328 *   This should be set to the lowest possible sector size that the
329 *   hardware can operate on without reverting to read-modify-write
330 *   operations.
331 */
332void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
333{
334	q->limits.physical_block_size = size;
335
336	if (q->limits.physical_block_size < q->limits.logical_block_size)
337		q->limits.physical_block_size = q->limits.logical_block_size;
338
339	if (q->limits.io_min < q->limits.physical_block_size)
340		q->limits.io_min = q->limits.physical_block_size;
341}
342EXPORT_SYMBOL(blk_queue_physical_block_size);
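/*
 * Editor's example (illustrative sketch): a hypothetical 4Kn device reports
 * 4096-byte logical and physical blocks.  blk_queue_logical_block_size()
 * above also raises io_min and rounds max_sectors/max_hw_sectors down to a
 * multiple of eight 512-byte sectors.
 */
static void example_4kn_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);	/* no sub-4K read-modify-write */
}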
343
344/**
345 * blk_queue_zone_write_granularity - set zone write granularity for the queue
346 * @q:  the request queue for the zoned device
347 * @size:  the zone write granularity size, in bytes
348 *
349 * Description:
 350 *   This should be set to the lowest possible size that allows writing to the
 351 *   sequential zones of a zoned block device.
352 */
353void blk_queue_zone_write_granularity(struct request_queue *q,
354				      unsigned int size)
355{
356	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
357		return;
358
359	q->limits.zone_write_granularity = size;
360
361	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
362		q->limits.zone_write_granularity = q->limits.logical_block_size;
363}
364EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
365
366/**
367 * blk_queue_alignment_offset - set physical block alignment offset
368 * @q:	the request queue for the device
369 * @offset: alignment offset in bytes
370 *
371 * Description:
372 *   Some devices are naturally misaligned to compensate for things like
373 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
374 *   should call this function for devices whose first sector is not
375 *   naturally aligned.
376 */
377void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
378{
379	q->limits.alignment_offset =
380		offset & (q->limits.physical_block_size - 1);
381	q->limits.misaligned = 0;
382}
383EXPORT_SYMBOL(blk_queue_alignment_offset);
384
385void disk_update_readahead(struct gendisk *disk)
386{
387	struct request_queue *q = disk->queue;
388
389	/*
390	 * For read-ahead of large files to be effective, we need to read ahead
391	 * at least twice the optimal I/O size.
392	 */
393	disk->bdi->ra_pages =
394		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
395	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
396}
397EXPORT_SYMBOL_GPL(disk_update_readahead);
398
399/**
400 * blk_limits_io_min - set minimum request size for a device
401 * @limits: the queue limits
402 * @min:  smallest I/O size in bytes
403 *
404 * Description:
405 *   Some devices have an internal block size bigger than the reported
406 *   hardware sector size.  This function can be used to signal the
407 *   smallest I/O the device can perform without incurring a performance
408 *   penalty.
409 */
410void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
411{
412	limits->io_min = min;
413
414	if (limits->io_min < limits->logical_block_size)
415		limits->io_min = limits->logical_block_size;
416
417	if (limits->io_min < limits->physical_block_size)
418		limits->io_min = limits->physical_block_size;
419}
420EXPORT_SYMBOL(blk_limits_io_min);
421
422/**
423 * blk_queue_io_min - set minimum request size for the queue
424 * @q:	the request queue for the device
425 * @min:  smallest I/O size in bytes
426 *
427 * Description:
428 *   Storage devices may report a granularity or preferred minimum I/O
429 *   size which is the smallest request the device can perform without
430 *   incurring a performance penalty.  For disk drives this is often the
431 *   physical block size.  For RAID arrays it is often the stripe chunk
432 *   size.  A properly aligned multiple of minimum_io_size is the
433 *   preferred request size for workloads where a high number of I/O
434 *   operations is desired.
435 */
436void blk_queue_io_min(struct request_queue *q, unsigned int min)
437{
438	blk_limits_io_min(&q->limits, min);
439}
440EXPORT_SYMBOL(blk_queue_io_min);
441
442/**
443 * blk_limits_io_opt - set optimal request size for a device
444 * @limits: the queue limits
 445 * @opt:  optimal request size in bytes
446 *
447 * Description:
448 *   Storage devices may report an optimal I/O size, which is the
449 *   device's preferred unit for sustained I/O.  This is rarely reported
450 *   for disk drives.  For RAID arrays it is usually the stripe width or
451 *   the internal track size.  A properly aligned multiple of
452 *   optimal_io_size is the preferred request size for workloads where
453 *   sustained throughput is desired.
454 */
455void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
456{
457	limits->io_opt = opt;
458}
459EXPORT_SYMBOL(blk_limits_io_opt);
460
461/**
462 * blk_queue_io_opt - set optimal request size for the queue
463 * @q:	the request queue for the device
464 * @opt:  optimal request size in bytes
465 *
466 * Description:
467 *   Storage devices may report an optimal I/O size, which is the
468 *   device's preferred unit for sustained I/O.  This is rarely reported
469 *   for disk drives.  For RAID arrays it is usually the stripe width or
470 *   the internal track size.  A properly aligned multiple of
471 *   optimal_io_size is the preferred request size for workloads where
472 *   sustained throughput is desired.
473 */
474void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
475{
476	blk_limits_io_opt(&q->limits, opt);
477	if (!q->disk)
478		return;
479	q->disk->bdi->ra_pages =
480		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
481}
482EXPORT_SYMBOL(blk_queue_io_opt);
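/*
 * Editor's example (illustrative sketch): I/O hints a hypothetical RAID-style
 * driver could export -- io_min as the 64 KiB stripe chunk and io_opt as the
 * 256 KiB full-stripe width (four data disks * 64 KiB).  Values are made up
 * for illustration.
 */
static void example_raid_io_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* stripe chunk */
	blk_queue_io_opt(q, 256 * 1024);	/* full stripe width */
}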
483
484static int queue_limit_alignment_offset(const struct queue_limits *lim,
485		sector_t sector)
486{
487	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
488	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
489		<< SECTOR_SHIFT;
490
491	return (granularity + lim->alignment_offset - alignment) % granularity;
492}
493
494static unsigned int queue_limit_discard_alignment(
495		const struct queue_limits *lim, sector_t sector)
496{
497	unsigned int alignment, granularity, offset;
498
499	if (!lim->max_discard_sectors)
500		return 0;
501
502	/* Why are these in bytes, not sectors? */
503	alignment = lim->discard_alignment >> SECTOR_SHIFT;
504	granularity = lim->discard_granularity >> SECTOR_SHIFT;
505	if (!granularity)
506		return 0;
507
508	/* Offset of the partition start in 'granularity' sectors */
509	offset = sector_div(sector, granularity);
510
511	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
512	offset = (granularity + alignment - offset) % granularity;
513
514	/* Turn it back into bytes, gaah */
515	return offset << SECTOR_SHIFT;
516}
517
518static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
519{
520	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
521	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
522		sectors = PAGE_SIZE >> SECTOR_SHIFT;
523	return sectors;
524}
525
526/**
527 * blk_stack_limits - adjust queue_limits for stacked devices
528 * @t:	the stacking driver limits (top device)
529 * @b:  the underlying queue limits (bottom, component device)
530 * @start:  first data sector within component device
531 *
532 * Description:
533 *    This function is used by stacking drivers like MD and DM to ensure
534 *    that all component devices have compatible block sizes and
535 *    alignments.  The stacking driver must provide a queue_limits
536 *    struct (top) and then iteratively call the stacking function for
537 *    all component (bottom) devices.  The stacking function will
538 *    attempt to combine the values and ensure proper alignment.
539 *
540 *    Returns 0 if the top and bottom queue_limits are compatible.  The
541 *    top device's block sizes and alignment offsets may be adjusted to
542 *    ensure alignment with the bottom device. If no compatible sizes
543 *    and alignments exist, -1 is returned and the resulting top
544 *    queue_limits will have the misaligned flag set to indicate that
545 *    the alignment_offset is undefined.
546 */
547int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
548		     sector_t start)
549{
550	unsigned int top, bottom, alignment, ret = 0;
551
552	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
553	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
554	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
555	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
556					b->max_write_zeroes_sectors);
557	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
558					b->max_zone_append_sectors);
559	t->bounce = max(t->bounce, b->bounce);
560
561	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
562					    b->seg_boundary_mask);
563	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
564					    b->virt_boundary_mask);
565
566	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
567	t->max_discard_segments = min_not_zero(t->max_discard_segments,
568					       b->max_discard_segments);
569	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
570						 b->max_integrity_segments);
571
572	t->max_segment_size = min_not_zero(t->max_segment_size,
573					   b->max_segment_size);
574
575	t->misaligned |= b->misaligned;
576
577	alignment = queue_limit_alignment_offset(b, start);
578
579	/* Bottom device has different alignment.  Check that it is
580	 * compatible with the current top alignment.
581	 */
582	if (t->alignment_offset != alignment) {
583
584		top = max(t->physical_block_size, t->io_min)
585			+ t->alignment_offset;
586		bottom = max(b->physical_block_size, b->io_min) + alignment;
587
588		/* Verify that top and bottom intervals line up */
589		if (max(top, bottom) % min(top, bottom)) {
590			t->misaligned = 1;
591			ret = -1;
592		}
593	}
594
595	t->logical_block_size = max(t->logical_block_size,
596				    b->logical_block_size);
597
598	t->physical_block_size = max(t->physical_block_size,
599				     b->physical_block_size);
600
601	t->io_min = max(t->io_min, b->io_min);
602	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
603	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
604
605	/* Set non-power-of-2 compatible chunk_sectors boundary */
606	if (b->chunk_sectors)
607		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
608
609	/* Physical block size a multiple of the logical block size? */
610	if (t->physical_block_size & (t->logical_block_size - 1)) {
611		t->physical_block_size = t->logical_block_size;
612		t->misaligned = 1;
613		ret = -1;
614	}
615
616	/* Minimum I/O a multiple of the physical block size? */
617	if (t->io_min & (t->physical_block_size - 1)) {
618		t->io_min = t->physical_block_size;
619		t->misaligned = 1;
620		ret = -1;
621	}
622
623	/* Optimal I/O a multiple of the physical block size? */
624	if (t->io_opt & (t->physical_block_size - 1)) {
625		t->io_opt = 0;
626		t->misaligned = 1;
627		ret = -1;
628	}
629
630	/* chunk_sectors a multiple of the physical block size? */
631	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
632		t->chunk_sectors = 0;
633		t->misaligned = 1;
634		ret = -1;
635	}
636
637	t->raid_partial_stripes_expensive =
638		max(t->raid_partial_stripes_expensive,
639		    b->raid_partial_stripes_expensive);
640
641	/* Find lowest common alignment_offset */
642	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
643		% max(t->physical_block_size, t->io_min);
644
645	/* Verify that new alignment_offset is on a logical block boundary */
646	if (t->alignment_offset & (t->logical_block_size - 1)) {
647		t->misaligned = 1;
648		ret = -1;
649	}
650
651	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
652	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
653	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
654
655	/* Discard alignment and granularity */
656	if (b->discard_granularity) {
657		alignment = queue_limit_discard_alignment(b, start);
658
659		if (t->discard_granularity != 0 &&
660		    t->discard_alignment != alignment) {
661			top = t->discard_granularity + t->discard_alignment;
662			bottom = b->discard_granularity + alignment;
663
664			/* Verify that top and bottom intervals line up */
665			if ((max(top, bottom) % min(top, bottom)) != 0)
666				t->discard_misaligned = 1;
667		}
668
669		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
670						      b->max_discard_sectors);
671		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
672							 b->max_hw_discard_sectors);
673		t->discard_granularity = max(t->discard_granularity,
674					     b->discard_granularity);
675		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
676			t->discard_granularity;
677	}
678	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
679						   b->max_secure_erase_sectors);
680	t->zone_write_granularity = max(t->zone_write_granularity,
681					b->zone_write_granularity);
682	t->zoned = max(t->zoned, b->zoned);
683	return ret;
684}
685EXPORT_SYMBOL(blk_stack_limits);
686
687/**
688 * disk_stack_limits - adjust queue limits for stacked drivers
689 * @disk:  MD/DM gendisk (top)
690 * @bdev:  the underlying block device (bottom)
691 * @offset:  offset to beginning of data within component device
692 *
693 * Description:
694 *    Merges the limits for a top level gendisk and a bottom level
695 *    block_device.
696 */
697void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
698		       sector_t offset)
699{
700	struct request_queue *t = disk->queue;
701
702	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
703			get_start_sect(bdev) + (offset >> 9)) < 0)
704		pr_notice("%s: Warning: Device %pg is misaligned\n",
705			disk->disk_name, bdev);
706
707	disk_update_readahead(disk);
708}
709EXPORT_SYMBOL(disk_stack_limits);
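/*
 * Editor's example (illustrative sketch): how a DM/MD-style stacking driver
 * might combine the helpers above -- start from the permissive stacking
 * defaults and then fold in each component device.  The array of component
 * block devices is hypothetical.
 */
static void example_stack_components(struct gendisk *top,
				     struct block_device **parts, int nr_parts)
{
	int i;

	blk_set_stacking_limits(&top->queue->limits);
	for (i = 0; i < nr_parts; i++)
		disk_stack_limits(top, parts[i], 0);
}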
710
711/**
712 * blk_queue_update_dma_pad - update pad mask
713 * @q:     the request queue for the device
714 * @mask:  pad mask
715 *
716 * Update dma pad mask.
717 *
718 * Appending pad buffer to a request modifies the last entry of a
719 * scatter list such that it includes the pad buffer.
720 **/
721void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
722{
723	if (mask > q->dma_pad_mask)
724		q->dma_pad_mask = mask;
725}
726EXPORT_SYMBOL(blk_queue_update_dma_pad);
727
728/**
729 * blk_queue_segment_boundary - set boundary rules for segment merging
730 * @q:  the request queue for the device
731 * @mask:  the memory boundary mask
732 **/
733void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
734{
735	if (mask < PAGE_SIZE - 1) {
736		mask = PAGE_SIZE - 1;
737		printk(KERN_INFO "%s: set to minimum %lx\n",
738		       __func__, mask);
739	}
740
741	q->limits.seg_boundary_mask = mask;
742}
743EXPORT_SYMBOL(blk_queue_segment_boundary);
744
745/**
746 * blk_queue_virt_boundary - set boundary rules for bio merging
747 * @q:  the request queue for the device
748 * @mask:  the memory boundary mask
749 **/
750void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
751{
752	q->limits.virt_boundary_mask = mask;
753
754	/*
755	 * Devices that require a virtual boundary do not support scatter/gather
756	 * I/O natively, but instead require a descriptor list entry for each
 757	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
758	 * of that they are not limited by our notion of "segment size".
759	 */
760	if (mask)
761		q->limits.max_segment_size = UINT_MAX;
762}
763EXPORT_SYMBOL(blk_queue_virt_boundary);
764
765/**
766 * blk_queue_dma_alignment - set dma length and memory alignment
767 * @q:     the request queue for the device
768 * @mask:  alignment mask
769 *
 770 * Description:
 771 *    Set required memory and length alignment for direct DMA transactions.
 772 *    This is used when building direct I/O requests for the queue.
773 *
774 **/
775void blk_queue_dma_alignment(struct request_queue *q, int mask)
776{
777	q->limits.dma_alignment = mask;
778}
779EXPORT_SYMBOL(blk_queue_dma_alignment);
780
781/**
782 * blk_queue_update_dma_alignment - update dma length and memory alignment
783 * @q:     the request queue for the device
784 * @mask:  alignment mask
785 *
 786 * Description:
 787 *    Update required memory and length alignment for direct DMA transactions.
788 *    If the requested alignment is larger than the current alignment, then
789 *    the current queue alignment is updated to the new value, otherwise it
790 *    is left alone.  The design of this is to allow multiple objects
791 *    (driver, device, transport etc) to set their respective
792 *    alignments without having them interfere.
793 *
794 **/
795void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
796{
797	BUG_ON(mask > PAGE_SIZE);
798
799	if (mask > q->limits.dma_alignment)
800		q->limits.dma_alignment = mask;
801}
802EXPORT_SYMBOL(blk_queue_update_dma_alignment);
803
804/**
805 * blk_set_queue_depth - tell the block layer about the device queue depth
806 * @q:		the request queue for the device
807 * @depth:		queue depth
808 *
809 */
810void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
811{
812	q->queue_depth = depth;
813	rq_qos_queue_depth_changed(q);
814}
815EXPORT_SYMBOL(blk_set_queue_depth);
816
817/**
818 * blk_queue_write_cache - configure queue's write cache
819 * @q:		the request queue for the device
820 * @wc:		write back cache on or off
821 * @fua:	device supports FUA writes, if true
822 *
823 * Tell the block layer about the write cache of @q.
824 */
825void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
826{
827	if (wc)
828		blk_queue_flag_set(QUEUE_FLAG_WC, q);
829	else
830		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
831	if (fua)
832		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
833	else
834		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
835
836	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
837}
838EXPORT_SYMBOL_GPL(blk_queue_write_cache);
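/*
 * Editor's example (illustrative sketch): a hypothetical driver probing its
 * device's volatile write cache and FUA support and reporting both to the
 * block layer, so flushes are only issued when actually needed.
 */
static void example_report_write_cache(struct request_queue *q,
				       bool has_vwc, bool has_fua)
{
	blk_queue_write_cache(q, has_vwc, has_vwc && has_fua);
}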
839
840/**
 841 * blk_queue_required_elevator_features - Set the required elevator features for a queue
842 * @q:		the request queue for the target device
843 * @features:	Required elevator features OR'ed together
844 *
845 * Tell the block layer that for the device controlled through @q, only the
 846 * elevators that can be used are those that implement at least the set of
847 * features specified by @features.
848 */
849void blk_queue_required_elevator_features(struct request_queue *q,
850					  unsigned int features)
851{
852	q->required_elevator_features = features;
853}
854EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
855
856/**
857 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
858 * @q:		the request queue for the device
859 * @dev:	the device pointer for dma
860 *
 861 * Tell the block layer that segments of @q may be merged based on the DMA map boundary of @dev.
862 */
863bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
864				       struct device *dev)
865{
866	unsigned long boundary = dma_get_merge_boundary(dev);
867
868	if (!boundary)
869		return false;
870
871	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
872	blk_queue_virt_boundary(q, boundary);
873
874	return true;
875}
876EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
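/*
 * Editor's example (illustrative sketch): a driver whose device sits behind
 * an IOMMU with a DMA merge boundary can ask the block layer to rely on that
 * boundary for segment merging; the call simply returns false when the
 * device has no merge boundary.
 */
static void example_try_dma_map_merging(struct request_queue *q,
					struct device *dma_dev)
{
	if (blk_queue_can_use_dma_map_merging(q, dma_dev))
		dev_dbg(dma_dev, "merging segments via DMA map boundary\n");
}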
877
878static bool disk_has_partitions(struct gendisk *disk)
879{
880	unsigned long idx;
881	struct block_device *part;
882	bool ret = false;
883
884	rcu_read_lock();
885	xa_for_each(&disk->part_tbl, idx, part) {
886		if (bdev_is_partition(part)) {
887			ret = true;
888			break;
889		}
890	}
891	rcu_read_unlock();
892
893	return ret;
894}
895
896/**
897 * disk_set_zoned - configure the zoned model for a disk
898 * @disk:	the gendisk of the queue to configure
899 * @model:	the zoned model to set
900 *
901 * Set the zoned model of @disk to @model.
902 *
903 * When @model is BLK_ZONED_HM (host managed), this should be called only
904 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
905 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
906 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
907 * on the disk.
908 */
909void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
910{
911	struct request_queue *q = disk->queue;
912
913	switch (model) {
914	case BLK_ZONED_HM:
915		/*
916		 * Host managed devices are supported only if
917		 * CONFIG_BLK_DEV_ZONED is enabled.
918		 */
919		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
920		break;
921	case BLK_ZONED_HA:
922		/*
923		 * Host aware devices can be treated either as regular block
924		 * devices (similar to drive managed devices) or as zoned block
925		 * devices to take advantage of the zone command set, similarly
926		 * to host managed devices. We try the latter if there are no
927		 * partitions and zoned block device support is enabled, else
928		 * we do nothing special as far as the block layer is concerned.
929		 */
930		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
931		    disk_has_partitions(disk))
932			model = BLK_ZONED_NONE;
933		break;
934	case BLK_ZONED_NONE:
935	default:
936		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
937			model = BLK_ZONED_NONE;
938		break;
939	}
940
941	q->limits.zoned = model;
942	if (model != BLK_ZONED_NONE) {
943		/*
944		 * Set the zone write granularity to the device logical block
945		 * size by default. The driver can change this value if needed.
946		 */
947		blk_queue_zone_write_granularity(q,
948						queue_logical_block_size(q));
949	} else {
950		disk_clear_zone_settings(disk);
951	}
952}
953EXPORT_SYMBOL_GPL(disk_set_zoned);
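/*
 * Editor's example (illustrative sketch): a hypothetical host-managed SMR
 * driver marks its disk as zoned and then publishes the zone geometry.
 * disk_set_zoned() above already defaults zone_write_granularity to the
 * logical block size.
 */
static void example_setup_zoned_disk(struct gendisk *disk)
{
	disk_set_zoned(disk, BLK_ZONED_HM);
	blk_queue_chunk_sectors(disk->queue, 524288);	/* 256 MiB zones */
}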
954
955int bdev_alignment_offset(struct block_device *bdev)
956{
957	struct request_queue *q = bdev_get_queue(bdev);
958
959	if (q->limits.misaligned)
960		return -1;
961	if (bdev_is_partition(bdev))
962		return queue_limit_alignment_offset(&q->limits,
963				bdev->bd_start_sect);
964	return q->limits.alignment_offset;
965}
966EXPORT_SYMBOL_GPL(bdev_alignment_offset);
967
968unsigned int bdev_discard_alignment(struct block_device *bdev)
969{
970	struct request_queue *q = bdev_get_queue(bdev);
971
972	if (bdev_is_partition(bdev))
973		return queue_limit_discard_alignment(&q->limits,
974				bdev->bd_start_sect);
975	return q->limits.discard_alignment;
976}
977EXPORT_SYMBOL_GPL(bdev_discard_alignment);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to setting various queue properties from drivers
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/bio.h>
  9#include <linux/blk-integrity.h>
 10#include <linux/pagemap.h>
 11#include <linux/backing-dev-defs.h>
 12#include <linux/gcd.h>
 13#include <linux/lcm.h>
 14#include <linux/jiffies.h>
 15#include <linux/gfp.h>
 16#include <linux/dma-mapping.h>
 17
 18#include "blk.h"
 19#include "blk-rq-qos.h"
 20#include "blk-wbt.h"
 21
 22void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 23{
 24	q->rq_timeout = timeout;
 25}
 26EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 27
 28/**
 29 * blk_set_stacking_limits - set default limits for stacking devices
 30 * @lim:  the queue_limits structure to reset
 31 *
 32 * Prepare queue limits for applying limits from underlying devices using
 33 * blk_stack_limits().
 34 */
 35void blk_set_stacking_limits(struct queue_limits *lim)
 36{
 37	memset(lim, 0, sizeof(*lim));
 38	lim->logical_block_size = SECTOR_SIZE;
 39	lim->physical_block_size = SECTOR_SIZE;
 40	lim->io_min = SECTOR_SIZE;
 41	lim->discard_granularity = SECTOR_SIZE;
 42	lim->dma_alignment = SECTOR_SIZE - 1;
 43	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 44
 45	/* Inherit limits from component devices */
 46	lim->max_segments = USHRT_MAX;
 47	lim->max_discard_segments = USHRT_MAX;
 48	lim->max_hw_sectors = UINT_MAX;
 49	lim->max_segment_size = UINT_MAX;
 50	lim->max_sectors = UINT_MAX;
 51	lim->max_dev_sectors = UINT_MAX;
 52	lim->max_write_zeroes_sectors = UINT_MAX;
 53	lim->max_hw_zone_append_sectors = UINT_MAX;
 54	lim->max_user_discard_sectors = UINT_MAX;
 55}
 56EXPORT_SYMBOL(blk_set_stacking_limits);
 57
 58void blk_apply_bdi_limits(struct backing_dev_info *bdi,
 59		struct queue_limits *lim)
 60{
 61	/*
 62	 * For read-ahead of large files to be effective, we need to read ahead
 63	 * at least twice the optimal I/O size.
 64	 */
 65	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
 66	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
 67}
 68
 69static int blk_validate_zoned_limits(struct queue_limits *lim)
 70{
 71	if (!(lim->features & BLK_FEAT_ZONED)) {
 72		if (WARN_ON_ONCE(lim->max_open_zones) ||
 73		    WARN_ON_ONCE(lim->max_active_zones) ||
 74		    WARN_ON_ONCE(lim->zone_write_granularity) ||
 75		    WARN_ON_ONCE(lim->max_zone_append_sectors))
 76			return -EINVAL;
 77		return 0;
 78	}
 79
 80	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
 81		return -EINVAL;
 82
 83	/*
 84	 * Given that active zones include open zones, the maximum number of
 85	 * open zones cannot be larger than the maximum number of active zones.
 86	 */
 87	if (lim->max_active_zones &&
 88	    lim->max_open_zones > lim->max_active_zones)
 89		return -EINVAL;
 90
 91	if (lim->zone_write_granularity < lim->logical_block_size)
 92		lim->zone_write_granularity = lim->logical_block_size;
 93
 94	/*
 95	 * The Zone Append size is limited by the maximum I/O size and the zone
 96	 * size given that it can't span zones.
 97	 *
 98	 * If no max_hw_zone_append_sectors limit is provided, the block layer
  99 * will emulate it, otherwise we're also bound by the hardware limit.
100	 */
101	lim->max_zone_append_sectors =
102		min_not_zero(lim->max_hw_zone_append_sectors,
103			min(lim->chunk_sectors, lim->max_hw_sectors));
104	return 0;
105}
106
107static int blk_validate_integrity_limits(struct queue_limits *lim)
108{
109	struct blk_integrity *bi = &lim->integrity;
110
111	if (!bi->tuple_size) {
112		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
113		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
114			pr_warn("invalid PI settings.\n");
115			return -EINVAL;
116		}
117		return 0;
118	}
119
120	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
121		pr_warn("integrity support disabled.\n");
122		return -EINVAL;
123	}
124
125	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
126	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
 127		pr_warn("ref tag not supported without checksum.\n");
128		return -EINVAL;
129	}
130
131	if (!bi->interval_exp)
132		bi->interval_exp = ilog2(lim->logical_block_size);
133
134	return 0;
135}
136
137/*
138 * Returns max guaranteed bytes which we can fit in a bio.
139 *
140 * We request that an atomic_write is ITER_UBUF iov_iter (so a single vector),
141 * so we assume that we can fit in at least PAGE_SIZE in a segment, apart from
142 * the first and last segments.
143 */
144static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
145{
146	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
147	unsigned int length;
148
149	length = min(max_segments, 2) * lim->logical_block_size;
150	if (max_segments > 2)
151		length += (max_segments - 2) * PAGE_SIZE;
152
153	return length;
154}
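/*
 * Editor's worked example (illustrative only): with lim->max_segments = 4,
 * 512-byte logical blocks and 4 KiB pages, the guarantee computed above is
 * min(4, 2) * 512 + (4 - 2) * 4096 = 9216 bytes, since only the middle
 * segments are assumed to hold a full page.
 */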
155
156static void blk_atomic_writes_update_limits(struct queue_limits *lim)
157{
158	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
159					blk_queue_max_guaranteed_bio(lim));
160
161	unit_limit = rounddown_pow_of_two(unit_limit);
162
163	lim->atomic_write_max_sectors =
164		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
165			lim->max_hw_sectors);
166	lim->atomic_write_unit_min =
167		min(lim->atomic_write_hw_unit_min, unit_limit);
168	lim->atomic_write_unit_max =
169		min(lim->atomic_write_hw_unit_max, unit_limit);
170	lim->atomic_write_boundary_sectors =
171		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
172}
173
174static void blk_validate_atomic_write_limits(struct queue_limits *lim)
175{
176	unsigned int boundary_sectors;
177
178	if (!lim->atomic_write_hw_max)
179		goto unsupported;
180
181	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
182		goto unsupported;
183
184	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
185		goto unsupported;
186
187	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
188			 lim->atomic_write_hw_unit_max))
189		goto unsupported;
190
191	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
192			 lim->atomic_write_hw_max))
193		goto unsupported;
194
195	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
196
197	if (boundary_sectors) {
198		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
199				 lim->atomic_write_hw_boundary))
200			goto unsupported;
201		/*
202		 * A feature of boundary support is that it disallows bios to
203		 * be merged which would result in a merged request which
204		 * crosses either a chunk sector or atomic write HW boundary,
205		 * even though chunk sectors may be just set for performance.
206		 * For simplicity, disallow atomic writes for a chunk sector
207		 * which is non-zero and smaller than atomic write HW boundary.
208		 * Furthermore, chunk sectors must be a multiple of atomic
209		 * write HW boundary. Otherwise boundary support becomes
210		 * complicated.
211		 * Devices which do not conform to these rules can be dealt
212		 * with if and when they show up.
213		 */
214		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
215			goto unsupported;
216
217		/*
218		 * The boundary size just needs to be a multiple of unit_max
219		 * (and not necessarily a power-of-2), so this following check
220		 * could be relaxed in future.
221		 * Furthermore, if needed, unit_max could even be reduced so
222		 * that it is compliant with a !power-of-2 boundary.
223		 */
224		if (!is_power_of_2(boundary_sectors))
225			goto unsupported;
226	}
227
228	blk_atomic_writes_update_limits(lim);
229	return;
230
231unsupported:
232	lim->atomic_write_max_sectors = 0;
233	lim->atomic_write_boundary_sectors = 0;
234	lim->atomic_write_unit_min = 0;
235	lim->atomic_write_unit_max = 0;
236}
237
238/*
239 * Check that the limits in lim are valid, initialize defaults for unset
240 * values, and cap values based on others where needed.
241 */
242int blk_validate_limits(struct queue_limits *lim)
243{
244	unsigned int max_hw_sectors;
245	unsigned int logical_block_sectors;
246	int err;
247
248	/*
249	 * Unless otherwise specified, default to 512 byte logical blocks and a
250	 * physical block size equal to the logical block size.
251	 */
252	if (!lim->logical_block_size)
253		lim->logical_block_size = SECTOR_SIZE;
254	else if (blk_validate_block_size(lim->logical_block_size)) {
255		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
256		return -EINVAL;
257	}
258	if (lim->physical_block_size < lim->logical_block_size)
259		lim->physical_block_size = lim->logical_block_size;
260
261	/*
262	 * The minimum I/O size defaults to the physical block size unless
263	 * explicitly overridden.
264	 */
265	if (lim->io_min < lim->physical_block_size)
266		lim->io_min = lim->physical_block_size;
267
268	/*
269	 * The optimal I/O size may not be aligned to physical block size
270	 * (because it may be limited by dma engines which have no clue about
271	 * block size of the disks attached to them), so we round it down here.
272	 */
273	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
274
275	/*
276	 * max_hw_sectors has a somewhat weird default for historical reason,
 277	 * but drivers really should set their own instead of relying on this
278	 * value.
279	 *
280	 * The block layer relies on the fact that every driver can
 281	 * handle at least a page worth of data per I/O, and needs the value
282	 * aligned to the logical block size.
283	 */
284	if (!lim->max_hw_sectors)
285		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
286	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
287		return -EINVAL;
288	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
289	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
290		return -EINVAL;
291	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
292			logical_block_sectors);
293
294	/*
295	 * The actual max_sectors value is a complex beast and also takes the
296	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
297	 * value into account.  The ->max_sectors value is always calculated
298	 * from these, so directly setting it won't have any effect.
299	 */
300	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
301				lim->max_dev_sectors);
302	if (lim->max_user_sectors) {
303		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
304			return -EINVAL;
305		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
306	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
307		lim->max_sectors =
308			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
309	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
310		lim->max_sectors =
311			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
312	} else {
313		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
314	}
315	lim->max_sectors = round_down(lim->max_sectors,
316			logical_block_sectors);
317
318	/*
 319	 * Random default for the maximum number of segments.  Drivers should not
320	 * rely on this and set their own.
321	 */
322	if (!lim->max_segments)
323		lim->max_segments = BLK_MAX_SEGMENTS;
324
325	lim->max_discard_sectors =
326		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
327
328	if (!lim->max_discard_segments)
329		lim->max_discard_segments = 1;
330
331	if (lim->discard_granularity < lim->physical_block_size)
332		lim->discard_granularity = lim->physical_block_size;
333
334	/*
335	 * By default there is no limit on the segment boundary alignment,
336	 * but if there is one it can't be smaller than the page size as
337	 * that would break all the normal I/O patterns.
338	 */
339	if (!lim->seg_boundary_mask)
340		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
341	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
342		return -EINVAL;
343
344	/*
345	 * Stacking device may have both virtual boundary and max segment
346	 * size limit, so allow this setting now, and long-term the two
347	 * might need to move out of stacking limits since we have immutable
348	 * bvec and lower layer bio splitting is supposed to handle the two
349	 * correctly.
350	 */
351	if (lim->virt_boundary_mask) {
352		if (!lim->max_segment_size)
353			lim->max_segment_size = UINT_MAX;
354	} else {
355		/*
356		 * The maximum segment size has an odd historic 64k default that
357		 * drivers probably should override.  Just like the I/O size we
358		 * require drivers to at least handle a full page per segment.
359		 */
360		if (!lim->max_segment_size)
361			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
362		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
363			return -EINVAL;
364	}
365
366	/*
367	 * We require drivers to at least do logical block aligned I/O, but
368	 * historically could not check for that due to the separate calls
369	 * to set the limits.  Once the transition is finished the check
370	 * below should be narrowed down to check the logical block size.
371	 */
372	if (!lim->dma_alignment)
373		lim->dma_alignment = SECTOR_SIZE - 1;
374	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
375		return -EINVAL;
376
377	if (lim->alignment_offset) {
378		lim->alignment_offset &= (lim->physical_block_size - 1);
379		lim->flags &= ~BLK_FLAG_MISALIGNED;
380	}
381
382	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
383		lim->features &= ~BLK_FEAT_FUA;
384
385	blk_validate_atomic_write_limits(lim);
386
387	err = blk_validate_integrity_limits(lim);
388	if (err)
389		return err;
390	return blk_validate_zoned_limits(lim);
391}
392EXPORT_SYMBOL_GPL(blk_validate_limits);
393
394/*
395 * Set the default limits for a newly allocated queue.  @lim contains the
396 * initial limits set by the driver, which could be no limit in which case
397 * all fields are cleared to zero.
398 */
399int blk_set_default_limits(struct queue_limits *lim)
400{
401	/*
402	 * Most defaults are set by capping the bounds in blk_validate_limits,
403	 * but max_user_discard_sectors is special and needs an explicit
404	 * initialization to the max value here.
405	 */
406	lim->max_user_discard_sectors = UINT_MAX;
407	return blk_validate_limits(lim);
408}
409
410/**
411 * queue_limits_commit_update - commit an atomic update of queue limits
412 * @q:		queue to update
413 * @lim:	limits to apply
414 *
415 * Apply the limits in @lim that were obtained from queue_limits_start_update()
416 * and updated by the caller to @q.
417 *
418 * Returns 0 if successful, else a negative error code.
419 */
420int queue_limits_commit_update(struct request_queue *q,
421		struct queue_limits *lim)
422{
423	int error;
424
425	error = blk_validate_limits(lim);
426	if (error)
427		goto out_unlock;
428
429#ifdef CONFIG_BLK_INLINE_ENCRYPTION
430	if (q->crypto_profile && lim->integrity.tag_size) {
431		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
432		error = -EINVAL;
433		goto out_unlock;
434	}
435#endif
436
437	q->limits = *lim;
438	if (q->disk)
439		blk_apply_bdi_limits(q->disk->bdi, lim);
440out_unlock:
441	mutex_unlock(&q->limits_lock);
442	return error;
443}
444EXPORT_SYMBOL_GPL(queue_limits_commit_update);
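/*
 * Editor's example (illustrative sketch): the intended update pattern for
 * this API -- take a snapshot with queue_limits_start_update() (which, on
 * kernels providing it, returns the current limits by value while holding
 * q->limits_lock), modify the copy, then commit it.  The driver context is
 * hypothetical.
 */
static int example_update_max_hw_sectors(struct request_queue *q,
					 unsigned int new_max)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_hw_sectors = new_max;
	return queue_limits_commit_update(q, &lim);
}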
445
446/**
447 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
448 * @q:		queue to update
449 * @lim:	limits to apply
450 *
451 * Apply the limits in @lim that were obtained from queue_limits_start_update()
452 * and updated with the new values by the caller to @q.  Freezes the queue
453 * before the update and unfreezes it after.
454 *
455 * Returns 0 if successful, else a negative error code.
456 */
457int queue_limits_commit_update_frozen(struct request_queue *q,
458		struct queue_limits *lim)
459{
460	int ret;
461
462	blk_mq_freeze_queue(q);
463	ret = queue_limits_commit_update(q, lim);
464	blk_mq_unfreeze_queue(q);
465
466	return ret;
467}
468EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
469
470/**
471 * queue_limits_set - apply queue limits to queue
472 * @q:		queue to update
473 * @lim:	limits to apply
474 *
475 * Apply the limits in @lim that were freshly initialized to @q.
476 * To update existing limits use queue_limits_start_update() and
477 * queue_limits_commit_update() instead.
478 *
479 * Returns 0 if successful, else a negative error code.
480 */
481int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
482{
483	mutex_lock(&q->limits_lock);
484	return queue_limits_commit_update(q, lim);
485}
486EXPORT_SYMBOL_GPL(queue_limits_set);
487
488static int queue_limit_alignment_offset(const struct queue_limits *lim,
489		sector_t sector)
490{
491	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
492	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
493		<< SECTOR_SHIFT;
494
495	return (granularity + lim->alignment_offset - alignment) % granularity;
496}
497
498static unsigned int queue_limit_discard_alignment(
499		const struct queue_limits *lim, sector_t sector)
500{
501	unsigned int alignment, granularity, offset;
502
503	if (!lim->max_discard_sectors)
504		return 0;
505
506	/* Why are these in bytes, not sectors? */
507	alignment = lim->discard_alignment >> SECTOR_SHIFT;
508	granularity = lim->discard_granularity >> SECTOR_SHIFT;
509
510	/* Offset of the partition start in 'granularity' sectors */
511	offset = sector_div(sector, granularity);
512
513	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
514	offset = (granularity + alignment - offset) % granularity;
515
516	/* Turn it back into bytes, gaah */
517	return offset << SECTOR_SHIFT;
518}
519
520static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
521{
522	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
523	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
524		sectors = PAGE_SIZE >> SECTOR_SHIFT;
525	return sectors;
526}
527
528/* Check if second and later bottom devices are compliant */
529static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
530				struct queue_limits *b)
531{
 532	/* We're not going to support different boundary sizes... yet */
533	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
534		return false;
535
536	/* Can't support this */
537	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
538		return false;
539
540	/* Or this */
541	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
542		return false;
543
544	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
545				b->atomic_write_hw_max);
546	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
547				b->atomic_write_hw_unit_min);
548	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
549				b->atomic_write_hw_unit_max);
550	return true;
551}
552
553/* Check for valid boundary of first bottom device */
554static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
555				struct queue_limits *b)
556{
557	/*
558	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
559	 * devices store chunk sectors in t->io_min.
560	 */
561	if (b->atomic_write_hw_boundary > t->io_min &&
562	    b->atomic_write_hw_boundary % t->io_min)
563		return false;
564	if (t->io_min > b->atomic_write_hw_boundary &&
565	    t->io_min % b->atomic_write_hw_boundary)
566		return false;
567
568	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
569	return true;
570}
571
572
573/* Check stacking of first bottom device */
574static bool blk_stack_atomic_writes_head(struct queue_limits *t,
575				struct queue_limits *b)
576{
577	if (b->atomic_write_hw_boundary &&
578	    !blk_stack_atomic_writes_boundary_head(t, b))
579		return false;
580
581	if (t->io_min <= SECTOR_SIZE) {
582		/* No chunk sectors, so use bottom device values directly */
583		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
584		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
585		t->atomic_write_hw_max = b->atomic_write_hw_max;
586		return true;
587	}
588
589	/*
590	 * Find values for limits which work for chunk size.
591	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
592	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
593	 * So we need to find highest power-of-2 which works for the chunk
594	 * size.
595	 * As an example scenario, we could have b->unit_max = 16K and
596	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
597	 * aligned with both limits, i.e. 8K in this example.
598	 */
599	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
600	while (t->io_min % t->atomic_write_hw_unit_max)
601		t->atomic_write_hw_unit_max /= 2;
602
603	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
604					  t->atomic_write_hw_unit_max);
605	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
606
607	return true;
608}
609
610static void blk_stack_atomic_writes_limits(struct queue_limits *t,
611				struct queue_limits *b, sector_t start)
612{
613	if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
614		goto unsupported;
615
616	if (!b->atomic_write_unit_min)
617		goto unsupported;
618
619	if (!blk_atomic_write_start_sect_aligned(start, b))
620		goto unsupported;
621
622	/*
623	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
624	 * device, so check for compliance.
625	 */
626	if (t->atomic_write_hw_max) {
627		if (!blk_stack_atomic_writes_tail(t, b))
628			goto unsupported;
629		return;
630	}
631
632	if (!blk_stack_atomic_writes_head(t, b))
633		goto unsupported;
634	return;
635
636unsupported:
637	t->atomic_write_hw_max = 0;
638	t->atomic_write_hw_unit_max = 0;
639	t->atomic_write_hw_unit_min = 0;
640	t->atomic_write_hw_boundary = 0;
641	t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
642}
643
644/**
645 * blk_stack_limits - adjust queue_limits for stacked devices
646 * @t:	the stacking driver limits (top device)
647 * @b:  the underlying queue limits (bottom, component device)
648 * @start:  first data sector within component device
649 *
650 * Description:
651 *    This function is used by stacking drivers like MD and DM to ensure
652 *    that all component devices have compatible block sizes and
653 *    alignments.  The stacking driver must provide a queue_limits
654 *    struct (top) and then iteratively call the stacking function for
655 *    all component (bottom) devices.  The stacking function will
656 *    attempt to combine the values and ensure proper alignment.
657 *
658 *    Returns 0 if the top and bottom queue_limits are compatible.  The
659 *    top device's block sizes and alignment offsets may be adjusted to
660 *    ensure alignment with the bottom device. If no compatible sizes
661 *    and alignments exist, -1 is returned and the resulting top
662 *    queue_limits will have the misaligned flag set to indicate that
663 *    the alignment_offset is undefined.
664 */
665int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
666		     sector_t start)
667{
668	unsigned int top, bottom, alignment, ret = 0;
669
670	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
671
672	/*
673	 * Some features need to be supported both by the stacking driver and all
674	 * underlying devices.  The stacking driver sets these flags before
675	 * stacking the limits, and this will clear the flags if any of the
676	 * underlying devices does not support them.
677	 */
678	if (!(b->features & BLK_FEAT_NOWAIT))
679		t->features &= ~BLK_FEAT_NOWAIT;
680	if (!(b->features & BLK_FEAT_POLL))
681		t->features &= ~BLK_FEAT_POLL;
682
683	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
684
685	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
686	t->max_user_sectors = min_not_zero(t->max_user_sectors,
687			b->max_user_sectors);
688	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
689	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
690	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
691					b->max_write_zeroes_sectors);
692	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
693					b->max_hw_zone_append_sectors);
694
695	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
696					    b->seg_boundary_mask);
697	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
698					    b->virt_boundary_mask);
699
700	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
701	t->max_discard_segments = min_not_zero(t->max_discard_segments,
702					       b->max_discard_segments);
703	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
704						 b->max_integrity_segments);
705
706	t->max_segment_size = min_not_zero(t->max_segment_size,
707					   b->max_segment_size);
708
709	alignment = queue_limit_alignment_offset(b, start);
710
711	/* Bottom device has different alignment.  Check that it is
712	 * compatible with the current top alignment.
713	 */
714	if (t->alignment_offset != alignment) {
715
716		top = max(t->physical_block_size, t->io_min)
717			+ t->alignment_offset;
718		bottom = max(b->physical_block_size, b->io_min) + alignment;
719
720		/* Verify that top and bottom intervals line up */
721		if (max(top, bottom) % min(top, bottom)) {
722			t->flags |= BLK_FLAG_MISALIGNED;
723			ret = -1;
724		}
725	}
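
	/*
	 * Worked example with hypothetical values: if the top limits so far
	 * have io_min = 4K and alignment_offset = 0 (top interval 4096), and
	 * the bottom device reports io_min = 4K at an alignment of 512 bytes
	 * (bottom interval 4608), then 4608 % 4096 == 512, so the intervals
	 * do not line up and the misaligned flag is set.
	 */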
726
727	t->logical_block_size = max(t->logical_block_size,
728				    b->logical_block_size);
729
730	t->physical_block_size = max(t->physical_block_size,
731				     b->physical_block_size);
732
733	t->io_min = max(t->io_min, b->io_min);
734	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
735	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
736
737	/* Set non-power-of-2 compatible chunk_sectors boundary */
738	if (b->chunk_sectors)
739		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
740
741	/* Physical block size a multiple of the logical block size? */
742	if (t->physical_block_size & (t->logical_block_size - 1)) {
743		t->physical_block_size = t->logical_block_size;
744		t->flags |= BLK_FLAG_MISALIGNED;
745		ret = -1;
746	}
747
748	/* Minimum I/O a multiple of the physical block size? */
749	if (t->io_min & (t->physical_block_size - 1)) {
750		t->io_min = t->physical_block_size;
751		t->flags |= BLK_FLAG_MISALIGNED;
752		ret = -1;
753	}
754
755	/* Optimal I/O a multiple of the physical block size? */
756	if (t->io_opt & (t->physical_block_size - 1)) {
757		t->io_opt = 0;
758		t->flags |= BLK_FLAG_MISALIGNED;
759		ret = -1;
760	}
761
762	/* chunk_sectors a multiple of the physical block size? */
763	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
764		t->chunk_sectors = 0;
765		t->flags |= BLK_FLAG_MISALIGNED;
766		ret = -1;
767	}
768
769	/* Find lowest common alignment_offset */
770	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
771		% max(t->physical_block_size, t->io_min);
772
773	/* Verify that new alignment_offset is on a logical block boundary */
774	if (t->alignment_offset & (t->logical_block_size - 1)) {
775		t->flags |= BLK_FLAG_MISALIGNED;
776		ret = -1;
777	}
778
779	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
780	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
781	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
782
783	/* Discard alignment and granularity */
784	if (b->discard_granularity) {
785		alignment = queue_limit_discard_alignment(b, start);
786
787		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
788						      b->max_discard_sectors);
789		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
790							 b->max_hw_discard_sectors);
791		t->discard_granularity = max(t->discard_granularity,
792					     b->discard_granularity);
793		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
794			t->discard_granularity;
795	}
796	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
797						   b->max_secure_erase_sectors);
798	t->zone_write_granularity = max(t->zone_write_granularity,
799					b->zone_write_granularity);
800	if (!(t->features & BLK_FEAT_ZONED)) {
801		t->zone_write_granularity = 0;
802		t->max_zone_append_sectors = 0;
803	}
804	blk_stack_atomic_writes_limits(t, b, start);
805
806	return ret;
807}
808EXPORT_SYMBOL(blk_stack_limits);
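
/*
 * Illustrative usage sketch for a hypothetical stacking driver: limits are
 * typically reset with blk_set_stacking_limits() and then combined one
 * component device at a time, checking the return value for misalignment.
 * The dev[] array, ndevs and start_sector fields below are assumptions made
 * for this sketch, not part of any real driver.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < ndevs; i++) {
 *		if (blk_stack_limits(&lim, &dev[i].limits,
 *				     dev[i].start_sector))
 *			pr_warn("component %d is misaligned\n", i);
 *	}
 */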
809
810/**
811 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
812 * @t:	the stacking driver limits (top device)
813 * @bdev:  the underlying block device (bottom)
814 * @offset:  offset to beginning of data within component device
815 * @pfx: prefix to use for warnings logged
816 *
817 * Description:
818 *    This function is used by stacking drivers like MD and DM to ensure
819 *    that all component devices have compatible block sizes and
820 *    alignments.  The stacking driver must provide a queue_limits
821 *    struct (top) and then iteratively call the stacking function for
822 *    all component (bottom) devices.  The stacking function will
823 *    attempt to combine the values and ensure proper alignment.
824 */
825void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
826		sector_t offset, const char *pfx)
827{
828	if (blk_stack_limits(t, bdev_limits(bdev),
829			get_start_sect(bdev) + offset))
830		pr_notice("%s: Warning: Device %pg is misaligned\n",
831			pfx, bdev);
832}
833EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
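
/*
 * Illustrative usage sketch for a hypothetical stacking driver: when the
 * component is already opened as a block device, queue_limits_stack_bdev()
 * performs the blk_stack_limits() call above and logs the misalignment
 * warning itself.  The lim, bdev and data_offset names, and the prefix
 * string, are assumptions made for this sketch.
 *
 *	queue_limits_stack_bdev(&lim, bdev, data_offset, "stacked-dev");
 */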
834
835/**
836 * queue_limits_stack_integrity - stack integrity profile
837 * @t: target queue limits
838 * @b: base queue limits
839 *
840 * Check if the integrity profile in @b can be stacked into the
841 * target @t.  Stacking is possible if either:
842 *
843 *   a) @t does not have any integrity information stacked into it yet
844 *   b) the integrity profile in @b is identical to the one in @t
845 *
846 * If @b can be stacked into @t, return %true.  Else return %false and clear the
847 * integrity information in @t.
848 */
849bool queue_limits_stack_integrity(struct queue_limits *t,
850		struct queue_limits *b)
851{
852	struct blk_integrity *ti = &t->integrity;
853	struct blk_integrity *bi = &b->integrity;
854
855	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
856		return true;
857
858	if (!ti->tuple_size) {
859		/* inherit the settings from the first underlying device */
860		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
861			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
862				(bi->flags & BLK_INTEGRITY_REF_TAG);
863			ti->csum_type = bi->csum_type;
864			ti->tuple_size = bi->tuple_size;
865			ti->pi_offset = bi->pi_offset;
866			ti->interval_exp = bi->interval_exp;
867			ti->tag_size = bi->tag_size;
868			goto done;
869		}
870		if (!bi->tuple_size)
871			goto done;
872	}
873
874	if (ti->tuple_size != bi->tuple_size)
875		goto incompatible;
876	if (ti->interval_exp != bi->interval_exp)
877		goto incompatible;
878	if (ti->tag_size != bi->tag_size)
879		goto incompatible;
880	if (ti->csum_type != bi->csum_type)
881		goto incompatible;
882	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
883	    (bi->flags & BLK_INTEGRITY_REF_TAG))
884		goto incompatible;
885
886done:
887	ti->flags |= BLK_INTEGRITY_STACKED;
888	return true;
889
890incompatible:
891	memset(ti, 0, sizeof(*ti));
892	return false;
893}
894EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
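
/*
 * Illustrative usage sketch for a hypothetical stacking driver: integrity
 * limits are stacked alongside the regular limits, and any mismatch between
 * component devices clears the integrity information on the top device.
 * The lim and bdev names are assumptions made for this sketch.
 *
 *	if (!queue_limits_stack_integrity(&lim, bdev_limits(bdev)))
 *		pr_warn("%pg: incompatible integrity profile\n", bdev);
 */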
895
896/**
897 * blk_set_queue_depth - tell the block layer about the device queue depth
898 * @q:		the request queue for the device
899 * @depth:		queue depth
900 *
901 */
902void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
903{
904	q->queue_depth = depth;
905	rq_qos_queue_depth_changed(q);
906}
907EXPORT_SYMBOL(blk_set_queue_depth);
908
909int bdev_alignment_offset(struct block_device *bdev)
910{
911	struct request_queue *q = bdev_get_queue(bdev);
912
913	if (q->limits.flags & BLK_FLAG_MISALIGNED)
914		return -1;
915	if (bdev_is_partition(bdev))
916		return queue_limit_alignment_offset(&q->limits,
917				bdev->bd_start_sect);
918	return q->limits.alignment_offset;
919}
920EXPORT_SYMBOL_GPL(bdev_alignment_offset);
921
922unsigned int bdev_discard_alignment(struct block_device *bdev)
923{
924	struct request_queue *q = bdev_get_queue(bdev);
925
926	if (bdev_is_partition(bdev))
927		return queue_limit_discard_alignment(&q->limits,
928				bdev->bd_start_sect);
929	return q->limits.discard_alignment;
930}
931EXPORT_SYMBOL_GPL(bdev_discard_alignment);