v4.6 (block/blk-settings.c)
 
  1/*
  2 * Functions related to setting various queue properties from drivers
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/gcd.h>
 11#include <linux/lcm.h>
 12#include <linux/jiffies.h>
 13#include <linux/gfp.h>
 14
 15#include "blk.h"
 16
 17unsigned long blk_max_low_pfn;
 18EXPORT_SYMBOL(blk_max_low_pfn);
 19
 20unsigned long blk_max_pfn;
 21
 22/**
 23 * blk_queue_prep_rq - set a prepare_request function for queue
 24 * @q:		queue
 25 * @pfn:	prepare_request function
 26 *
 27 * It's possible for a queue to register a prepare_request callback which
 28 * is invoked before the request is handed to the request_fn. The goal of
 29 * the function is to prepare a request for I/O, it can be used to build a
 30 * cdb from the request data for instance.
 31 *
 32 */
 33void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 34{
 35	q->prep_rq_fn = pfn;
 36}
 37EXPORT_SYMBOL(blk_queue_prep_rq);
 38
 39/**
 40 * blk_queue_unprep_rq - set an unprepare_request function for queue
 41 * @q:		queue
 42 * @ufn:	unprepare_request function
 43 *
 44 * It's possible for a queue to register an unprepare_request callback
 45 * which is invoked before the request is finally completed. The goal
 46 * of the function is to deallocate any data that was allocated in the
 47 * prepare_request callback.
 48 *
 49 */
 50void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 51{
 52	q->unprep_rq_fn = ufn;
 53}
 54EXPORT_SYMBOL(blk_queue_unprep_rq);
 55
 56void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 57{
 58	q->softirq_done_fn = fn;
 59}
 60EXPORT_SYMBOL(blk_queue_softirq_done);
 61
 62void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 63{
 64	q->rq_timeout = timeout;
 65}
 66EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 67
 68void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
 69{
 70	q->rq_timed_out_fn = fn;
 71}
 72EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
 73
 74void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 75{
 76	q->lld_busy_fn = fn;
 77}
 78EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 79
 80/**
 81 * blk_set_default_limits - reset limits to default values
 82 * @lim:  the queue_limits structure to reset
 83 *
 84 * Description:
 85 *   Returns a queue_limit struct to its default state.
 86 */
 87void blk_set_default_limits(struct queue_limits *lim)
 88{
 89	lim->max_segments = BLK_MAX_SEGMENTS;
 90	lim->max_integrity_segments = 0;
 91	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 92	lim->virt_boundary_mask = 0;
 93	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 94	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 95	lim->max_dev_sectors = 0;
 96	lim->chunk_sectors = 0;
 97	lim->max_write_same_sectors = 0;
 98	lim->max_discard_sectors = 0;
 99	lim->max_hw_discard_sectors = 0;
100	lim->discard_granularity = 0;
101	lim->discard_alignment = 0;
102	lim->discard_misaligned = 0;
103	lim->discard_zeroes_data = 0;
104	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
105	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
106	lim->alignment_offset = 0;
107	lim->io_opt = 0;
108	lim->misaligned = 0;
109	lim->cluster = 1;
110}
111EXPORT_SYMBOL(blk_set_default_limits);
112
113/**
114 * blk_set_stacking_limits - set default limits for stacking devices
115 * @lim:  the queue_limits structure to reset
116 *
117 * Description:
118 *   Returns a queue_limit struct to its default state. Should be used
119 *   by stacking drivers like DM that have no internal limits.
120 */
121void blk_set_stacking_limits(struct queue_limits *lim)
122{
123	blk_set_default_limits(lim);
124
125	/* Inherit limits from component devices */
126	lim->discard_zeroes_data = 1;
127	lim->max_segments = USHRT_MAX;
128	lim->max_hw_sectors = UINT_MAX;
129	lim->max_segment_size = UINT_MAX;
130	lim->max_sectors = UINT_MAX;
131	lim->max_dev_sectors = UINT_MAX;
132	lim->max_write_same_sectors = UINT_MAX;
133}
134EXPORT_SYMBOL(blk_set_stacking_limits);
135
136/**
137 * blk_queue_make_request - define an alternate make_request function for a device
138 * @q:  the request queue for the device to be affected
139 * @mfn: the alternate make_request function
140 *
141 * Description:
142 *    The normal way for &struct bios to be passed to a device
143 *    driver is for them to be collected into requests on a request
144 *    queue, and then to allow the device driver to select requests
145 *    off that queue when it is ready.  This works well for many block
146 *    devices. However some block devices (typically virtual devices
147 *    such as md or lvm) do not benefit from the processing on the
148 *    request queue, and are served best by having the requests passed
149 *    directly to them.  This can be achieved by providing a function
150 *    to blk_queue_make_request().
151 *
152 * Caveat:
153 *    The driver that does this *must* be able to deal appropriately
154 *    with buffers in "highmemory". This can be accomplished by either calling
155 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
156 *    blk_queue_bounce() to create a buffer in normal memory.
157 **/
158void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
159{
160	/*
161	 * set defaults
162	 */
163	q->nr_requests = BLKDEV_MAX_RQ;
164
165	q->make_request_fn = mfn;
166	blk_queue_dma_alignment(q, 511);
167	blk_queue_congestion_threshold(q);
168	q->nr_batching = BLK_BATCH_REQ;
169
170	blk_set_default_limits(&q->limits);
171
172	/*
173	 * by default assume old behaviour and bounce for any highmem page
174	 */
175	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
176}
177EXPORT_SYMBOL(blk_queue_make_request);
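/*
 * Editorial sketch, not part of the kernel source: after allocating its
 * request_queue, a bio-based driver of this era would typically register
 * its own make_request function and a few basic limits at probe time,
 * roughly as below.  my_make_request is a hypothetical driver callback.
 *
 *	blk_queue_make_request(q, my_make_request);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 1024);
 */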
178
179/**
180 * blk_queue_bounce_limit - set bounce buffer limit for queue
181 * @q: the request queue for the device
182 * @max_addr: the maximum address the device can handle
183 *
184 * Description:
185 *    Different hardware can have different requirements as to what pages
186 *    it can do I/O directly to. A low level driver can call
187 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
188 *    buffers for doing I/O to pages residing above @max_addr.
189 **/
190void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
191{
192	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
193	int dma = 0;
194
195	q->bounce_gfp = GFP_NOIO;
196#if BITS_PER_LONG == 64
197	/*
198	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
199	 * some IOMMUs can handle everything, but I don't know of a
200	 * way to test this here.
201	 */
202	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
203		dma = 1;
204	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
205#else
206	if (b_pfn < blk_max_low_pfn)
207		dma = 1;
208	q->limits.bounce_pfn = b_pfn;
209#endif
210	if (dma) {
211		init_emergency_isa_pool();
212		q->bounce_gfp = GFP_NOIO | GFP_DMA;
213		q->limits.bounce_pfn = b_pfn;
214	}
215}
216EXPORT_SYMBOL(blk_queue_bounce_limit);
217
218/**
219 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
220 * @q:  the request queue for the device
221 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
222 *
223 * Description:
224 *    Enables a low level driver to set a hard upper limit,
225 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
226 *    the device driver based upon the capabilities of the I/O
227 *    controller.
228 *
229 *    max_dev_sectors is a hard limit imposed by the storage device for
230 *    READ/WRITE requests. It is set by the disk driver.
231 *
232 *    max_sectors is a soft limit imposed by the block layer for
233 *    filesystem type requests.  This value can be overridden on a
234 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
235 *    The soft limit can not exceed max_hw_sectors.
236 **/
237void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
238{
239	struct queue_limits *limits = &q->limits;
240	unsigned int max_sectors;
241
242	if ((max_hw_sectors << 9) < PAGE_SIZE) {
243		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
244		printk(KERN_INFO "%s: set to minimum %d\n",
245		       __func__, max_hw_sectors);
246	}
247
248	limits->max_hw_sectors = max_hw_sectors;
249	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
250	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
251	limits->max_sectors = max_sectors;
252}
253EXPORT_SYMBOL(blk_queue_max_hw_sectors);
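/*
 * Editorial sketch, not part of the kernel source: a hypothetical HBA
 * driver capable of 4 MiB transfers only raises the hard limit; the soft
 * limit stays at the block layer default until changed from user space.
 *
 *	blk_queue_max_hw_sectors(q, 8192);	// 8192 * 512b = 4 MiB hard limit
 *
 * max_sectors is then clamped to BLK_DEF_MAX_SECTORS above, and an
 * administrator may raise it up to max_hw_sectors through
 * /sys/block/<device>/queue/max_sectors_kb.
 */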
254
255/**
256 * blk_queue_chunk_sectors - set size of the chunk for this queue
257 * @q:  the request queue for the device
258 * @chunk_sectors:  chunk sectors in the usual 512b unit
259 *
260 * Description:
261 *    If a driver doesn't want IOs to cross a given chunk size, it can set
262 *    this limit and prevent merging across chunks. Note that the chunk size
263 *    must currently be a power-of-2 in sectors. Also note that the block
264 *    layer must accept a page worth of data at any offset. So if the
265 *    crossing of chunks is a hard limitation in the driver, it must still be
266 *    prepared to split single page bios.
267 **/
268void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
269{
270	BUG_ON(!is_power_of_2(chunk_sectors));
271	q->limits.chunk_sectors = chunk_sectors;
272}
273EXPORT_SYMBOL(blk_queue_chunk_sectors);
274
275/**
276 * blk_queue_max_discard_sectors - set max sectors for a single discard
277 * @q:  the request queue for the device
278 * @max_discard_sectors: maximum number of sectors to discard
279 **/
280void blk_queue_max_discard_sectors(struct request_queue *q,
281		unsigned int max_discard_sectors)
282{
283	q->limits.max_hw_discard_sectors = max_discard_sectors;
284	q->limits.max_discard_sectors = max_discard_sectors;
285}
286EXPORT_SYMBOL(blk_queue_max_discard_sectors);
287
288/**
289 * blk_queue_max_write_same_sectors - set max sectors for a single write same
290 * @q:  the request queue for the device
291 * @max_write_same_sectors: maximum number of sectors to write per command
292 **/
293void blk_queue_max_write_same_sectors(struct request_queue *q,
294				      unsigned int max_write_same_sectors)
295{
296	q->limits.max_write_same_sectors = max_write_same_sectors;
297}
298EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
299
300/**
301 * blk_queue_max_segments - set max hw segments for a request for this queue
302 * @q:  the request queue for the device
303 * @max_segments:  max number of segments
304 *
305 * Description:
306 *    Enables a low level driver to set an upper limit on the number of
307 *    hw data segments in a request.
308 **/
309void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
310{
311	if (!max_segments) {
312		max_segments = 1;
313		printk(KERN_INFO "%s: set to minimum %d\n",
314		       __func__, max_segments);
315	}
316
317	q->limits.max_segments = max_segments;
318}
319EXPORT_SYMBOL(blk_queue_max_segments);
320
321/**
322 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
323 * @q:  the request queue for the device
324 * @max_size:  max size of segment in bytes
325 *
326 * Description:
327 *    Enables a low level driver to set an upper limit on the size of a
328 *    coalesced segment
329 **/
330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
331{
332	if (max_size < PAGE_SIZE) {
333		max_size = PAGE_SIZE;
334		printk(KERN_INFO "%s: set to minimum %d\n",
335		       __func__, max_size);
336	}
337
338	q->limits.max_segment_size = max_size;
339}
340EXPORT_SYMBOL(blk_queue_max_segment_size);
341
342/**
343 * blk_queue_logical_block_size - set logical block size for the queue
344 * @q:  the request queue for the device
345 * @size:  the logical block size, in bytes
346 *
347 * Description:
348 *   This should be set to the lowest possible block size that the
349 *   storage device can address.  The default of 512 covers most
350 *   hardware.
351 **/
352void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
353{
354	q->limits.logical_block_size = size;
355
356	if (q->limits.physical_block_size < size)
357		q->limits.physical_block_size = size;
358
359	if (q->limits.io_min < q->limits.physical_block_size)
360		q->limits.io_min = q->limits.physical_block_size;
361}
362EXPORT_SYMBOL(blk_queue_logical_block_size);
363
364/**
365 * blk_queue_physical_block_size - set physical block size for the queue
366 * @q:  the request queue for the device
367 * @size:  the physical block size, in bytes
368 *
369 * Description:
370 *   This should be set to the lowest possible sector size that the
371 *   hardware can operate on without reverting to read-modify-write
372 *   operations.
373 */
374void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
375{
376	q->limits.physical_block_size = size;
377
378	if (q->limits.physical_block_size < q->limits.logical_block_size)
379		q->limits.physical_block_size = q->limits.logical_block_size;
380
381	if (q->limits.io_min < q->limits.physical_block_size)
382		q->limits.io_min = q->limits.physical_block_size;
383}
384EXPORT_SYMBOL(blk_queue_physical_block_size);
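/*
 * Editorial sketch, not part of the kernel source: a 512e disk (4 KiB
 * physical sectors addressed through 512-byte logical sectors) would be
 * described roughly as below; a 4Kn device would pass 4096 to both calls.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);	// also raises io_min to 4096
 */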
385
386/**
387 * blk_queue_alignment_offset - set physical block alignment offset
388 * @q:	the request queue for the device
389 * @offset: alignment offset in bytes
390 *
391 * Description:
392 *   Some devices are naturally misaligned to compensate for things like
393 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
394 *   should call this function for devices whose first sector is not
395 *   naturally aligned.
396 */
397void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
398{
399	q->limits.alignment_offset =
400		offset & (q->limits.physical_block_size - 1);
401	q->limits.misaligned = 0;
402}
403EXPORT_SYMBOL(blk_queue_alignment_offset);
404
405/**
406 * blk_limits_io_min - set minimum request size for a device
407 * @limits: the queue limits
408 * @min:  smallest I/O size in bytes
409 *
410 * Description:
411 *   Some devices have an internal block size bigger than the reported
412 *   hardware sector size.  This function can be used to signal the
413 *   smallest I/O the device can perform without incurring a performance
414 *   penalty.
415 */
416void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
417{
418	limits->io_min = min;
419
420	if (limits->io_min < limits->logical_block_size)
421		limits->io_min = limits->logical_block_size;
422
423	if (limits->io_min < limits->physical_block_size)
424		limits->io_min = limits->physical_block_size;
425}
426EXPORT_SYMBOL(blk_limits_io_min);
427
428/**
429 * blk_queue_io_min - set minimum request size for the queue
430 * @q:	the request queue for the device
431 * @min:  smallest I/O size in bytes
432 *
433 * Description:
434 *   Storage devices may report a granularity or preferred minimum I/O
435 *   size which is the smallest request the device can perform without
436 *   incurring a performance penalty.  For disk drives this is often the
437 *   physical block size.  For RAID arrays it is often the stripe chunk
438 *   size.  A properly aligned multiple of minimum_io_size is the
439 *   preferred request size for workloads where a high number of I/O
440 *   operations is desired.
441 */
442void blk_queue_io_min(struct request_queue *q, unsigned int min)
443{
444	blk_limits_io_min(&q->limits, min);
445}
446EXPORT_SYMBOL(blk_queue_io_min);
447
448/**
449 * blk_limits_io_opt - set optimal request size for a device
450 * @limits: the queue limits
451 * @opt:  optimal request size in bytes
452 *
453 * Description:
454 *   Storage devices may report an optimal I/O size, which is the
455 *   device's preferred unit for sustained I/O.  This is rarely reported
456 *   for disk drives.  For RAID arrays it is usually the stripe width or
457 *   the internal track size.  A properly aligned multiple of
458 *   optimal_io_size is the preferred request size for workloads where
459 *   sustained throughput is desired.
460 */
461void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
462{
463	limits->io_opt = opt;
464}
465EXPORT_SYMBOL(blk_limits_io_opt);
466
467/**
468 * blk_queue_io_opt - set optimal request size for the queue
469 * @q:	the request queue for the device
470 * @opt:  optimal request size in bytes
471 *
472 * Description:
473 *   Storage devices may report an optimal I/O size, which is the
474 *   device's preferred unit for sustained I/O.  This is rarely reported
475 *   for disk drives.  For RAID arrays it is usually the stripe width or
476 *   the internal track size.  A properly aligned multiple of
477 *   optimal_io_size is the preferred request size for workloads where
478 *   sustained throughput is desired.
479 */
480void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
481{
482	blk_limits_io_opt(&q->limits, opt);
483}
484EXPORT_SYMBOL(blk_queue_io_opt);
485
486/**
487 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
488 * @t:	the stacking driver (top)
489 * @b:  the underlying device (bottom)
490 **/
491void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
492{
493	blk_stack_limits(&t->limits, &b->limits, 0);
494}
495EXPORT_SYMBOL(blk_queue_stack_limits);
496
497/**
498 * blk_stack_limits - adjust queue_limits for stacked devices
499 * @t:	the stacking driver limits (top device)
500 * @b:  the underlying queue limits (bottom, component device)
501 * @start:  first data sector within component device
502 *
503 * Description:
504 *    This function is used by stacking drivers like MD and DM to ensure
505 *    that all component devices have compatible block sizes and
506 *    alignments.  The stacking driver must provide a queue_limits
507 *    struct (top) and then iteratively call the stacking function for
508 *    all component (bottom) devices.  The stacking function will
509 *    attempt to combine the values and ensure proper alignment.
510 *
511 *    Returns 0 if the top and bottom queue_limits are compatible.  The
512 *    top device's block sizes and alignment offsets may be adjusted to
513 *    ensure alignment with the bottom device. If no compatible sizes
514 *    and alignments exist, -1 is returned and the resulting top
515 *    queue_limits will have the misaligned flag set to indicate that
516 *    the alignment_offset is undefined.
517 */
518int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
519		     sector_t start)
520{
521	unsigned int top, bottom, alignment, ret = 0;
522
523	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
524	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
525	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
526	t->max_write_same_sectors = min(t->max_write_same_sectors,
527					b->max_write_same_sectors);
528	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
529
530	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
531					    b->seg_boundary_mask);
532	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
533					    b->virt_boundary_mask);
534
535	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
536	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
537						 b->max_integrity_segments);
538
539	t->max_segment_size = min_not_zero(t->max_segment_size,
540					   b->max_segment_size);
541
542	t->misaligned |= b->misaligned;
543
544	alignment = queue_limit_alignment_offset(b, start);
545
546	/* Bottom device has different alignment.  Check that it is
547	 * compatible with the current top alignment.
548	 */
549	if (t->alignment_offset != alignment) {
550
551		top = max(t->physical_block_size, t->io_min)
552			+ t->alignment_offset;
553		bottom = max(b->physical_block_size, b->io_min) + alignment;
554
555		/* Verify that top and bottom intervals line up */
556		if (max(top, bottom) % min(top, bottom)) {
557			t->misaligned = 1;
558			ret = -1;
559		}
560	}
561
562	t->logical_block_size = max(t->logical_block_size,
563				    b->logical_block_size);
564
565	t->physical_block_size = max(t->physical_block_size,
566				     b->physical_block_size);
567
568	t->io_min = max(t->io_min, b->io_min);
569	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
570
571	t->cluster &= b->cluster;
572	t->discard_zeroes_data &= b->discard_zeroes_data;
573
574	/* Physical block size a multiple of the logical block size? */
575	if (t->physical_block_size & (t->logical_block_size - 1)) {
576		t->physical_block_size = t->logical_block_size;
577		t->misaligned = 1;
578		ret = -1;
579	}
580
581	/* Minimum I/O a multiple of the physical block size? */
582	if (t->io_min & (t->physical_block_size - 1)) {
583		t->io_min = t->physical_block_size;
584		t->misaligned = 1;
585		ret = -1;
586	}
587
588	/* Optimal I/O a multiple of the physical block size? */
589	if (t->io_opt & (t->physical_block_size - 1)) {
590		t->io_opt = 0;
591		t->misaligned = 1;
592		ret = -1;
593	}
594
595	t->raid_partial_stripes_expensive =
596		max(t->raid_partial_stripes_expensive,
597		    b->raid_partial_stripes_expensive);
598
599	/* Find lowest common alignment_offset */
600	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
601		% max(t->physical_block_size, t->io_min);
602
603	/* Verify that new alignment_offset is on a logical block boundary */
604	if (t->alignment_offset & (t->logical_block_size - 1)) {
605		t->misaligned = 1;
606		ret = -1;
607	}
608
609	/* Discard alignment and granularity */
610	if (b->discard_granularity) {
611		alignment = queue_limit_discard_alignment(b, start);
612
613		if (t->discard_granularity != 0 &&
614		    t->discard_alignment != alignment) {
615			top = t->discard_granularity + t->discard_alignment;
616			bottom = b->discard_granularity + alignment;
617
618			/* Verify that top and bottom intervals line up */
619			if ((max(top, bottom) % min(top, bottom)) != 0)
620				t->discard_misaligned = 1;
621		}
622
623		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
624						      b->max_discard_sectors);
625		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
626							 b->max_hw_discard_sectors);
627		t->discard_granularity = max(t->discard_granularity,
628					     b->discard_granularity);
629		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
630			t->discard_granularity;
631	}
632
633	return ret;
634}
635EXPORT_SYMBOL(blk_stack_limits);
636
637/**
638 * bdev_stack_limits - adjust queue limits for stacked drivers
639 * @t:	the stacking driver limits (top device)
640 * @bdev:  the component block_device (bottom)
641 * @start:  first data sector within component device
642 *
643 * Description:
644 *    Merges queue limits for a top device and a block_device.  Returns
645 *    0 if alignment didn't change.  Returns -1 if adding the bottom
646 *    device caused misalignment.
647 */
648int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
649		      sector_t start)
650{
651	struct request_queue *bq = bdev_get_queue(bdev);
652
653	start += get_start_sect(bdev);
654
655	return blk_stack_limits(t, &bq->limits, start);
656}
657EXPORT_SYMBOL(bdev_stack_limits);
658
659/**
660 * disk_stack_limits - adjust queue limits for stacked drivers
661 * @disk:  MD/DM gendisk (top)
662 * @bdev:  the underlying block device (bottom)
663 * @offset:  offset to beginning of data within component device
664 *
665 * Description:
666 *    Merges the limits for a top level gendisk and a bottom level
667 *    block_device.
668 */
669void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
670		       sector_t offset)
671{
672	struct request_queue *t = disk->queue;
673
674	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
675		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
676
677		disk_name(disk, 0, top);
678		bdevname(bdev, bottom);
679
680		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
681		       top, bottom);
682	}
683}
684EXPORT_SYMBOL(disk_stack_limits);
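/*
 * Editorial sketch, not part of the kernel source: a stacking driver such
 * as MD or DM usually starts from blk_set_stacking_limits() and then folds
 * in each component device.  start_sector and data_offset_bytes below are
 * hypothetical per-member data offsets.
 *
 *	blk_set_stacking_limits(&limits);
 *	blk_stack_limits(&limits, &bdev_get_queue(bdev)->limits, start_sector);
 *	// or, once the top-level gendisk exists (offset in bytes here):
 *	disk_stack_limits(disk, bdev, data_offset_bytes);
 */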
685
686/**
687 * blk_queue_dma_pad - set pad mask
688 * @q:     the request queue for the device
689 * @mask:  pad mask
690 *
691 * Set dma pad mask.
692 *
693 * Appending pad buffer to a request modifies the last entry of a
694 * scatter list such that it includes the pad buffer.
695 **/
696void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
697{
698	q->dma_pad_mask = mask;
699}
700EXPORT_SYMBOL(blk_queue_dma_pad);
701
702/**
703 * blk_queue_update_dma_pad - update pad mask
704 * @q:     the request queue for the device
705 * @mask:  pad mask
706 *
707 * Update dma pad mask.
708 *
709 * Appending pad buffer to a request modifies the last entry of a
710 * scatter list such that it includes the pad buffer.
711 **/
712void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
713{
714	if (mask > q->dma_pad_mask)
715		q->dma_pad_mask = mask;
716}
717EXPORT_SYMBOL(blk_queue_update_dma_pad);
718
719/**
720 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
721 * @q:  the request queue for the device
722 * @dma_drain_needed: fn which returns non-zero if drain is necessary
723 * @buf:	physically contiguous buffer
724 * @size:	size of the buffer in bytes
725 *
726 * Some devices have excess DMA problems and can't simply discard (or
727 * zero fill) the unwanted piece of the transfer.  They have to have a
728 * real area of memory to transfer it into.  The use case for this is
729 * ATAPI devices in DMA mode.  If the packet command causes a transfer
730 * bigger than the transfer size some HBAs will lock up if there
731 * aren't DMA elements to contain the excess transfer.  What this API
732 * does is adjust the queue so that the buf is always appended
733 * silently to the scatterlist.
734 *
735 * Note: This routine adjusts max_hw_segments to make room for appending
736 * the drain buffer.  If you call blk_queue_max_segments() after calling
737 * this routine, you must set the limit to one fewer than your device
738 * can support otherwise there won't be room for the drain buffer.
739 */
740int blk_queue_dma_drain(struct request_queue *q,
741			       dma_drain_needed_fn *dma_drain_needed,
742			       void *buf, unsigned int size)
743{
744	if (queue_max_segments(q) < 2)
745		return -EINVAL;
746	/* make room for appending the drain */
747	blk_queue_max_segments(q, queue_max_segments(q) - 1);
748	q->dma_drain_needed = dma_drain_needed;
749	q->dma_drain_buffer = buf;
750	q->dma_drain_size = size;
751
752	return 0;
753}
754EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
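/*
 * Editorial sketch, not part of the kernel source: an ATAPI-style driver
 * needing a drain area might register a hypothetical drain_needed()
 * callback and a preallocated buffer like so:
 *
 *	blk_queue_max_segments(q, hw_max_segments);
 *	blk_queue_dma_drain(q, drain_needed, drain_buf, drain_buf_size);
 *	// blk_queue_dma_drain() reserves one of those segments for the drain
 *	// buffer, so data transfers use at most hw_max_segments - 1 entries.
 */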
755
756/**
757 * blk_queue_segment_boundary - set boundary rules for segment merging
758 * @q:  the request queue for the device
759 * @mask:  the memory boundary mask
760 **/
761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
762{
763	if (mask < PAGE_SIZE - 1) {
764		mask = PAGE_SIZE - 1;
765		printk(KERN_INFO "%s: set to minimum %lx\n",
766		       __func__, mask);
767	}
768
769	q->limits.seg_boundary_mask = mask;
770}
771EXPORT_SYMBOL(blk_queue_segment_boundary);
772
773/**
774 * blk_queue_virt_boundary - set boundary rules for bio merging
775 * @q:  the request queue for the device
776 * @mask:  the memory boundary mask
777 **/
778void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
779{
780	q->limits.virt_boundary_mask = mask;
781}
782EXPORT_SYMBOL(blk_queue_virt_boundary);
783
784/**
785 * blk_queue_dma_alignment - set dma length and memory alignment
786 * @q:     the request queue for the device
787 * @mask:  alignment mask
788 *
789 * description:
790 *    set required memory and length alignment for direct dma transactions.
791 *    this is used when building direct io requests for the queue.
792 *
793 **/
794void blk_queue_dma_alignment(struct request_queue *q, int mask)
795{
796	q->dma_alignment = mask;
797}
798EXPORT_SYMBOL(blk_queue_dma_alignment);
799
800/**
801 * blk_queue_update_dma_alignment - update dma length and memory alignment
802 * @q:     the request queue for the device
803 * @mask:  alignment mask
804 *
805 * description:
806 *    update required memory and length alignment for direct dma transactions.
807 *    If the requested alignment is larger than the current alignment, then
808 *    the current queue alignment is updated to the new value, otherwise it
809 *    is left alone.  The design of this is to allow multiple objects
810 *    (driver, device, transport etc) to set their respective
811 *    alignments without having them interfere.
812 *
813 **/
814void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
815{
816	BUG_ON(mask > PAGE_SIZE);
817
818	if (mask > q->dma_alignment)
819		q->dma_alignment = mask;
820}
821EXPORT_SYMBOL(blk_queue_update_dma_alignment);
822
823/**
824 * blk_queue_flush - configure queue's cache flush capability
825 * @q:		the request queue for the device
826 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
827 *
828 * Tell block layer cache flush capability of @q.  If it supports
829 * flushing, REQ_FLUSH should be set.  If it supports bypassing
830 * write cache for individual writes, REQ_FUA should be set.
831 */
832void blk_queue_flush(struct request_queue *q, unsigned int flush)
833{
834	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
835
836	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
837		flush &= ~REQ_FUA;
838
839	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
840}
841EXPORT_SYMBOL_GPL(blk_queue_flush);
842
843void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
844{
845	q->flush_not_queueable = !queueable;
846}
847EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
848
849static int __init blk_settings_init(void)
850{
851	blk_max_low_pfn = max_low_pfn - 1;
852	blk_max_pfn = max_pfn - 1;
853	return 0;
854}
855subsys_initcall(blk_settings_init);
v6.8 (block/blk-settings.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to setting various queue properties from drivers
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/pagemap.h>
 11#include <linux/backing-dev-defs.h>
 12#include <linux/gcd.h>
 13#include <linux/lcm.h>
 14#include <linux/jiffies.h>
 15#include <linux/gfp.h>
 16#include <linux/dma-mapping.h>
 17
 18#include "blk.h"
 19#include "blk-rq-qos.h"
 20#include "blk-wbt.h"
 21
 22void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 23{
 24	q->rq_timeout = timeout;
 25}
 26EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 27
 28/**
 29 * blk_set_default_limits - reset limits to default values
 30 * @lim:  the queue_limits structure to reset
 31 *
 32 * Description:
 33 *   Returns a queue_limit struct to its default state.
 34 */
 35void blk_set_default_limits(struct queue_limits *lim)
 36{
 37	lim->max_segments = BLK_MAX_SEGMENTS;
 38	lim->max_discard_segments = 1;
 39	lim->max_integrity_segments = 0;
 40	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 41	lim->virt_boundary_mask = 0;
 42	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 43	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 44	lim->max_user_sectors = lim->max_dev_sectors = 0;
 45	lim->chunk_sectors = 0;
 46	lim->max_write_zeroes_sectors = 0;
 47	lim->max_zone_append_sectors = 0;
 48	lim->max_discard_sectors = 0;
 49	lim->max_hw_discard_sectors = 0;
 50	lim->max_secure_erase_sectors = 0;
 51	lim->discard_granularity = 512;
 52	lim->discard_alignment = 0;
 53	lim->discard_misaligned = 0;
 54	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 55	lim->bounce = BLK_BOUNCE_NONE;
 56	lim->alignment_offset = 0;
 57	lim->io_opt = 0;
 58	lim->misaligned = 0;
 59	lim->zoned = false;
 60	lim->zone_write_granularity = 0;
 61	lim->dma_alignment = 511;
 62}
 63
 64/**
 65 * blk_set_stacking_limits - set default limits for stacking devices
 66 * @lim:  the queue_limits structure to reset
 67 *
 68 * Description:
 69 *   Returns a queue_limit struct to its default state. Should be used
 70 *   by stacking drivers like DM that have no internal limits.
 71 */
 72void blk_set_stacking_limits(struct queue_limits *lim)
 73{
 74	blk_set_default_limits(lim);
 75
 76	/* Inherit limits from component devices */
 77	lim->max_segments = USHRT_MAX;
 78	lim->max_discard_segments = USHRT_MAX;
 79	lim->max_hw_sectors = UINT_MAX;
 80	lim->max_segment_size = UINT_MAX;
 81	lim->max_sectors = UINT_MAX;
 82	lim->max_dev_sectors = UINT_MAX;
 83	lim->max_write_zeroes_sectors = UINT_MAX;
 84	lim->max_zone_append_sectors = UINT_MAX;
 85}
 86EXPORT_SYMBOL(blk_set_stacking_limits);
 87
 88/**
 89 * blk_queue_bounce_limit - set bounce buffer limit for queue
 90 * @q: the request queue for the device
 91 * @bounce: bounce limit to enforce
 92 *
 93 * Description:
 94 *    Force bouncing for ISA DMA ranges or highmem.
 95 *
 96 *    DEPRECATED, don't use in new code.
 97 **/
 98void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 99{
100	q->limits.bounce = bounce;
101}
102EXPORT_SYMBOL(blk_queue_bounce_limit);
103
104/**
105 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
106 * @q:  the request queue for the device
107 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
108 *
109 * Description:
110 *    Enables a low level driver to set a hard upper limit,
111 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
112 *    the device driver based upon the capabilities of the I/O
113 *    controller.
114 *
115 *    max_dev_sectors is a hard limit imposed by the storage device for
116 *    READ/WRITE requests. It is set by the disk driver.
117 *
118 *    max_sectors is a soft limit imposed by the block layer for
119 *    filesystem type requests.  This value can be overridden on a
120 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
121 *    The soft limit can not exceed max_hw_sectors.
122 **/
123void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
124{
125	struct queue_limits *limits = &q->limits;
126	unsigned int max_sectors;
127
128	if ((max_hw_sectors << 9) < PAGE_SIZE) {
129		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
130		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
131	}
132
133	max_hw_sectors = round_down(max_hw_sectors,
134				    limits->logical_block_size >> SECTOR_SHIFT);
135	limits->max_hw_sectors = max_hw_sectors;
136
137	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
138
139	if (limits->max_user_sectors)
140		max_sectors = min(max_sectors, limits->max_user_sectors);
141	else
142		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
143
144	max_sectors = round_down(max_sectors,
145				 limits->logical_block_size >> SECTOR_SHIFT);
146	limits->max_sectors = max_sectors;
147
148	if (!q->disk)
149		return;
150	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
151}
152EXPORT_SYMBOL(blk_queue_max_hw_sectors);
153
154/**
155 * blk_queue_chunk_sectors - set size of the chunk for this queue
156 * @q:  the request queue for the device
157 * @chunk_sectors:  chunk sectors in the usual 512b unit
158 *
159 * Description:
160 *    If a driver doesn't want IOs to cross a given chunk size, it can set
161 *    this limit and prevent merging across chunks. Note that the block layer
162 *    must accept a page worth of data at any offset. So if the crossing of
163 *    chunks is a hard limitation in the driver, it must still be prepared
164 *    to split single page bios.
165 **/
166void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
167{
168	q->limits.chunk_sectors = chunk_sectors;
169}
170EXPORT_SYMBOL(blk_queue_chunk_sectors);
171
172/**
173 * blk_queue_max_discard_sectors - set max sectors for a single discard
174 * @q:  the request queue for the device
175 * @max_discard_sectors: maximum number of sectors to discard
176 **/
177void blk_queue_max_discard_sectors(struct request_queue *q,
178		unsigned int max_discard_sectors)
179{
180	q->limits.max_hw_discard_sectors = max_discard_sectors;
181	q->limits.max_discard_sectors = max_discard_sectors;
182}
183EXPORT_SYMBOL(blk_queue_max_discard_sectors);
184
185/**
186 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
187 * @q:  the request queue for the device
188 * @max_sectors: maximum number of sectors to secure_erase
189 **/
190void blk_queue_max_secure_erase_sectors(struct request_queue *q,
191		unsigned int max_sectors)
192{
193	q->limits.max_secure_erase_sectors = max_sectors;
194}
195EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
196
197/**
198 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
199 *                                      write zeroes
200 * @q:  the request queue for the device
201 * @max_write_zeroes_sectors: maximum number of sectors to write per command
202 **/
203void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
204		unsigned int max_write_zeroes_sectors)
205{
206	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
207}
208EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
209
210/**
211 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
212 * @q:  the request queue for the device
213 * @max_zone_append_sectors: maximum number of sectors to write per command
214 **/
215void blk_queue_max_zone_append_sectors(struct request_queue *q,
216		unsigned int max_zone_append_sectors)
217{
218	unsigned int max_sectors;
219
220	if (WARN_ON(!blk_queue_is_zoned(q)))
221		return;
222
223	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
224	max_sectors = min(q->limits.chunk_sectors, max_sectors);
225
226	/*
227	 * Signal eventual driver bugs resulting in the max_zone_append sectors limit
228	 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
229	 * or the max_hw_sectors limit not set.
230	 */
231	WARN_ON(!max_sectors);
232
233	q->limits.max_zone_append_sectors = max_sectors;
234}
235EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
236
237/**
238 * blk_queue_max_segments - set max hw segments for a request for this queue
239 * @q:  the request queue for the device
240 * @max_segments:  max number of segments
241 *
242 * Description:
243 *    Enables a low level driver to set an upper limit on the number of
244 *    hw data segments in a request.
245 **/
246void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
247{
248	if (!max_segments) {
249		max_segments = 1;
250		pr_info("%s: set to minimum %u\n", __func__, max_segments);
251	}
252
253	q->limits.max_segments = max_segments;
254}
255EXPORT_SYMBOL(blk_queue_max_segments);
256
257/**
258 * blk_queue_max_discard_segments - set max segments for discard requests
259 * @q:  the request queue for the device
260 * @max_segments:  max number of segments
261 *
262 * Description:
263 *    Enables a low level driver to set an upper limit on the number of
264 *    segments in a discard request.
265 **/
266void blk_queue_max_discard_segments(struct request_queue *q,
267		unsigned short max_segments)
268{
269	q->limits.max_discard_segments = max_segments;
270}
271EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
272
273/**
274 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
275 * @q:  the request queue for the device
276 * @max_size:  max size of segment in bytes
277 *
278 * Description:
279 *    Enables a low level driver to set an upper limit on the size of a
280 *    coalesced segment
281 **/
282void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
283{
284	if (max_size < PAGE_SIZE) {
285		max_size = PAGE_SIZE;
286		pr_info("%s: set to minimum %u\n", __func__, max_size);
287	}
288
289	/* see blk_queue_virt_boundary() for the explanation */
290	WARN_ON_ONCE(q->limits.virt_boundary_mask);
291
292	q->limits.max_segment_size = max_size;
293}
294EXPORT_SYMBOL(blk_queue_max_segment_size);
295
296/**
297 * blk_queue_logical_block_size - set logical block size for the queue
298 * @q:  the request queue for the device
299 * @size:  the logical block size, in bytes
300 *
301 * Description:
302 *   This should be set to the lowest possible block size that the
303 *   storage device can address.  The default of 512 covers most
304 *   hardware.
305 **/
306void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
307{
308	struct queue_limits *limits = &q->limits;
309
310	limits->logical_block_size = size;
311
312	if (limits->discard_granularity < limits->logical_block_size)
313		limits->discard_granularity = limits->logical_block_size;
314
315	if (limits->physical_block_size < size)
316		limits->physical_block_size = size;
317
318	if (limits->io_min < limits->physical_block_size)
319		limits->io_min = limits->physical_block_size;
320
321	limits->max_hw_sectors =
322		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
323	limits->max_sectors =
324		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
325}
326EXPORT_SYMBOL(blk_queue_logical_block_size);
327
328/**
329 * blk_queue_physical_block_size - set physical block size for the queue
330 * @q:  the request queue for the device
331 * @size:  the physical block size, in bytes
332 *
333 * Description:
334 *   This should be set to the lowest possible sector size that the
335 *   hardware can operate on without reverting to read-modify-write
336 *   operations.
337 */
338void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
339{
340	q->limits.physical_block_size = size;
341
342	if (q->limits.physical_block_size < q->limits.logical_block_size)
343		q->limits.physical_block_size = q->limits.logical_block_size;
344
345	if (q->limits.discard_granularity < q->limits.physical_block_size)
346		q->limits.discard_granularity = q->limits.physical_block_size;
347
348	if (q->limits.io_min < q->limits.physical_block_size)
349		q->limits.io_min = q->limits.physical_block_size;
350}
351EXPORT_SYMBOL(blk_queue_physical_block_size);
352
353/**
354 * blk_queue_zone_write_granularity - set zone write granularity for the queue
355 * @q:  the request queue for the zoned device
356 * @size:  the zone write granularity size, in bytes
357 *
358 * Description:
359 *   This should be set to the lowest possible size allowing to write in
360 *   sequential zones of a zoned block device.
361 */
362void blk_queue_zone_write_granularity(struct request_queue *q,
363				      unsigned int size)
364{
365	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
366		return;
367
368	q->limits.zone_write_granularity = size;
369
370	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
371		q->limits.zone_write_granularity = q->limits.logical_block_size;
372}
373EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
374
375/**
376 * blk_queue_alignment_offset - set physical block alignment offset
377 * @q:	the request queue for the device
378 * @offset: alignment offset in bytes
379 *
380 * Description:
381 *   Some devices are naturally misaligned to compensate for things like
382 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
383 *   should call this function for devices whose first sector is not
384 *   naturally aligned.
385 */
386void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
387{
388	q->limits.alignment_offset =
389		offset & (q->limits.physical_block_size - 1);
390	q->limits.misaligned = 0;
391}
392EXPORT_SYMBOL(blk_queue_alignment_offset);
393
394void disk_update_readahead(struct gendisk *disk)
395{
396	struct request_queue *q = disk->queue;
397
398	/*
399	 * For read-ahead of large files to be effective, we need to read ahead
400	 * at least twice the optimal I/O size.
401	 */
402	disk->bdi->ra_pages =
403		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
404	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
405}
406EXPORT_SYMBOL_GPL(disk_update_readahead);
407
408/**
409 * blk_limits_io_min - set minimum request size for a device
410 * @limits: the queue limits
411 * @min:  smallest I/O size in bytes
412 *
413 * Description:
414 *   Some devices have an internal block size bigger than the reported
415 *   hardware sector size.  This function can be used to signal the
416 *   smallest I/O the device can perform without incurring a performance
417 *   penalty.
418 */
419void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
420{
421	limits->io_min = min;
422
423	if (limits->io_min < limits->logical_block_size)
424		limits->io_min = limits->logical_block_size;
425
426	if (limits->io_min < limits->physical_block_size)
427		limits->io_min = limits->physical_block_size;
428}
429EXPORT_SYMBOL(blk_limits_io_min);
430
431/**
432 * blk_queue_io_min - set minimum request size for the queue
433 * @q:	the request queue for the device
434 * @min:  smallest I/O size in bytes
435 *
436 * Description:
437 *   Storage devices may report a granularity or preferred minimum I/O
438 *   size which is the smallest request the device can perform without
439 *   incurring a performance penalty.  For disk drives this is often the
440 *   physical block size.  For RAID arrays it is often the stripe chunk
441 *   size.  A properly aligned multiple of minimum_io_size is the
442 *   preferred request size for workloads where a high number of I/O
443 *   operations is desired.
444 */
445void blk_queue_io_min(struct request_queue *q, unsigned int min)
446{
447	blk_limits_io_min(&q->limits, min);
448}
449EXPORT_SYMBOL(blk_queue_io_min);
450
451/**
452 * blk_limits_io_opt - set optimal request size for a device
453 * @limits: the queue limits
454 * @opt:  optimal request size in bytes
455 *
456 * Description:
457 *   Storage devices may report an optimal I/O size, which is the
458 *   device's preferred unit for sustained I/O.  This is rarely reported
459 *   for disk drives.  For RAID arrays it is usually the stripe width or
460 *   the internal track size.  A properly aligned multiple of
461 *   optimal_io_size is the preferred request size for workloads where
462 *   sustained throughput is desired.
463 */
464void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
465{
466	limits->io_opt = opt;
467}
468EXPORT_SYMBOL(blk_limits_io_opt);
469
470/**
471 * blk_queue_io_opt - set optimal request size for the queue
472 * @q:	the request queue for the device
473 * @opt:  optimal request size in bytes
474 *
475 * Description:
476 *   Storage devices may report an optimal I/O size, which is the
477 *   device's preferred unit for sustained I/O.  This is rarely reported
478 *   for disk drives.  For RAID arrays it is usually the stripe width or
479 *   the internal track size.  A properly aligned multiple of
480 *   optimal_io_size is the preferred request size for workloads where
481 *   sustained throughput is desired.
482 */
483void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
484{
485	blk_limits_io_opt(&q->limits, opt);
486	if (!q->disk)
487		return;
488	q->disk->bdi->ra_pages =
489		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
490}
491EXPORT_SYMBOL(blk_queue_io_opt);
492
493static int queue_limit_alignment_offset(const struct queue_limits *lim,
494		sector_t sector)
495{
496	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
497	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
498		<< SECTOR_SHIFT;
499
500	return (granularity + lim->alignment_offset - alignment) % granularity;
501}
502
503static unsigned int queue_limit_discard_alignment(
504		const struct queue_limits *lim, sector_t sector)
505{
506	unsigned int alignment, granularity, offset;
507
508	if (!lim->max_discard_sectors)
509		return 0;
510
511	/* Why are these in bytes, not sectors? */
512	alignment = lim->discard_alignment >> SECTOR_SHIFT;
513	granularity = lim->discard_granularity >> SECTOR_SHIFT;
514	if (!granularity)
515		return 0;
516
517	/* Offset of the partition start in 'granularity' sectors */
518	offset = sector_div(sector, granularity);
519
520	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
521	offset = (granularity + alignment - offset) % granularity;
522
523	/* Turn it back into bytes, gaah */
524	return offset << SECTOR_SHIFT;
525}
526
527static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
528{
529	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
530	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
531		sectors = PAGE_SIZE >> SECTOR_SHIFT;
532	return sectors;
533}
534
535/**
536 * blk_stack_limits - adjust queue_limits for stacked devices
537 * @t:	the stacking driver limits (top device)
538 * @b:  the underlying queue limits (bottom, component device)
539 * @start:  first data sector within component device
540 *
541 * Description:
542 *    This function is used by stacking drivers like MD and DM to ensure
543 *    that all component devices have compatible block sizes and
544 *    alignments.  The stacking driver must provide a queue_limits
545 *    struct (top) and then iteratively call the stacking function for
546 *    all component (bottom) devices.  The stacking function will
547 *    attempt to combine the values and ensure proper alignment.
548 *
549 *    Returns 0 if the top and bottom queue_limits are compatible.  The
550 *    top device's block sizes and alignment offsets may be adjusted to
551 *    ensure alignment with the bottom device. If no compatible sizes
552 *    and alignments exist, -1 is returned and the resulting top
553 *    queue_limits will have the misaligned flag set to indicate that
554 *    the alignment_offset is undefined.
555 */
556int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
557		     sector_t start)
558{
559	unsigned int top, bottom, alignment, ret = 0;
560
561	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
562	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
563	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
564	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
565					b->max_write_zeroes_sectors);
566	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
567					b->max_zone_append_sectors);
568	t->bounce = max(t->bounce, b->bounce);
569
570	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
571					    b->seg_boundary_mask);
572	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
573					    b->virt_boundary_mask);
574
575	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
576	t->max_discard_segments = min_not_zero(t->max_discard_segments,
577					       b->max_discard_segments);
578	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
579						 b->max_integrity_segments);
580
581	t->max_segment_size = min_not_zero(t->max_segment_size,
582					   b->max_segment_size);
583
584	t->misaligned |= b->misaligned;
585
586	alignment = queue_limit_alignment_offset(b, start);
587
588	/* Bottom device has different alignment.  Check that it is
589	 * compatible with the current top alignment.
590	 */
591	if (t->alignment_offset != alignment) {
592
593		top = max(t->physical_block_size, t->io_min)
594			+ t->alignment_offset;
595		bottom = max(b->physical_block_size, b->io_min) + alignment;
596
597		/* Verify that top and bottom intervals line up */
598		if (max(top, bottom) % min(top, bottom)) {
599			t->misaligned = 1;
600			ret = -1;
601		}
602	}
603
604	t->logical_block_size = max(t->logical_block_size,
605				    b->logical_block_size);
606
607	t->physical_block_size = max(t->physical_block_size,
608				     b->physical_block_size);
609
610	t->io_min = max(t->io_min, b->io_min);
611	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
612	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
613
614	/* Set non-power-of-2 compatible chunk_sectors boundary */
615	if (b->chunk_sectors)
616		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
617
618	/* Physical block size a multiple of the logical block size? */
619	if (t->physical_block_size & (t->logical_block_size - 1)) {
620		t->physical_block_size = t->logical_block_size;
621		t->misaligned = 1;
622		ret = -1;
623	}
624
625	/* Minimum I/O a multiple of the physical block size? */
626	if (t->io_min & (t->physical_block_size - 1)) {
627		t->io_min = t->physical_block_size;
628		t->misaligned = 1;
629		ret = -1;
630	}
631
632	/* Optimal I/O a multiple of the physical block size? */
633	if (t->io_opt & (t->physical_block_size - 1)) {
634		t->io_opt = 0;
635		t->misaligned = 1;
636		ret = -1;
637	}
638
639	/* chunk_sectors a multiple of the physical block size? */
640	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
641		t->chunk_sectors = 0;
642		t->misaligned = 1;
643		ret = -1;
644	}
645
646	t->raid_partial_stripes_expensive =
647		max(t->raid_partial_stripes_expensive,
648		    b->raid_partial_stripes_expensive);
649
650	/* Find lowest common alignment_offset */
651	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
652		% max(t->physical_block_size, t->io_min);
653
654	/* Verify that new alignment_offset is on a logical block boundary */
655	if (t->alignment_offset & (t->logical_block_size - 1)) {
656		t->misaligned = 1;
657		ret = -1;
658	}
659
660	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
661	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
662	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
663
664	/* Discard alignment and granularity */
665	if (b->discard_granularity) {
666		alignment = queue_limit_discard_alignment(b, start);
667
668		if (t->discard_granularity != 0 &&
669		    t->discard_alignment != alignment) {
670			top = t->discard_granularity + t->discard_alignment;
671			bottom = b->discard_granularity + alignment;
672
673			/* Verify that top and bottom intervals line up */
674			if ((max(top, bottom) % min(top, bottom)) != 0)
675				t->discard_misaligned = 1;
676		}
677
678		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
679						      b->max_discard_sectors);
680		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
681							 b->max_hw_discard_sectors);
682		t->discard_granularity = max(t->discard_granularity,
683					     b->discard_granularity);
684		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
685			t->discard_granularity;
686	}
687	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
688						   b->max_secure_erase_sectors);
689	t->zone_write_granularity = max(t->zone_write_granularity,
690					b->zone_write_granularity);
691	t->zoned = max(t->zoned, b->zoned);
692	return ret;
693}
694EXPORT_SYMBOL(blk_stack_limits);
695
696/**
697 * disk_stack_limits - adjust queue limits for stacked drivers
698 * @disk:  MD/DM gendisk (top)
699 * @bdev:  the underlying block device (bottom)
700 * @offset:  offset to beginning of data within component device
701 *
702 * Description:
703 *    Merges the limits for a top level gendisk and a bottom level
704 *    block_device.
705 */
706void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
707		       sector_t offset)
708{
709	struct request_queue *t = disk->queue;
710
711	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
712			get_start_sect(bdev) + (offset >> 9)) < 0)
713		pr_notice("%s: Warning: Device %pg is misaligned\n",
714			disk->disk_name, bdev);
715
716	disk_update_readahead(disk);
717}
718EXPORT_SYMBOL(disk_stack_limits);
719
720/**
721 * blk_queue_update_dma_pad - update pad mask
722 * @q:     the request queue for the device
723 * @mask:  pad mask
724 *
725 * Update dma pad mask.
726 *
727 * Appending pad buffer to a request modifies the last entry of a
728 * scatter list such that it includes the pad buffer.
729 **/
730void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
731{
732	if (mask > q->dma_pad_mask)
733		q->dma_pad_mask = mask;
734}
735EXPORT_SYMBOL(blk_queue_update_dma_pad);
736
737/**
738 * blk_queue_segment_boundary - set boundary rules for segment merging
739 * @q:  the request queue for the device
740 * @mask:  the memory boundary mask
741 **/
742void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
743{
744	if (mask < PAGE_SIZE - 1) {
745		mask = PAGE_SIZE - 1;
746		pr_info("%s: set to minimum %lx\n", __func__, mask);
747	}
748
749	q->limits.seg_boundary_mask = mask;
750}
751EXPORT_SYMBOL(blk_queue_segment_boundary);
752
753/**
754 * blk_queue_virt_boundary - set boundary rules for bio merging
755 * @q:  the request queue for the device
756 * @mask:  the memory boundary mask
757 **/
758void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
759{
760	q->limits.virt_boundary_mask = mask;
761
762	/*
763	 * Devices that require a virtual boundary do not support scatter/gather
764	 * I/O natively, but instead require a descriptor list entry for each
765 * page (which might not be identical to the Linux PAGE_SIZE).  Because
766	 * of that they are not limited by our notion of "segment size".
767	 */
768	if (mask)
769		q->limits.max_segment_size = UINT_MAX;
770}
771EXPORT_SYMBOL(blk_queue_virt_boundary);
772
773/**
774 * blk_queue_dma_alignment - set dma length and memory alignment
775 * @q:     the request queue for the device
776 * @mask:  alignment mask
777 *
778 * description:
779 *    set required memory and length alignment for direct dma transactions.
780 *    this is used when building direct io requests for the queue.
781 *
782 **/
783void blk_queue_dma_alignment(struct request_queue *q, int mask)
784{
785	q->limits.dma_alignment = mask;
786}
787EXPORT_SYMBOL(blk_queue_dma_alignment);
788
789/**
790 * blk_queue_update_dma_alignment - update dma length and memory alignment
791 * @q:     the request queue for the device
792 * @mask:  alignment mask
793 *
794 * description:
795 *    update required memory and length alignment for direct dma transactions.
796 *    If the requested alignment is larger than the current alignment, then
797 *    the current queue alignment is updated to the new value, otherwise it
798 *    is left alone.  The design of this is to allow multiple objects
799 *    (driver, device, transport etc) to set their respective
800 *    alignments without having them interfere.
801 *
802 **/
803void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
804{
805	BUG_ON(mask > PAGE_SIZE);
806
807	if (mask > q->limits.dma_alignment)
808		q->limits.dma_alignment = mask;
809}
810EXPORT_SYMBOL(blk_queue_update_dma_alignment);
811
812/**
813 * blk_set_queue_depth - tell the block layer about the device queue depth
814 * @q:		the request queue for the device
815 * @depth:		queue depth
816 *
817 */
818void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
819{
820	q->queue_depth = depth;
821	rq_qos_queue_depth_changed(q);
822}
823EXPORT_SYMBOL(blk_set_queue_depth);
824
825/**
826 * blk_queue_write_cache - configure queue's write cache
827 * @q:		the request queue for the device
828 * @wc:		write back cache on or off
829 * @fua:	device supports FUA writes, if true
830 *
831 * Tell the block layer about the write cache of @q.
832 */
833void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
834{
835	if (wc) {
836		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
837		blk_queue_flag_set(QUEUE_FLAG_WC, q);
838	} else {
839		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
840		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
841	}
842	if (fua)
843		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
844	else
845		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
846}
847EXPORT_SYMBOL_GPL(blk_queue_write_cache);
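/*
 * Editorial sketch, not part of the kernel source: a driver whose device
 * has a volatile write cache and honours FUA writes would announce both at
 * probe time, while a write-through device would pass (false, false).
 *
 *	blk_queue_write_cache(q, true, true);
 */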
848
849/**
850 * blk_queue_required_elevator_features - Set a queue required elevator features
851 * @q:		the request queue for the target device
852 * @features:	Required elevator features OR'ed together
853 *
854 * Tell the block layer that for the device controlled through @q, only the
855 * elevators that can be used are those that implement at least the set of
856 * features specified by @features.
857 */
858void blk_queue_required_elevator_features(struct request_queue *q,
859					  unsigned int features)
860{
861	q->required_elevator_features = features;
862}
863EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
864
865/**
866 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
867 * @q:		the request queue for the device
868 * @dev:	the device pointer for dma
869 *
870 * Tell the block layer about merging the segments by dma map of @q.
871 */
872bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
873				       struct device *dev)
874{
875	unsigned long boundary = dma_get_merge_boundary(dev);
876
877	if (!boundary)
878		return false;
879
880	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
881	blk_queue_virt_boundary(q, boundary);
882
883	return true;
884}
885EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
886
887/**
888 * disk_set_zoned - indicate a zoned device
889 * @disk:	gendisk to configure
890 */
891void disk_set_zoned(struct gendisk *disk)
892{
893	struct request_queue *q = disk->queue;
894
895	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
896
897	/*
898	 * Set the zone write granularity to the device logical block
899	 * size by default. The driver can change this value if needed.
900	 */
901	q->limits.zoned = true;
902	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
903}
904EXPORT_SYMBOL_GPL(disk_set_zoned);
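/*
 * Editorial sketch, not part of the kernel source: a zoned driver would
 * typically pair this with the zone geometry; zone_sectors and
 * max_append_sectors below are hypothetical values in the usual 512b unit.
 *
 *	disk_set_zoned(disk);
 *	blk_queue_chunk_sectors(q, zone_sectors);	// zone size
 *	blk_queue_max_zone_append_sectors(q, max_append_sectors);
 */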
905
906int bdev_alignment_offset(struct block_device *bdev)
907{
908	struct request_queue *q = bdev_get_queue(bdev);
909
910	if (q->limits.misaligned)
911		return -1;
912	if (bdev_is_partition(bdev))
913		return queue_limit_alignment_offset(&q->limits,
914				bdev->bd_start_sect);
915	return q->limits.alignment_offset;
916}
917EXPORT_SYMBOL_GPL(bdev_alignment_offset);
918
919unsigned int bdev_discard_alignment(struct block_device *bdev)
920{
921	struct request_queue *q = bdev_get_queue(bdev);
922
923	if (bdev_is_partition(bdev))
924		return queue_limit_discard_alignment(&q->limits,
925				bdev->bd_start_sect);
926	return q->limits.discard_alignment;
927}
928EXPORT_SYMBOL_GPL(bdev_discard_alignment);