v3.15
  1/*
  2 * Functions related to setting various queue properties from drivers
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/gcd.h>
 11#include <linux/lcm.h>
 12#include <linux/jiffies.h>
 13#include <linux/gfp.h>
 14
 15#include "blk.h"
 16
 17unsigned long blk_max_low_pfn;
 18EXPORT_SYMBOL(blk_max_low_pfn);
 19
 20unsigned long blk_max_pfn;
 21
 22/**
 23 * blk_queue_prep_rq - set a prepare_request function for queue
 24 * @q:		queue
 25 * @pfn:	prepare_request function
 26 *
 27 * It's possible for a queue to register a prepare_request callback which
 28 * is invoked before the request is handed to the request_fn. The goal of
 29 * the function is to prepare a request for I/O; it can be used, for
 30 * instance, to build a cdb from the request data.
 31 *
 32 */
 33void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 34{
 35	q->prep_rq_fn = pfn;
 36}
 37EXPORT_SYMBOL(blk_queue_prep_rq);
 38
 39/**
 40 * blk_queue_unprep_rq - set an unprepare_request function for queue
 41 * @q:		queue
 42 * @ufn:	unprepare_request function
 43 *
 44 * It's possible for a queue to register an unprepare_request callback
 45 * which is invoked before the request is finally completed. The goal
 46 * of the function is to deallocate any data that was allocated in the
 47 * prepare_request callback.
 48 *
 49 */
 50void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 51{
 52	q->unprep_rq_fn = ufn;
 53}
 54EXPORT_SYMBOL(blk_queue_unprep_rq);
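/*
 * Example (illustrative sketch, not part of blk-settings.c): a driver
 * could pair the two callbacks so that whatever the prepare step
 * allocates is freed again on completion.  struct my_cmd and
 * my_build_cdb() are hypothetical.
 */
static int my_prep_rq(struct request_queue *q, struct request *rq)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return BLKPREP_DEFER;	/* out of memory, retry later */

	my_build_cdb(cmd, rq);		/* hypothetical helper */
	rq->special = cmd;
	return BLKPREP_OK;
}

static void my_unprep_rq(struct request_queue *q, struct request *rq)
{
	kfree(rq->special);		/* undo my_prep_rq()'s allocation */
	rq->special = NULL;
}

/* registration, typically at probe time: */
blk_queue_prep_rq(q, my_prep_rq);
blk_queue_unprep_rq(q, my_unprep_rq);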
 55
 56/**
 57 * blk_queue_merge_bvec - set a merge_bvec function for queue
 58 * @q:		queue
 59 * @mbfn:	merge_bvec_fn
 60 *
 61 * Usually queues have static limitations on the max sectors or segments that
 62 * we can put in a request. Stacking drivers may have some settings that
 63 * are dynamic, and thus we have to query the queue whether it is ok to
 64 * add a new bio_vec to a bio at a given offset or not. If the block device
 65 * has such limitations, it needs to register a merge_bvec_fn to control
 66 * the size of bios sent to it. Note that a block device *must* allow a
 67 * single page to be added to an empty bio. The block device driver may want
 68 * to use the bio_split() function to deal with these bios. By default
 69 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 70 * honored.
 71 */
 72void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 73{
 74	q->merge_bvec_fn = mbfn;
 75}
 76EXPORT_SYMBOL(blk_queue_merge_bvec);
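/*
 * Example (hypothetical sketch, modeled loosely on md/raid0): a driver
 * that does not want a bio to grow across a 64KiB chunk.  The callback
 * returns how many bytes of @bvec it is willing to accept; per the
 * contract above, one page into an empty bio must always be accepted.
 */
static int my_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	unsigned int chunk_sectors = 128;		/* 64KiB chunk, assumed */
	unsigned int bio_sectors = bvm->bi_size >> 9;
	int max;

	max = (chunk_sectors - ((bvm->bi_sector & (chunk_sectors - 1)) +
				bio_sectors)) << 9;
	if (max < 0)
		max = 0;		/* bio already ends at the boundary */
	if (max <= bvec->bv_len && bio_sectors == 0)
		return bvec->bv_len;	/* one page into an empty bio always fits */
	return max;
}

blk_queue_merge_bvec(q, my_merge_bvec);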
 77
 78void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 79{
 80	q->softirq_done_fn = fn;
 81}
 82EXPORT_SYMBOL(blk_queue_softirq_done);
 83
 84void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 85{
 86	q->rq_timeout = timeout;
 87}
 88EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 89
 90void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
 91{
 92	q->rq_timed_out_fn = fn;
 93}
 94EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
 95
 96void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 97{
 98	q->lld_busy_fn = fn;
 99}
100EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
101
102/**
103 * blk_set_default_limits - reset limits to default values
104 * @lim:  the queue_limits structure to reset
105 *
106 * Description:
107 *   Returns a queue_limits struct to its default state.
108 */
109void blk_set_default_limits(struct queue_limits *lim)
110{
111	lim->max_segments = BLK_MAX_SEGMENTS;
112	lim->max_integrity_segments = 0;
113	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
114	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
115	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
116	lim->max_write_same_sectors = 0;
117	lim->max_discard_sectors = 0;
118	lim->discard_granularity = 0;
119	lim->discard_alignment = 0;
120	lim->discard_misaligned = 0;
121	lim->discard_zeroes_data = 0;
122	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
123	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
124	lim->alignment_offset = 0;
125	lim->io_opt = 0;
126	lim->misaligned = 0;
127	lim->cluster = 1;
128}
129EXPORT_SYMBOL(blk_set_default_limits);
130
131/**
132 * blk_set_stacking_limits - set default limits for stacking devices
133 * @lim:  the queue_limits structure to reset
134 *
135 * Description:
136 *   Returns a queue_limits struct to its default state. Should be used
137 *   by stacking drivers like DM that have no internal limits.
138 */
139void blk_set_stacking_limits(struct queue_limits *lim)
140{
141	blk_set_default_limits(lim);
142
143	/* Inherit limits from component devices */
144	lim->discard_zeroes_data = 1;
145	lim->max_segments = USHRT_MAX;
146	lim->max_hw_sectors = UINT_MAX;
147	lim->max_segment_size = UINT_MAX;
148	lim->max_sectors = UINT_MAX;
149	lim->max_write_same_sectors = UINT_MAX;
150}
151EXPORT_SYMBOL(blk_set_stacking_limits);
152
153/**
154 * blk_queue_make_request - define an alternate make_request function for a device
155 * @q:  the request queue for the device to be affected
156 * @mfn: the alternate make_request function
157 *
158 * Description:
159 *    The normal way for &struct bios to be passed to a device
160 *    driver is for them to be collected into requests on a request
161 *    queue, and then to allow the device driver to select requests
162 *    off that queue when it is ready.  This works well for many block
163 *    devices. However some block devices (typically virtual devices
164 *    such as md or lvm) do not benefit from the processing on the
165 *    request queue, and are served best by having the requests passed
166 *    directly to them.  This can be achieved by providing a function
167 *    to blk_queue_make_request().
168 *
169 * Caveat:
170 *    The driver that does this *must* be able to deal appropriately
171 *    with buffers in "highmemory". This can be accomplished by either calling
172 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
173 *    blk_queue_bounce() to create a buffer in normal memory.
174 **/
175void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
176{
177	/*
178	 * set defaults
179	 */
180	q->nr_requests = BLKDEV_MAX_RQ;
181
182	q->make_request_fn = mfn;
183	blk_queue_dma_alignment(q, 511);
184	blk_queue_congestion_threshold(q);
185	q->nr_batching = BLK_BATCH_REQ;
186
187	blk_set_default_limits(&q->limits);
188
189	/*
190	 * by default assume old behaviour and bounce for any highmem page
191	 */
192	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
193}
194EXPORT_SYMBOL(blk_queue_make_request);
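/*
 * Example (sketch): a bio-based virtual driver bypasses the request
 * queue entirely; my_remap_and_submit() is an assumed helper.  Note
 * that in v3.15 the make_request function returns void.
 */
static void my_make_request(struct request_queue *q, struct bio *bio)
{
	my_remap_and_submit(bio);	/* handle the bio directly */
}

q = blk_alloc_queue(GFP_KERNEL);
if (q)
	blk_queue_make_request(q, my_make_request);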
195
196/**
197 * blk_queue_bounce_limit - set bounce buffer limit for queue
198 * @q: the request queue for the device
199 * @max_addr: the maximum address the device can handle
200 *
201 * Description:
202 *    Different hardware can have different requirements as to what pages
203 *    it can do I/O directly to. A low level driver can call
204 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
205 *    buffers for doing I/O to pages residing above @max_addr.
206 **/
207void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
208{
209	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
210	int dma = 0;
211
212	q->bounce_gfp = GFP_NOIO;
213#if BITS_PER_LONG == 64
214	/*
215	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
216	 * some IOMMUs can handle everything, but I don't know of a
217	 * way to test this here.
218	 */
219	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
220		dma = 1;
221	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
222#else
223	if (b_pfn < blk_max_low_pfn)
224		dma = 1;
225	q->limits.bounce_pfn = b_pfn;
226#endif
227	if (dma) {
228		init_emergency_isa_pool();
229		q->bounce_gfp = GFP_NOIO | GFP_DMA;
230		q->limits.bounce_pfn = b_pfn;
231	}
232}
233EXPORT_SYMBOL(blk_queue_bounce_limit);
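/*
 * Example (sketch): typical calls.  A device that can only address the
 * low 4GB asks for bouncing above that; BLK_BOUNCE_ANY effectively
 * disables bouncing for fully 64-bit capable hardware.
 */
blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	/* bounce pages above 4GB */
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	/* no bouncing needed */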
234
235/**
236 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
237 * @limits: the queue limits
238 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
239 *
240 * Description:
241 *    Enables a low level driver to set a hard upper limit,
242 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
243 *    the device driver based upon the combined capabilities of I/O
244 *    controller and storage device.
245 *
246 *    max_sectors is a soft limit imposed by the block layer for
247 *    filesystem type requests.  This value can be overridden on a
248 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
249 *    The soft limit cannot exceed max_hw_sectors.
250 **/
251void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
252{
253	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
254		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
255		printk(KERN_INFO "%s: set to minimum %d\n",
256		       __func__, max_hw_sectors);
257	}
258
259	limits->max_hw_sectors = max_hw_sectors;
260	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
261				    BLK_DEF_MAX_SECTORS);
262}
263EXPORT_SYMBOL(blk_limits_max_hw_sectors);
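/*
 * Example (worked numbers): a controller limited to 1MiB per request
 * passes 2048 sectors (2048 * 512b = 1MiB).  max_sectors then becomes
 * min(2048, BLK_DEF_MAX_SECTORS), and userspace may lower it further
 * via /sys/block/<device>/queue/max_sectors_kb.
 */
blk_limits_max_hw_sectors(&q->limits, 2048);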
264
265/**
266 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
267 * @q:  the request queue for the device
268 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
269 *
270 * Description:
271 *    See description for blk_limits_max_hw_sectors().
272 **/
273void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
274{
275	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
276}
277EXPORT_SYMBOL(blk_queue_max_hw_sectors);
278
279/**
280 * blk_queue_max_discard_sectors - set max sectors for a single discard
281 * @q:  the request queue for the device
282 * @max_discard_sectors: maximum number of sectors to discard
283 **/
284void blk_queue_max_discard_sectors(struct request_queue *q,
285		unsigned int max_discard_sectors)
286{
287	q->limits.max_discard_sectors = max_discard_sectors;
288}
289EXPORT_SYMBOL(blk_queue_max_discard_sectors);
290
291/**
292 * blk_queue_max_write_same_sectors - set max sectors for a single write same
293 * @q:  the request queue for the device
294 * @max_write_same_sectors: maximum number of sectors to write per command
295 **/
296void blk_queue_max_write_same_sectors(struct request_queue *q,
297				      unsigned int max_write_same_sectors)
298{
299	q->limits.max_write_same_sectors = max_write_same_sectors;
300}
301EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
302
303/**
304 * blk_queue_max_segments - set max hw segments for a request for this queue
305 * @q:  the request queue for the device
306 * @max_segments:  max number of segments
307 *
308 * Description:
309 *    Enables a low level driver to set an upper limit on the number of
310 *    hw data segments in a request.
311 **/
312void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
313{
314	if (!max_segments) {
315		max_segments = 1;
316		printk(KERN_INFO "%s: set to minimum %d\n",
317		       __func__, max_segments);
318	}
319
320	q->limits.max_segments = max_segments;
321}
322EXPORT_SYMBOL(blk_queue_max_segments);
323
324/**
325 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
326 * @q:  the request queue for the device
327 * @max_size:  max size of segment in bytes
328 *
329 * Description:
330 *    Enables a low level driver to set an upper limit on the size of a
331 *    coalesced segment
332 **/
333void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
334{
335	if (max_size < PAGE_CACHE_SIZE) {
336		max_size = PAGE_CACHE_SIZE;
337		printk(KERN_INFO "%s: set to minimum %d\n",
338		       __func__, max_size);
339	}
340
341	q->limits.max_segment_size = max_size;
342}
343EXPORT_SYMBOL(blk_queue_max_segment_size);
344
345/**
346 * blk_queue_logical_block_size - set logical block size for the queue
347 * @q:  the request queue for the device
348 * @size:  the logical block size, in bytes
349 *
350 * Description:
351 *   This should be set to the lowest possible block size that the
352 *   storage device can address.  The default of 512 covers most
353 *   hardware.
354 **/
355void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
356{
357	q->limits.logical_block_size = size;
358
359	if (q->limits.physical_block_size < size)
360		q->limits.physical_block_size = size;
361
362	if (q->limits.io_min < q->limits.physical_block_size)
363		q->limits.io_min = q->limits.physical_block_size;
364}
365EXPORT_SYMBOL(blk_queue_logical_block_size);
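/*
 * Example (sketch): a 4Kn drive reports 4096-byte logical blocks; the
 * function pulls physical_block_size and io_min up to match.
 */
blk_queue_logical_block_size(q, 4096);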
366
367/**
368 * blk_queue_physical_block_size - set physical block size for the queue
369 * @q:  the request queue for the device
370 * @size:  the physical block size, in bytes
371 *
372 * Description:
373 *   This should be set to the lowest possible sector size that the
374 *   hardware can operate on without reverting to read-modify-write
375 *   operations.
376 */
377void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
378{
379	q->limits.physical_block_size = size;
380
381	if (q->limits.physical_block_size < q->limits.logical_block_size)
382		q->limits.physical_block_size = q->limits.logical_block_size;
383
384	if (q->limits.io_min < q->limits.physical_block_size)
385		q->limits.io_min = q->limits.physical_block_size;
386}
387EXPORT_SYMBOL(blk_queue_physical_block_size);
388
389/**
390 * blk_queue_alignment_offset - set physical block alignment offset
391 * @q:	the request queue for the device
392 * @offset: alignment offset in bytes
393 *
394 * Description:
395 *   Some devices are naturally misaligned to compensate for things like
396 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
397 *   should call this function for devices whose first sector is not
398 *   naturally aligned.
399 */
400void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
401{
402	q->limits.alignment_offset =
403		offset & (q->limits.physical_block_size - 1);
404	q->limits.misaligned = 0;
405}
406EXPORT_SYMBOL(blk_queue_alignment_offset);
407
408/**
409 * blk_limits_io_min - set minimum request size for a device
410 * @limits: the queue limits
411 * @min:  smallest I/O size in bytes
412 *
413 * Description:
414 *   Some devices have an internal block size bigger than the reported
415 *   hardware sector size.  This function can be used to signal the
416 *   smallest I/O the device can perform without incurring a performance
417 *   penalty.
418 */
419void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
420{
421	limits->io_min = min;
422
423	if (limits->io_min < limits->logical_block_size)
424		limits->io_min = limits->logical_block_size;
425
426	if (limits->io_min < limits->physical_block_size)
427		limits->io_min = limits->physical_block_size;
428}
429EXPORT_SYMBOL(blk_limits_io_min);
430
431/**
432 * blk_queue_io_min - set minimum request size for the queue
433 * @q:	the request queue for the device
434 * @min:  smallest I/O size in bytes
435 *
436 * Description:
437 *   Storage devices may report a granularity or preferred minimum I/O
438 *   size which is the smallest request the device can perform without
439 *   incurring a performance penalty.  For disk drives this is often the
440 *   physical block size.  For RAID arrays it is often the stripe chunk
441 *   size.  A properly aligned multiple of minimum_io_size is the
442 *   preferred request size for workloads where a high number of I/O
443 *   operations is desired.
444 */
445void blk_queue_io_min(struct request_queue *q, unsigned int min)
446{
447	blk_limits_io_min(&q->limits, min);
448}
449EXPORT_SYMBOL(blk_queue_io_min);
450
451/**
452 * blk_limits_io_opt - set optimal request size for a device
453 * @limits: the queue limits
454 * @opt:  optimal request size in bytes
455 *
456 * Description:
457 *   Storage devices may report an optimal I/O size, which is the
458 *   device's preferred unit for sustained I/O.  This is rarely reported
459 *   for disk drives.  For RAID arrays it is usually the stripe width or
460 *   the internal track size.  A properly aligned multiple of
461 *   optimal_io_size is the preferred request size for workloads where
462 *   sustained throughput is desired.
463 */
464void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
465{
466	limits->io_opt = opt;
467}
468EXPORT_SYMBOL(blk_limits_io_opt);
469
470/**
471 * blk_queue_io_opt - set optimal request size for the queue
472 * @q:	the request queue for the device
473 * @opt:  optimal request size in bytes
474 *
475 * Description:
476 *   Storage devices may report an optimal I/O size, which is the
477 *   device's preferred unit for sustained I/O.  This is rarely reported
478 *   for disk drives.  For RAID arrays it is usually the stripe width or
479 *   the internal track size.  A properly aligned multiple of
480 *   optimal_io_size is the preferred request size for workloads where
481 *   sustained throughput is desired.
482 */
483void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
484{
485	blk_limits_io_opt(&q->limits, opt);
486}
487EXPORT_SYMBOL(blk_queue_io_opt);
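/*
 * Example (sketch): a RAID5 array with a 64KiB chunk and four data
 * disks reports the chunk as the minimum and the full stripe width as
 * the optimal I/O size.
 */
blk_queue_io_min(q, 64 * 1024);		/* 64KiB chunk */
blk_queue_io_opt(q, 4 * 64 * 1024);	/* 256KiB stripe width */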
488
489/**
490 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
491 * @t:	the stacking driver (top)
492 * @b:  the underlying device (bottom)
493 **/
494void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
495{
496	blk_stack_limits(&t->limits, &b->limits, 0);
497}
498EXPORT_SYMBOL(blk_queue_stack_limits);
499
500/**
501 * blk_stack_limits - adjust queue_limits for stacked devices
502 * @t:	the stacking driver limits (top device)
503 * @b:  the underlying queue limits (bottom, component device)
504 * @start:  first data sector within component device
505 *
506 * Description:
507 *    This function is used by stacking drivers like MD and DM to ensure
508 *    that all component devices have compatible block sizes and
509 *    alignments.  The stacking driver must provide a queue_limits
510 *    struct (top) and then iteratively call the stacking function for
511 *    all component (bottom) devices.  The stacking function will
512 *    attempt to combine the values and ensure proper alignment.
513 *
514 *    Returns 0 if the top and bottom queue_limits are compatible.  The
515 *    top device's block sizes and alignment offsets may be adjusted to
516 *    ensure alignment with the bottom device. If no compatible sizes
517 *    and alignments exist, -1 is returned and the resulting top
518 *    queue_limits will have the misaligned flag set to indicate that
519 *    the alignment_offset is undefined.
520 */
521int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
522		     sector_t start)
523{
524	unsigned int top, bottom, alignment, ret = 0;
525
526	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
527	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
528	t->max_write_same_sectors = min(t->max_write_same_sectors,
529					b->max_write_same_sectors);
530	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
531
532	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
533					    b->seg_boundary_mask);
534
535	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
536	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
537						 b->max_integrity_segments);
538
539	t->max_segment_size = min_not_zero(t->max_segment_size,
540					   b->max_segment_size);
541
542	t->misaligned |= b->misaligned;
543
544	alignment = queue_limit_alignment_offset(b, start);
545
546	/* Bottom device has different alignment.  Check that it is
547	 * compatible with the current top alignment.
548	 */
549	if (t->alignment_offset != alignment) {
550
551		top = max(t->physical_block_size, t->io_min)
552			+ t->alignment_offset;
553		bottom = max(b->physical_block_size, b->io_min) + alignment;
554
555		/* Verify that top and bottom intervals line up */
556		if (max(top, bottom) & (min(top, bottom) - 1)) {
557			t->misaligned = 1;
558			ret = -1;
559		}
560	}
561
562	t->logical_block_size = max(t->logical_block_size,
563				    b->logical_block_size);
564
565	t->physical_block_size = max(t->physical_block_size,
566				     b->physical_block_size);
567
568	t->io_min = max(t->io_min, b->io_min);
569	t->io_opt = lcm(t->io_opt, b->io_opt);
570
571	t->cluster &= b->cluster;
572	t->discard_zeroes_data &= b->discard_zeroes_data;
573
574	/* Physical block size a multiple of the logical block size? */
575	if (t->physical_block_size & (t->logical_block_size - 1)) {
576		t->physical_block_size = t->logical_block_size;
577		t->misaligned = 1;
578		ret = -1;
579	}
580
581	/* Minimum I/O a multiple of the physical block size? */
582	if (t->io_min & (t->physical_block_size - 1)) {
583		t->io_min = t->physical_block_size;
584		t->misaligned = 1;
585		ret = -1;
586	}
587
588	/* Optimal I/O a multiple of the physical block size? */
589	if (t->io_opt & (t->physical_block_size - 1)) {
590		t->io_opt = 0;
591		t->misaligned = 1;
592		ret = -1;
593	}
594
595	t->raid_partial_stripes_expensive =
596		max(t->raid_partial_stripes_expensive,
597		    b->raid_partial_stripes_expensive);
598
599	/* Find lowest common alignment_offset */
600	t->alignment_offset = lcm(t->alignment_offset, alignment)
601		& (max(t->physical_block_size, t->io_min) - 1);
602
603	/* Verify that new alignment_offset is on a logical block boundary */
604	if (t->alignment_offset & (t->logical_block_size - 1)) {
605		t->misaligned = 1;
606		ret = -1;
607	}
608
609	/* Discard alignment and granularity */
610	if (b->discard_granularity) {
611		alignment = queue_limit_discard_alignment(b, start);
612
613		if (t->discard_granularity != 0 &&
614		    t->discard_alignment != alignment) {
615			top = t->discard_granularity + t->discard_alignment;
616			bottom = b->discard_granularity + alignment;
617
618			/* Verify that top and bottom intervals line up */
619			if ((max(top, bottom) % min(top, bottom)) != 0)
620				t->discard_misaligned = 1;
621		}
622
623		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
624						      b->max_discard_sectors);
625		t->discard_granularity = max(t->discard_granularity,
626					     b->discard_granularity);
627		t->discard_alignment = lcm(t->discard_alignment, alignment) %
628			t->discard_granularity;
629	}
630
631	return ret;
632}
633EXPORT_SYMBOL(blk_stack_limits);
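/*
 * Example (hypothetical DM-style iteration): start from the permissive
 * stacking defaults, then fold in every component device.  bdevs[],
 * starts[] and nr_components are assumed to describe the components.
 */
struct queue_limits lim;
int i;

blk_set_stacking_limits(&lim);
for (i = 0; i < nr_components; i++)
	if (bdev_stack_limits(&lim, bdevs[i], starts[i]) < 0)
		pr_warn("component %d caused misalignment\n", i);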
634
635/**
636 * bdev_stack_limits - adjust queue limits for stacked drivers
637 * @t:	the stacking driver limits (top device)
638 * @bdev:  the component block_device (bottom)
639 * @start:  first data sector within component device
640 *
641 * Description:
642 *    Merges queue limits for a top device and a block_device.  Returns
643 *    0 if alignment didn't change.  Returns -1 if adding the bottom
644 *    device caused misalignment.
645 */
646int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
647		      sector_t start)
648{
649	struct request_queue *bq = bdev_get_queue(bdev);
650
651	start += get_start_sect(bdev);
652
653	return blk_stack_limits(t, &bq->limits, start);
654}
655EXPORT_SYMBOL(bdev_stack_limits);
656
657/**
658 * disk_stack_limits - adjust queue limits for stacked drivers
659 * @disk:  MD/DM gendisk (top)
660 * @bdev:  the underlying block device (bottom)
661 * @offset:  offset to beginning of data within component device
662 *
663 * Description:
664 *    Merges the limits for a top level gendisk and a bottom level
665 *    block_device.
666 */
667void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
668		       sector_t offset)
669{
670	struct request_queue *t = disk->queue;
671
672	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
673		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
674
675		disk_name(disk, 0, top);
676		bdevname(bdev, bottom);
677
678		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
679		       top, bottom);
680	}
681}
682EXPORT_SYMBOL(disk_stack_limits);
683
684/**
685 * blk_queue_dma_pad - set pad mask
686 * @q:     the request queue for the device
687 * @mask:  pad mask
688 *
689 * Set dma pad mask.
690 *
691 * Appending pad buffer to a request modifies the last entry of a
692 * scatter list such that it includes the pad buffer.
693 **/
694void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
695{
696	q->dma_pad_mask = mask;
697}
698EXPORT_SYMBOL(blk_queue_dma_pad);
699
700/**
701 * blk_queue_update_dma_pad - update pad mask
702 * @q:     the request queue for the device
703 * @mask:  pad mask
704 *
705 * Update dma pad mask.
706 *
707 * Appending pad buffer to a request modifies the last entry of a
708 * scatter list such that it includes the pad buffer.
709 **/
710void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
711{
712	if (mask > q->dma_pad_mask)
713		q->dma_pad_mask = mask;
714}
715EXPORT_SYMBOL(blk_queue_update_dma_pad);
716
717/**
718 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
719 * @q:  the request queue for the device
720 * @dma_drain_needed: fn which returns non-zero if drain is necessary
721 * @buf:	physically contiguous buffer
722 * @size:	size of the buffer in bytes
723 *
724 * Some devices have excess DMA problems and can't simply discard (or
725 * zero fill) the unwanted piece of the transfer.  They have to have a
726 * real area of memory to transfer it into.  The use case for this is
727 * ATAPI devices in DMA mode.  If the packet command causes a transfer
728 * bigger than the transfer size some HBAs will lock up if there
729 * aren't DMA elements to contain the excess transfer.  What this API
730 * does is adjust the queue so that the buf is always appended
731 * silently to the scatterlist.
732 *
733 * Note: This routine adjusts max_hw_segments to make room for appending
734 * the drain buffer.  If you call blk_queue_max_segments() after calling
735 * this routine, you must set the limit to one fewer than your device
736 * can support otherwise there won't be room for the drain buffer.
737 */
738int blk_queue_dma_drain(struct request_queue *q,
739			       dma_drain_needed_fn *dma_drain_needed,
740			       void *buf, unsigned int size)
741{
742	if (queue_max_segments(q) < 2)
743		return -EINVAL;
744	/* make room for appending the drain */
745	blk_queue_max_segments(q, queue_max_segments(q) - 1);
746	q->dma_drain_needed = dma_drain_needed;
747	q->dma_drain_buffer = buf;
748	q->dma_drain_size = size;
749
750	return 0;
751}
752EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
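/*
 * Example (sketch, modeled on the ATAPI use case): reserve a
 * page-sized drain area; my_drain_needed() is an assumed callback that
 * decides per request whether the drain element is required.
 */
static int my_drain_needed(struct request *rq)
{
	return blk_rq_bytes(rq) & 3;	/* e.g. drain unaligned transfers */
}

buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buf)
	blk_queue_dma_drain(q, my_drain_needed, buf, PAGE_SIZE);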
753
754/**
755 * blk_queue_segment_boundary - set boundary rules for segment merging
756 * @q:  the request queue for the device
757 * @mask:  the memory boundary mask
758 **/
759void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
760{
761	if (mask < PAGE_CACHE_SIZE - 1) {
762		mask = PAGE_CACHE_SIZE - 1;
763		printk(KERN_INFO "%s: set to minimum %lx\n",
764		       __func__, mask);
765	}
766
767	q->limits.seg_boundary_mask = mask;
768}
769EXPORT_SYMBOL(blk_queue_segment_boundary);
770
771/**
772 * blk_queue_dma_alignment - set dma length and memory alignment
773 * @q:     the request queue for the device
774 * @mask:  alignment mask
775 *
776 * description:
777 *    set required memory and length alignment for direct dma transactions.
778 *    this is used when building direct io requests for the queue.
779 *
780 **/
781void blk_queue_dma_alignment(struct request_queue *q, int mask)
782{
783	q->dma_alignment = mask;
784}
785EXPORT_SYMBOL(blk_queue_dma_alignment);
786
787/**
788 * blk_queue_update_dma_alignment - update dma length and memory alignment
789 * @q:     the request queue for the device
790 * @mask:  alignment mask
791 *
792 * description:
793 *    update required memory and length alignment for direct dma transactions.
794 *    If the requested alignment is larger than the current alignment, then
795 *    the current queue alignment is updated to the new value, otherwise it
796 *    is left alone.  The design of this is to allow multiple objects
797 *    (driver, device, transport etc) to set their respective
798 *    alignments without having them interfere.
799 *
800 **/
801void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
802{
803	BUG_ON(mask > PAGE_SIZE);
804
805	if (mask > q->dma_alignment)
806		q->dma_alignment = mask;
807}
808EXPORT_SYMBOL(blk_queue_update_dma_alignment);
809
810/**
811 * blk_queue_flush - configure queue's cache flush capability
812 * @q:		the request queue for the device
813 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
814 *
815 * Tell block layer cache flush capability of @q.  If it supports
816 * flushing, REQ_FLUSH should be set.  If it supports bypassing
817 * write cache for individual writes, REQ_FUA should be set.
818 */
819void blk_queue_flush(struct request_queue *q, unsigned int flush)
820{
821	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
822
823	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
824		flush &= ~REQ_FUA;
825
826	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
827}
828EXPORT_SYMBOL_GPL(blk_queue_flush);
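/*
 * Example (sketch): a device with a volatile write cache that also
 * honors FUA writes advertises both flags; a write-through device
 * would pass 0 instead.
 */
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);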
829
830void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
831{
832	q->flush_not_queueable = !queueable;
833}
834EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
835
836static int __init blk_settings_init(void)
837{
838	blk_max_low_pfn = max_low_pfn - 1;
839	blk_max_pfn = max_pfn - 1;
840	return 0;
841}
842subsys_initcall(blk_settings_init);
v4.6
  1/*
  2 * Functions related to setting various queue properties from drivers
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/gcd.h>
 11#include <linux/lcm.h>
 12#include <linux/jiffies.h>
 13#include <linux/gfp.h>
 14
 15#include "blk.h"
 16
 17unsigned long blk_max_low_pfn;
 18EXPORT_SYMBOL(blk_max_low_pfn);
 19
 20unsigned long blk_max_pfn;
 21
 22/**
 23 * blk_queue_prep_rq - set a prepare_request function for queue
 24 * @q:		queue
 25 * @pfn:	prepare_request function
 26 *
 27 * It's possible for a queue to register a prepare_request callback which
 28 * is invoked before the request is handed to the request_fn. The goal of
 29 * the function is to prepare a request for I/O; it can be used, for
 30 * instance, to build a cdb from the request data.
 31 *
 32 */
 33void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 34{
 35	q->prep_rq_fn = pfn;
 36}
 37EXPORT_SYMBOL(blk_queue_prep_rq);
 38
 39/**
 40 * blk_queue_unprep_rq - set an unprepare_request function for queue
 41 * @q:		queue
 42 * @ufn:	unprepare_request function
 43 *
 44 * It's possible for a queue to register an unprepare_request callback
 45 * which is invoked before the request is finally completed. The goal
 46 * of the function is to deallocate any data that was allocated in the
 47 * prepare_request callback.
 48 *
 49 */
 50void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 51{
 52	q->unprep_rq_fn = ufn;
 53}
 54EXPORT_SYMBOL(blk_queue_unprep_rq);
 55
 56void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 57{
 58	q->softirq_done_fn = fn;
 59}
 60EXPORT_SYMBOL(blk_queue_softirq_done);
 61
 62void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 63{
 64	q->rq_timeout = timeout;
 65}
 66EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 67
 68void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
 69{
 70	q->rq_timed_out_fn = fn;
 71}
 72EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
 73
 74void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 75{
 76	q->lld_busy_fn = fn;
 77}
 78EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 79
 80/**
 81 * blk_set_default_limits - reset limits to default values
 82 * @lim:  the queue_limits structure to reset
 83 *
 84 * Description:
 85 *   Returns a queue_limits struct to its default state.
 86 */
 87void blk_set_default_limits(struct queue_limits *lim)
 88{
 89	lim->max_segments = BLK_MAX_SEGMENTS;
 90	lim->max_integrity_segments = 0;
 91	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 92	lim->virt_boundary_mask = 0;
 93	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 94	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 95	lim->max_dev_sectors = 0;
 96	lim->chunk_sectors = 0;
 97	lim->max_write_same_sectors = 0;
 98	lim->max_discard_sectors = 0;
 99	lim->max_hw_discard_sectors = 0;
100	lim->discard_granularity = 0;
101	lim->discard_alignment = 0;
102	lim->discard_misaligned = 0;
103	lim->discard_zeroes_data = 0;
104	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
105	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
106	lim->alignment_offset = 0;
107	lim->io_opt = 0;
108	lim->misaligned = 0;
109	lim->cluster = 1;
110}
111EXPORT_SYMBOL(blk_set_default_limits);
112
113/**
114 * blk_set_stacking_limits - set default limits for stacking devices
115 * @lim:  the queue_limits structure to reset
116 *
117 * Description:
118 *   Returns a queue_limits struct to its default state. Should be used
119 *   by stacking drivers like DM that have no internal limits.
120 */
121void blk_set_stacking_limits(struct queue_limits *lim)
122{
123	blk_set_default_limits(lim);
124
125	/* Inherit limits from component devices */
126	lim->discard_zeroes_data = 1;
127	lim->max_segments = USHRT_MAX;
128	lim->max_hw_sectors = UINT_MAX;
129	lim->max_segment_size = UINT_MAX;
130	lim->max_sectors = UINT_MAX;
131	lim->max_dev_sectors = UINT_MAX;
132	lim->max_write_same_sectors = UINT_MAX;
133}
134EXPORT_SYMBOL(blk_set_stacking_limits);
135
136/**
137 * blk_queue_make_request - define an alternate make_request function for a device
138 * @q:  the request queue for the device to be affected
139 * @mfn: the alternate make_request function
140 *
141 * Description:
142 *    The normal way for &struct bios to be passed to a device
143 *    driver is for them to be collected into requests on a request
144 *    queue, and then to allow the device driver to select requests
145 *    off that queue when it is ready.  This works well for many block
146 *    devices. However some block devices (typically virtual devices
147 *    such as md or lvm) do not benefit from the processing on the
148 *    request queue, and are served best by having the requests passed
149 *    directly to them.  This can be achieved by providing a function
150 *    to blk_queue_make_request().
151 *
152 * Caveat:
153 *    The driver that does this *must* be able to deal appropriately
154 *    with buffers in "highmemory". This can be accomplished by either calling
155 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
156 *    blk_queue_bounce() to create a buffer in normal memory.
157 **/
158void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
159{
160	/*
161	 * set defaults
162	 */
163	q->nr_requests = BLKDEV_MAX_RQ;
164
165	q->make_request_fn = mfn;
166	blk_queue_dma_alignment(q, 511);
167	blk_queue_congestion_threshold(q);
168	q->nr_batching = BLK_BATCH_REQ;
169
170	blk_set_default_limits(&q->limits);
171
172	/*
173	 * by default assume old behaviour and bounce for any highmem page
174	 */
175	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
176}
177EXPORT_SYMBOL(blk_queue_make_request);
178
179/**
180 * blk_queue_bounce_limit - set bounce buffer limit for queue
181 * @q: the request queue for the device
182 * @max_addr: the maximum address the device can handle
183 *
184 * Description:
185 *    Different hardware can have different requirements as to what pages
186 *    it can do I/O directly to. A low level driver can call
187 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
188 *    buffers for doing I/O to pages residing above @max_addr.
189 **/
190void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
191{
192	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
193	int dma = 0;
194
195	q->bounce_gfp = GFP_NOIO;
196#if BITS_PER_LONG == 64
197	/*
198	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
199	 * some IOMMUs can handle everything, but I don't know of a
200	 * way to test this here.
201	 */
202	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
203		dma = 1;
204	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
205#else
206	if (b_pfn < blk_max_low_pfn)
207		dma = 1;
208	q->limits.bounce_pfn = b_pfn;
209#endif
210	if (dma) {
211		init_emergency_isa_pool();
212		q->bounce_gfp = GFP_NOIO | GFP_DMA;
213		q->limits.bounce_pfn = b_pfn;
214	}
215}
216EXPORT_SYMBOL(blk_queue_bounce_limit);
217
218/**
219 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
220 * @q:  the request queue for the device
221 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
222 *
223 * Description:
224 *    Enables a low level driver to set a hard upper limit,
225 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
226 *    the device driver based upon the capabilities of the I/O
227 *    controller.
228 *
229 *    max_dev_sectors is a hard limit imposed by the storage device for
230 *    READ/WRITE requests. It is set by the disk driver.
231 *
232 *    max_sectors is a soft limit imposed by the block layer for
233 *    filesystem type requests.  This value can be overridden on a
234 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
235 *    The soft limit cannot exceed max_hw_sectors.
236 **/
237void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
238{
239	struct queue_limits *limits = &q->limits;
240	unsigned int max_sectors;
241
242	if ((max_hw_sectors << 9) < PAGE_SIZE) {
243		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
244		printk(KERN_INFO "%s: set to minimum %d\n",
245		       __func__, max_hw_sectors);
246	}
247
248	limits->max_hw_sectors = max_hw_sectors;
249	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
250	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
251	limits->max_sectors = max_sectors;
252}
253EXPORT_SYMBOL(blk_queue_max_hw_sectors);
254
255/**
256 * blk_queue_chunk_sectors - set size of the chunk for this queue
257 * @q:  the request queue for the device
258 * @chunk_sectors:  chunk sectors in the usual 512b unit
259 *
260 * Description:
261 *    If a driver doesn't want IOs to cross a given chunk size, it can set
262 *    this limit and prevent merging across chunks. Note that the chunk size
263 *    must currently be a power-of-2 in sectors. Also note that the block
264 *    layer must accept a page worth of data at any offset. So if the
265 *    crossing of chunks is a hard limitation in the driver, it must still be
266 *    prepared to split single page bios.
267 **/
268void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
269{
270	BUG_ON(!is_power_of_2(chunk_sectors));
271	q->limits.chunk_sectors = chunk_sectors;
272}
273EXPORT_SYMBOL(blk_queue_chunk_sectors);
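/*
 * Example (sketch): a device that prefers I/O not to straddle 128KiB
 * boundaries sets a 256-sector chunk; the value must be a power of
 * two, and the block layer then avoids merging across chunks.
 */
blk_queue_chunk_sectors(q, 256);	/* 256 * 512b = 128KiB */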
274
275/**
276 * blk_queue_max_discard_sectors - set max sectors for a single discard
277 * @q:  the request queue for the device
278 * @max_discard_sectors: maximum number of sectors to discard
279 **/
280void blk_queue_max_discard_sectors(struct request_queue *q,
281		unsigned int max_discard_sectors)
282{
283	q->limits.max_hw_discard_sectors = max_discard_sectors;
284	q->limits.max_discard_sectors = max_discard_sectors;
285}
286EXPORT_SYMBOL(blk_queue_max_discard_sectors);
287
288/**
289 * blk_queue_max_write_same_sectors - set max sectors for a single write same
290 * @q:  the request queue for the device
291 * @max_write_same_sectors: maximum number of sectors to write per command
292 **/
293void blk_queue_max_write_same_sectors(struct request_queue *q,
294				      unsigned int max_write_same_sectors)
295{
296	q->limits.max_write_same_sectors = max_write_same_sectors;
297}
298EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
299
300/**
301 * blk_queue_max_segments - set max hw segments for a request for this queue
302 * @q:  the request queue for the device
303 * @max_segments:  max number of segments
304 *
305 * Description:
306 *    Enables a low level driver to set an upper limit on the number of
307 *    hw data segments in a request.
308 **/
309void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
310{
311	if (!max_segments) {
312		max_segments = 1;
313		printk(KERN_INFO "%s: set to minimum %d\n",
314		       __func__, max_segments);
315	}
316
317	q->limits.max_segments = max_segments;
318}
319EXPORT_SYMBOL(blk_queue_max_segments);
320
321/**
322 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
323 * @q:  the request queue for the device
324 * @max_size:  max size of segment in bytes
325 *
326 * Description:
327 *    Enables a low level driver to set an upper limit on the size of a
328 *    coalesced segment
329 **/
330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
331{
332	if (max_size < PAGE_SIZE) {
333		max_size = PAGE_SIZE;
334		printk(KERN_INFO "%s: set to minimum %d\n",
335		       __func__, max_size);
336	}
337
338	q->limits.max_segment_size = max_size;
339}
340EXPORT_SYMBOL(blk_queue_max_segment_size);
341
342/**
343 * blk_queue_logical_block_size - set logical block size for the queue
344 * @q:  the request queue for the device
345 * @size:  the logical block size, in bytes
346 *
347 * Description:
348 *   This should be set to the lowest possible block size that the
349 *   storage device can address.  The default of 512 covers most
350 *   hardware.
351 **/
352void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
353{
354	q->limits.logical_block_size = size;
355
356	if (q->limits.physical_block_size < size)
357		q->limits.physical_block_size = size;
358
359	if (q->limits.io_min < q->limits.physical_block_size)
360		q->limits.io_min = q->limits.physical_block_size;
361}
362EXPORT_SYMBOL(blk_queue_logical_block_size);
363
364/**
365 * blk_queue_physical_block_size - set physical block size for the queue
366 * @q:  the request queue for the device
367 * @size:  the physical block size, in bytes
368 *
369 * Description:
370 *   This should be set to the lowest possible sector size that the
371 *   hardware can operate on without reverting to read-modify-write
372 *   operations.
373 */
374void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
375{
376	q->limits.physical_block_size = size;
377
378	if (q->limits.physical_block_size < q->limits.logical_block_size)
379		q->limits.physical_block_size = q->limits.logical_block_size;
380
381	if (q->limits.io_min < q->limits.physical_block_size)
382		q->limits.io_min = q->limits.physical_block_size;
383}
384EXPORT_SYMBOL(blk_queue_physical_block_size);
385
386/**
387 * blk_queue_alignment_offset - set physical block alignment offset
388 * @q:	the request queue for the device
389 * @offset: alignment offset in bytes
390 *
391 * Description:
392 *   Some devices are naturally misaligned to compensate for things like
393 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
394 *   should call this function for devices whose first sector is not
395 *   naturally aligned.
396 */
397void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
398{
399	q->limits.alignment_offset =
400		offset & (q->limits.physical_block_size - 1);
401	q->limits.misaligned = 0;
402}
403EXPORT_SYMBOL(blk_queue_alignment_offset);
404
405/**
406 * blk_limits_io_min - set minimum request size for a device
407 * @limits: the queue limits
408 * @min:  smallest I/O size in bytes
409 *
410 * Description:
411 *   Some devices have an internal block size bigger than the reported
412 *   hardware sector size.  This function can be used to signal the
413 *   smallest I/O the device can perform without incurring a performance
414 *   penalty.
415 */
416void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
417{
418	limits->io_min = min;
419
420	if (limits->io_min < limits->logical_block_size)
421		limits->io_min = limits->logical_block_size;
422
423	if (limits->io_min < limits->physical_block_size)
424		limits->io_min = limits->physical_block_size;
425}
426EXPORT_SYMBOL(blk_limits_io_min);
427
428/**
429 * blk_queue_io_min - set minimum request size for the queue
430 * @q:	the request queue for the device
431 * @min:  smallest I/O size in bytes
432 *
433 * Description:
434 *   Storage devices may report a granularity or preferred minimum I/O
435 *   size which is the smallest request the device can perform without
436 *   incurring a performance penalty.  For disk drives this is often the
437 *   physical block size.  For RAID arrays it is often the stripe chunk
438 *   size.  A properly aligned multiple of minimum_io_size is the
439 *   preferred request size for workloads where a high number of I/O
440 *   operations is desired.
441 */
442void blk_queue_io_min(struct request_queue *q, unsigned int min)
443{
444	blk_limits_io_min(&q->limits, min);
445}
446EXPORT_SYMBOL(blk_queue_io_min);
447
448/**
449 * blk_limits_io_opt - set optimal request size for a device
450 * @limits: the queue limits
451 * @opt:  optimal request size in bytes
452 *
453 * Description:
454 *   Storage devices may report an optimal I/O size, which is the
455 *   device's preferred unit for sustained I/O.  This is rarely reported
456 *   for disk drives.  For RAID arrays it is usually the stripe width or
457 *   the internal track size.  A properly aligned multiple of
458 *   optimal_io_size is the preferred request size for workloads where
459 *   sustained throughput is desired.
460 */
461void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
462{
463	limits->io_opt = opt;
464}
465EXPORT_SYMBOL(blk_limits_io_opt);
466
467/**
468 * blk_queue_io_opt - set optimal request size for the queue
469 * @q:	the request queue for the device
470 * @opt:  optimal request size in bytes
471 *
472 * Description:
473 *   Storage devices may report an optimal I/O size, which is the
474 *   device's preferred unit for sustained I/O.  This is rarely reported
475 *   for disk drives.  For RAID arrays it is usually the stripe width or
476 *   the internal track size.  A properly aligned multiple of
477 *   optimal_io_size is the preferred request size for workloads where
478 *   sustained throughput is desired.
479 */
480void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
481{
482	blk_limits_io_opt(&q->limits, opt);
483}
484EXPORT_SYMBOL(blk_queue_io_opt);
485
486/**
487 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
488 * @t:	the stacking driver (top)
489 * @b:  the underlying device (bottom)
490 **/
491void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
492{
493	blk_stack_limits(&t->limits, &b->limits, 0);
494}
495EXPORT_SYMBOL(blk_queue_stack_limits);
496
497/**
498 * blk_stack_limits - adjust queue_limits for stacked devices
499 * @t:	the stacking driver limits (top device)
500 * @b:  the underlying queue limits (bottom, component device)
501 * @start:  first data sector within component device
502 *
503 * Description:
504 *    This function is used by stacking drivers like MD and DM to ensure
505 *    that all component devices have compatible block sizes and
506 *    alignments.  The stacking driver must provide a queue_limits
507 *    struct (top) and then iteratively call the stacking function for
508 *    all component (bottom) devices.  The stacking function will
509 *    attempt to combine the values and ensure proper alignment.
510 *
511 *    Returns 0 if the top and bottom queue_limits are compatible.  The
512 *    top device's block sizes and alignment offsets may be adjusted to
513 *    ensure alignment with the bottom device. If no compatible sizes
514 *    and alignments exist, -1 is returned and the resulting top
515 *    queue_limits will have the misaligned flag set to indicate that
516 *    the alignment_offset is undefined.
517 */
518int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
519		     sector_t start)
520{
521	unsigned int top, bottom, alignment, ret = 0;
522
523	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
524	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
525	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
526	t->max_write_same_sectors = min(t->max_write_same_sectors,
527					b->max_write_same_sectors);
528	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
529
530	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
531					    b->seg_boundary_mask);
532	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
533					    b->virt_boundary_mask);
534
535	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
536	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
537						 b->max_integrity_segments);
538
539	t->max_segment_size = min_not_zero(t->max_segment_size,
540					   b->max_segment_size);
541
542	t->misaligned |= b->misaligned;
543
544	alignment = queue_limit_alignment_offset(b, start);
545
546	/* Bottom device has different alignment.  Check that it is
547	 * compatible with the current top alignment.
548	 */
549	if (t->alignment_offset != alignment) {
550
551		top = max(t->physical_block_size, t->io_min)
552			+ t->alignment_offset;
553		bottom = max(b->physical_block_size, b->io_min) + alignment;
554
555		/* Verify that top and bottom intervals line up */
556		if (max(top, bottom) % min(top, bottom)) {
557			t->misaligned = 1;
558			ret = -1;
559		}
560	}
561
562	t->logical_block_size = max(t->logical_block_size,
563				    b->logical_block_size);
564
565	t->physical_block_size = max(t->physical_block_size,
566				     b->physical_block_size);
567
568	t->io_min = max(t->io_min, b->io_min);
569	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
570
571	t->cluster &= b->cluster;
572	t->discard_zeroes_data &= b->discard_zeroes_data;
573
574	/* Physical block size a multiple of the logical block size? */
575	if (t->physical_block_size & (t->logical_block_size - 1)) {
576		t->physical_block_size = t->logical_block_size;
577		t->misaligned = 1;
578		ret = -1;
579	}
580
581	/* Minimum I/O a multiple of the physical block size? */
582	if (t->io_min & (t->physical_block_size - 1)) {
583		t->io_min = t->physical_block_size;
584		t->misaligned = 1;
585		ret = -1;
586	}
587
588	/* Optimal I/O a multiple of the physical block size? */
589	if (t->io_opt & (t->physical_block_size - 1)) {
590		t->io_opt = 0;
591		t->misaligned = 1;
592		ret = -1;
593	}
594
595	t->raid_partial_stripes_expensive =
596		max(t->raid_partial_stripes_expensive,
597		    b->raid_partial_stripes_expensive);
598
599	/* Find lowest common alignment_offset */
600	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
601		% max(t->physical_block_size, t->io_min);
602
603	/* Verify that new alignment_offset is on a logical block boundary */
604	if (t->alignment_offset & (t->logical_block_size - 1)) {
605		t->misaligned = 1;
606		ret = -1;
607	}
608
609	/* Discard alignment and granularity */
610	if (b->discard_granularity) {
611		alignment = queue_limit_discard_alignment(b, start);
612
613		if (t->discard_granularity != 0 &&
614		    t->discard_alignment != alignment) {
615			top = t->discard_granularity + t->discard_alignment;
616			bottom = b->discard_granularity + alignment;
617
618			/* Verify that top and bottom intervals line up */
619			if ((max(top, bottom) % min(top, bottom)) != 0)
620				t->discard_misaligned = 1;
621		}
622
623		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
624						      b->max_discard_sectors);
625		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
626							 b->max_hw_discard_sectors);
627		t->discard_granularity = max(t->discard_granularity,
628					     b->discard_granularity);
629		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
630			t->discard_granularity;
631	}
632
633	return ret;
634}
635EXPORT_SYMBOL(blk_stack_limits);
636
637/**
638 * bdev_stack_limits - adjust queue limits for stacked drivers
639 * @t:	the stacking driver limits (top device)
640 * @bdev:  the component block_device (bottom)
641 * @start:  first data sector within component device
642 *
643 * Description:
644 *    Merges queue limits for a top device and a block_device.  Returns
645 *    0 if alignment didn't change.  Returns -1 if adding the bottom
646 *    device caused misalignment.
647 */
648int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
649		      sector_t start)
650{
651	struct request_queue *bq = bdev_get_queue(bdev);
652
653	start += get_start_sect(bdev);
654
655	return blk_stack_limits(t, &bq->limits, start);
656}
657EXPORT_SYMBOL(bdev_stack_limits);
658
659/**
660 * disk_stack_limits - adjust queue limits for stacked drivers
661 * @disk:  MD/DM gendisk (top)
662 * @bdev:  the underlying block device (bottom)
663 * @offset:  offset to beginning of data within component device
664 *
665 * Description:
666 *    Merges the limits for a top level gendisk and a bottom level
667 *    block_device.
668 */
669void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
670		       sector_t offset)
671{
672	struct request_queue *t = disk->queue;
673
674	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
675		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
676
677		disk_name(disk, 0, top);
678		bdevname(bdev, bottom);
679
680		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
681		       top, bottom);
682	}
683}
684EXPORT_SYMBOL(disk_stack_limits);
685
686/**
687 * blk_queue_dma_pad - set pad mask
688 * @q:     the request queue for the device
689 * @mask:  pad mask
690 *
691 * Set dma pad mask.
692 *
693 * Appending pad buffer to a request modifies the last entry of a
694 * scatter list such that it includes the pad buffer.
695 **/
696void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
697{
698	q->dma_pad_mask = mask;
699}
700EXPORT_SYMBOL(blk_queue_dma_pad);
701
702/**
703 * blk_queue_update_dma_pad - update pad mask
704 * @q:     the request queue for the device
705 * @mask:  pad mask
706 *
707 * Update dma pad mask.
708 *
709 * Appending pad buffer to a request modifies the last entry of a
710 * scatter list such that it includes the pad buffer.
711 **/
712void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
713{
714	if (mask > q->dma_pad_mask)
715		q->dma_pad_mask = mask;
716}
717EXPORT_SYMBOL(blk_queue_update_dma_pad);
718
719/**
720 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
721 * @q:  the request queue for the device
722 * @dma_drain_needed: fn which returns non-zero if drain is necessary
723 * @buf:	physically contiguous buffer
724 * @size:	size of the buffer in bytes
725 *
726 * Some devices have excess DMA problems and can't simply discard (or
727 * zero fill) the unwanted piece of the transfer.  They have to have a
728 * real area of memory to transfer it into.  The use case for this is
729 * ATAPI devices in DMA mode.  If the packet command causes a transfer
730 * bigger than the transfer size some HBAs will lock up if there
731 * aren't DMA elements to contain the excess transfer.  What this API
732 * does is adjust the queue so that the buf is always appended
733 * silently to the scatterlist.
734 *
735 * Note: This routine adjusts max_hw_segments to make room for appending
736 * the drain buffer.  If you call blk_queue_max_segments() after calling
737 * this routine, you must set the limit to one fewer than your device
738 * can support otherwise there won't be room for the drain buffer.
739 */
740int blk_queue_dma_drain(struct request_queue *q,
741			       dma_drain_needed_fn *dma_drain_needed,
742			       void *buf, unsigned int size)
743{
744	if (queue_max_segments(q) < 2)
745		return -EINVAL;
746	/* make room for appending the drain */
747	blk_queue_max_segments(q, queue_max_segments(q) - 1);
748	q->dma_drain_needed = dma_drain_needed;
749	q->dma_drain_buffer = buf;
750	q->dma_drain_size = size;
751
752	return 0;
753}
754EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
755
756/**
757 * blk_queue_segment_boundary - set boundary rules for segment merging
758 * @q:  the request queue for the device
759 * @mask:  the memory boundary mask
760 **/
761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
762{
763	if (mask < PAGE_SIZE - 1) {
764		mask = PAGE_SIZE - 1;
765		printk(KERN_INFO "%s: set to minimum %lx\n",
766		       __func__, mask);
767	}
768
769	q->limits.seg_boundary_mask = mask;
770}
771EXPORT_SYMBOL(blk_queue_segment_boundary);
772
773/**
774 * blk_queue_virt_boundary - set boundary rules for bio merging
775 * @q:  the request queue for the device
776 * @mask:  the memory boundary mask
777 **/
778void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
779{
780	q->limits.virt_boundary_mask = mask;
781}
782EXPORT_SYMBOL(blk_queue_virt_boundary);
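/*
 * Example (sketch): hardware with NVMe-style PRP lists requires every
 * middle segment to start and end on a page boundary; a page-sized
 * mask tells the block layer never to merge bios that would leave a
 * gap within that boundary.
 */
blk_queue_virt_boundary(q, PAGE_SIZE - 1);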
783
784/**
785 * blk_queue_dma_alignment - set dma length and memory alignment
786 * @q:     the request queue for the device
787 * @mask:  alignment mask
788 *
789 * description:
790 *    set required memory and length alignment for direct dma transactions.
791 *    this is used when building direct io requests for the queue.
792 *
793 **/
794void blk_queue_dma_alignment(struct request_queue *q, int mask)
795{
796	q->dma_alignment = mask;
797}
798EXPORT_SYMBOL(blk_queue_dma_alignment);
799
800/**
801 * blk_queue_update_dma_alignment - update dma length and memory alignment
802 * @q:     the request queue for the device
803 * @mask:  alignment mask
804 *
805 * description:
806 *    update required memory and length alignment for direct dma transactions.
807 *    If the requested alignment is larger than the current alignment, then
808 *    the current queue alignment is updated to the new value, otherwise it
809 *    is left alone.  The design of this is to allow multiple objects
810 *    (driver, device, transport etc) to set their respective
811 *    alignments without having them interfere.
812 *
813 **/
814void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
815{
816	BUG_ON(mask > PAGE_SIZE);
817
818	if (mask > q->dma_alignment)
819		q->dma_alignment = mask;
820}
821EXPORT_SYMBOL(blk_queue_update_dma_alignment);
822
823/**
824 * blk_queue_flush - configure queue's cache flush capability
825 * @q:		the request queue for the device
826 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
827 *
828 * Tell block layer cache flush capability of @q.  If it supports
829 * flushing, REQ_FLUSH should be set.  If it supports bypassing
830 * write cache for individual writes, REQ_FUA should be set.
831 */
832void blk_queue_flush(struct request_queue *q, unsigned int flush)
833{
834	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
835
836	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
837		flush &= ~REQ_FUA;
838
839	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
840}
841EXPORT_SYMBOL_GPL(blk_queue_flush);
842
843void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
844{
845	q->flush_not_queueable = !queueable;
846}
847EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
848
849static int __init blk_settings_init(void)
850{
851	blk_max_low_pfn = max_low_pfn - 1;
852	blk_max_pfn = max_pfn - 1;
853	return 0;
854}
855subsys_initcall(blk_settings_init);