v3.15 (block/blk-settings.c)
  1/*
  2 * Functions related to setting various queue properties from drivers
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/gcd.h>
 11#include <linux/lcm.h>
 12#include <linux/jiffies.h>
 13#include <linux/gfp.h>
 14
 15#include "blk.h"
 16
 17unsigned long blk_max_low_pfn;
 18EXPORT_SYMBOL(blk_max_low_pfn);
 19
 20unsigned long blk_max_pfn;
 21
 22/**
 23 * blk_queue_prep_rq - set a prepare_request function for queue
 24 * @q:		queue
 25 * @pfn:	prepare_request function
 26 *
 27 * It's possible for a queue to register a prepare_request callback which
 28 * is invoked before the request is handed to the request_fn. The goal of
 29 * the function is to prepare a request for I/O; it can be used, for
 30 * instance, to build a cdb from the request data.
 31 *
 32 */
 33void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 34{
 35	q->prep_rq_fn = pfn;
 36}
 37EXPORT_SYMBOL(blk_queue_prep_rq);
 38
 39/**
 40 * blk_queue_unprep_rq - set an unprepare_request function for queue
 41 * @q:		queue
 42 * @ufn:	unprepare_request function
 43 *
 44 * It's possible for a queue to register an unprepare_request callback
 45 * which is invoked before the request is finally completed. The goal
 46 * of the function is to deallocate any data that was allocated in the
 47 * prepare_request callback.
 48 *
 49 */
 50void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 51{
 52	q->unprep_rq_fn = ufn;
 53}
 54EXPORT_SYMBOL(blk_queue_unprep_rq);
 55
 56/**
 57 * blk_queue_merge_bvec - set a merge_bvec function for queue
 58 * @q:		queue
 59 * @mbfn:	merge_bvec_fn
 60 *
 61 * Usually queues have static limitations on the max sectors or segments that
 62 * we can put in a request. Stacking drivers may have some settings that
 63 * are dynamic, and thus we have to query the queue whether it is ok to
 64 * add a new bio_vec to a bio at a given offset or not. If the block device
 65 * has such limitations, it needs to register a merge_bvec_fn to control
 66 * the size of bios sent to it. Note that a block device *must* allow a
 67 * single page to be added to an empty bio. The block device driver may want
 68 * to use the bio_split() function to deal with these bios. By default
 69 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 70 * honored.
 71 */
 72void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 73{
 74	q->merge_bvec_fn = mbfn;
 75}
 76EXPORT_SYMBOL(blk_queue_merge_bvec);
 77
 78void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 79{
 80	q->softirq_done_fn = fn;
 81}
 82EXPORT_SYMBOL(blk_queue_softirq_done);
 83
 84void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 85{
 86	q->rq_timeout = timeout;
 87}
 88EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 89
 90void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
 91{
 92	q->rq_timed_out_fn = fn;
 93}
 94EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
 95
 96void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 97{
 98	q->lld_busy_fn = fn;
 99}
100EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
101
102/**
103 * blk_set_default_limits - reset limits to default values
104 * @lim:  the queue_limits structure to reset
105 *
106 * Description:
107 *   Returns a queue_limit struct to its default state.
108 */
109void blk_set_default_limits(struct queue_limits *lim)
110{
111	lim->max_segments = BLK_MAX_SEGMENTS;
112	lim->max_integrity_segments = 0;
113	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
114	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
115	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
116	lim->max_write_same_sectors = 0;
117	lim->max_discard_sectors = 0;
118	lim->discard_granularity = 0;
119	lim->discard_alignment = 0;
120	lim->discard_misaligned = 0;
121	lim->discard_zeroes_data = 0;
122	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
123	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
124	lim->alignment_offset = 0;
125	lim->io_opt = 0;
126	lim->misaligned = 0;
127	lim->cluster = 1;
128}
129EXPORT_SYMBOL(blk_set_default_limits);
130
131/**
132 * blk_set_stacking_limits - set default limits for stacking devices
133 * @lim:  the queue_limits structure to reset
134 *
135 * Description:
136 *   Returns a queue_limit struct to its default state. Should be used
137 *   by stacking drivers like DM that have no internal limits.
138 */
139void blk_set_stacking_limits(struct queue_limits *lim)
140{
141	blk_set_default_limits(lim);
142
143	/* Inherit limits from component devices */
144	lim->discard_zeroes_data = 1;
145	lim->max_segments = USHRT_MAX;
146	lim->max_hw_sectors = UINT_MAX;
147	lim->max_segment_size = UINT_MAX;
148	lim->max_sectors = UINT_MAX;
149	lim->max_write_same_sectors = UINT_MAX;
150}
151EXPORT_SYMBOL(blk_set_stacking_limits);
152
153/**
154 * blk_queue_make_request - define an alternate make_request function for a device
155 * @q:  the request queue for the device to be affected
156 * @mfn: the alternate make_request function
157 *
158 * Description:
159 *    The normal way for &struct bios to be passed to a device
160 *    driver is for them to be collected into requests on a request
161 *    queue, and then to allow the device driver to select requests
162 *    off that queue when it is ready.  This works well for many block
163 *    devices. However some block devices (typically virtual devices
164 *    such as md or lvm) do not benefit from the processing on the
165 *    request queue, and are served best by having the requests passed
166 *    directly to them.  This can be achieved by providing a function
167 *    to blk_queue_make_request().
168 *
169 * Caveat:
170 *    The driver that does this *must* be able to deal appropriately
171 *    with buffers in "highmemory". This can be accomplished by either calling
172 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
173 *    blk_queue_bounce() to create a buffer in normal memory.
174 **/
175void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
176{
177	/*
178	 * set defaults
179	 */
180	q->nr_requests = BLKDEV_MAX_RQ;
181
182	q->make_request_fn = mfn;
183	blk_queue_dma_alignment(q, 511);
184	blk_queue_congestion_threshold(q);
185	q->nr_batching = BLK_BATCH_REQ;
186
187	blk_set_default_limits(&q->limits);
188
189	/*
190	 * by default assume old behaviour and bounce for any highmem page
191	 */
192	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
193}
194EXPORT_SYMBOL(blk_queue_make_request);
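/*
 * Illustrative sketch (not part of the original source; driver names are
 * hypothetical): a bio-based driver would typically pair blk_alloc_queue()
 * with blk_queue_make_request().  In v3.15 the make_request callback
 * returns void and completes bios with bio_endio(bio, error):
 *
 *	static void mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		// handle the bio directly, no request queueing
 *		bio_endio(bio, 0);
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (q)
 *		blk_queue_make_request(q, mydrv_make_request);
 */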
195
196/**
197 * blk_queue_bounce_limit - set bounce buffer limit for queue
198 * @q: the request queue for the device
199 * @max_addr: the maximum address the device can handle
200 *
201 * Description:
202 *    Different hardware can have different requirements as to what pages
203 *    it can do I/O directly to. A low level driver can call
204 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
205 *    buffers for doing I/O to pages residing above @max_addr.
206 **/
207void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
208{
209	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
210	int dma = 0;
211
212	q->bounce_gfp = GFP_NOIO;
213#if BITS_PER_LONG == 64
214	/*
215	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
216	 * some IOMMUs can handle everything, but I don't know of a
217	 * way to test this here.
218	 */
219	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
220		dma = 1;
221	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
222#else
223	if (b_pfn < blk_max_low_pfn)
224		dma = 1;
225	q->limits.bounce_pfn = b_pfn;
226#endif
227	if (dma) {
228		init_emergency_isa_pool();
229		q->bounce_gfp = GFP_NOIO | GFP_DMA;
230		q->limits.bounce_pfn = b_pfn;
231	}
232}
233EXPORT_SYMBOL(blk_queue_bounce_limit);
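/*
 * Illustrative example (not from the original source): a driver whose
 * hardware can only DMA to the low 4 GB of memory could request bouncing
 * for anything above that:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	// <linux/dma-mapping.h>
 *
 * The default installed by blk_queue_make_request() is BLK_BOUNCE_HIGH,
 * i.e. bounce any highmem page.
 */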
234
235/**
236 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
237 * @limits: the queue limits
238 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
239 *
240 * Description:
241 *    Enables a low level driver to set a hard upper limit,
242 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
243 *    the device driver based upon the combined capabilities of I/O
244 *    controller and storage device.
245 *
246 *    max_sectors is a soft limit imposed by the block layer for
247 *    filesystem type requests.  This value can be overridden on a
248 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
249 *    The soft limit can not exceed max_hw_sectors.
250 **/
251void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
252{
253	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
254		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
255		printk(KERN_INFO "%s: set to minimum %d\n",
256		       __func__, max_hw_sectors);
257	}
258
259	limits->max_hw_sectors = max_hw_sectors;
260	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
261				    BLK_DEF_MAX_SECTORS);
262}
263EXPORT_SYMBOL(blk_limits_max_hw_sectors);
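/*
 * Worked example (illustrative only): a controller limited to 64 KiB per
 * transfer corresponds to 128 sectors of 512 bytes, so its driver would call
 * blk_limits_max_hw_sectors(limits, 128).  max_hw_sectors becomes 128 and
 * max_sectors becomes min(128, BLK_DEF_MAX_SECTORS), i.e. 128 here.
 */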
264
265/**
266 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
267 * @q:  the request queue for the device
268 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
269 *
270 * Description:
271 *    See description for blk_limits_max_hw_sectors().
272 **/
273void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
274{
275	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
276}
277EXPORT_SYMBOL(blk_queue_max_hw_sectors);
278
279/**
280 * blk_queue_max_discard_sectors - set max sectors for a single discard
281 * @q:  the request queue for the device
282 * @max_discard_sectors: maximum number of sectors to discard
283 **/
284void blk_queue_max_discard_sectors(struct request_queue *q,
285		unsigned int max_discard_sectors)
286{
287	q->limits.max_discard_sectors = max_discard_sectors;
288}
289EXPORT_SYMBOL(blk_queue_max_discard_sectors);
290
291/**
292 * blk_queue_max_write_same_sectors - set max sectors for a single write same
293 * @q:  the request queue for the device
294 * @max_write_same_sectors: maximum number of sectors to write per command
295 **/
296void blk_queue_max_write_same_sectors(struct request_queue *q,
297				      unsigned int max_write_same_sectors)
298{
299	q->limits.max_write_same_sectors = max_write_same_sectors;
300}
301EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
302
303/**
304 * blk_queue_max_segments - set max hw segments for a request for this queue
305 * @q:  the request queue for the device
306 * @max_segments:  max number of segments
307 *
308 * Description:
309 *    Enables a low level driver to set an upper limit on the number of
310 *    hw data segments in a request.
311 **/
312void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
313{
314	if (!max_segments) {
315		max_segments = 1;
316		printk(KERN_INFO "%s: set to minimum %d\n",
317		       __func__, max_segments);
318	}
319
320	q->limits.max_segments = max_segments;
321}
322EXPORT_SYMBOL(blk_queue_max_segments);
323
324/**
325 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
326 * @q:  the request queue for the device
327 * @max_size:  max size of segment in bytes
328 *
329 * Description:
330 *    Enables a low level driver to set an upper limit on the size of a
331 *    coalesced segment
332 **/
333void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
334{
335	if (max_size < PAGE_CACHE_SIZE) {
336		max_size = PAGE_CACHE_SIZE;
337		printk(KERN_INFO "%s: set to minimum %d\n",
338		       __func__, max_size);
339	}
340
341	q->limits.max_segment_size = max_size;
342}
343EXPORT_SYMBOL(blk_queue_max_segment_size);
344
345/**
346 * blk_queue_logical_block_size - set logical block size for the queue
347 * @q:  the request queue for the device
348 * @size:  the logical block size, in bytes
349 *
350 * Description:
351 *   This should be set to the lowest possible block size that the
352 *   storage device can address.  The default of 512 covers most
353 *   hardware.
354 **/
355void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
356{
357	q->limits.logical_block_size = size;
358
359	if (q->limits.physical_block_size < size)
360		q->limits.physical_block_size = size;
361
362	if (q->limits.io_min < q->limits.physical_block_size)
363		q->limits.io_min = q->limits.physical_block_size;
364}
365EXPORT_SYMBOL(blk_queue_logical_block_size);
366
367/**
368 * blk_queue_physical_block_size - set physical block size for the queue
369 * @q:  the request queue for the device
370 * @size:  the physical block size, in bytes
371 *
372 * Description:
373 *   This should be set to the lowest possible sector size that the
374 *   hardware can operate on without reverting to read-modify-write
375 *   operations.
376 */
377void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
378{
379	q->limits.physical_block_size = size;
380
381	if (q->limits.physical_block_size < q->limits.logical_block_size)
382		q->limits.physical_block_size = q->limits.logical_block_size;
383
384	if (q->limits.io_min < q->limits.physical_block_size)
385		q->limits.io_min = q->limits.physical_block_size;
386}
387EXPORT_SYMBOL(blk_queue_physical_block_size);
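/*
 * Illustrative example (not from the original source): a 512-byte emulated
 * drive with 4 KiB physical sectors would report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * After these calls logical_block_size is 512, physical_block_size is 4096,
 * and io_min is bumped to 4096 by the helpers above.
 */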
388
389/**
390 * blk_queue_alignment_offset - set physical block alignment offset
391 * @q:	the request queue for the device
392 * @offset: alignment offset in bytes
393 *
394 * Description:
395 *   Some devices are naturally misaligned to compensate for things like
396 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
397 *   should call this function for devices whose first sector is not
398 *   naturally aligned.
399 */
400void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
401{
402	q->limits.alignment_offset =
403		offset & (q->limits.physical_block_size - 1);
404	q->limits.misaligned = 0;
405}
406EXPORT_SYMBOL(blk_queue_alignment_offset);
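/*
 * Worked example (illustrative, hedged): a 512-byte emulated drive with
 * 4 KiB physical blocks whose internal layout is shifted so that legacy
 * LBA 63 lands on a physical block boundary is offset by 4096 - 512 = 3584
 * bytes from natural alignment, so its driver would report:
 *
 *	blk_queue_alignment_offset(q, 3584);
 */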
407
408/**
409 * blk_limits_io_min - set minimum request size for a device
410 * @limits: the queue limits
411 * @min:  smallest I/O size in bytes
412 *
413 * Description:
414 *   Some devices have an internal block size bigger than the reported
415 *   hardware sector size.  This function can be used to signal the
416 *   smallest I/O the device can perform without incurring a performance
417 *   penalty.
418 */
419void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
420{
421	limits->io_min = min;
422
423	if (limits->io_min < limits->logical_block_size)
424		limits->io_min = limits->logical_block_size;
425
426	if (limits->io_min < limits->physical_block_size)
427		limits->io_min = limits->physical_block_size;
428}
429EXPORT_SYMBOL(blk_limits_io_min);
430
431/**
432 * blk_queue_io_min - set minimum request size for the queue
433 * @q:	the request queue for the device
434 * @min:  smallest I/O size in bytes
435 *
436 * Description:
437 *   Storage devices may report a granularity or preferred minimum I/O
438 *   size which is the smallest request the device can perform without
439 *   incurring a performance penalty.  For disk drives this is often the
440 *   physical block size.  For RAID arrays it is often the stripe chunk
441 *   size.  A properly aligned multiple of minimum_io_size is the
442 *   preferred request size for workloads where a high number of I/O
443 *   operations is desired.
444 */
445void blk_queue_io_min(struct request_queue *q, unsigned int min)
446{
447	blk_limits_io_min(&q->limits, min);
448}
449EXPORT_SYMBOL(blk_queue_io_min);
450
451/**
452 * blk_limits_io_opt - set optimal request size for a device
453 * @limits: the queue limits
454 * @opt:  optimal request size in bytes
455 *
456 * Description:
457 *   Storage devices may report an optimal I/O size, which is the
458 *   device's preferred unit for sustained I/O.  This is rarely reported
459 *   for disk drives.  For RAID arrays it is usually the stripe width or
460 *   the internal track size.  A properly aligned multiple of
461 *   optimal_io_size is the preferred request size for workloads where
462 *   sustained throughput is desired.
463 */
464void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
465{
466	limits->io_opt = opt;
467}
468EXPORT_SYMBOL(blk_limits_io_opt);
469
470/**
471 * blk_queue_io_opt - set optimal request size for the queue
472 * @q:	the request queue for the device
473 * @opt:  optimal request size in bytes
474 *
475 * Description:
476 *   Storage devices may report an optimal I/O size, which is the
477 *   device's preferred unit for sustained I/O.  This is rarely reported
478 *   for disk drives.  For RAID arrays it is usually the stripe width or
479 *   the internal track size.  A properly aligned multiple of
480 *   optimal_io_size is the preferred request size for workloads where
481 *   sustained throughput is desired.
482 */
483void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
484{
485	blk_limits_io_opt(&q->limits, opt);
486}
487EXPORT_SYMBOL(blk_queue_io_opt);
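/*
 * Illustrative example (not from the original source): an MD-style RAID5
 * array with a 64 KiB chunk and four data disks would typically advertise
 * the chunk as the minimum and a full stripe as the optimal I/O size:
 *
 *	blk_limits_io_min(limits, 64 * 1024);
 *	blk_limits_io_opt(limits, 4 * 64 * 1024);
 */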
488
489/**
490 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
491 * @t:	the stacking driver (top)
492 * @b:  the underlying device (bottom)
493 **/
494void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
495{
496	blk_stack_limits(&t->limits, &b->limits, 0);
497}
498EXPORT_SYMBOL(blk_queue_stack_limits);
499
500/**
501 * blk_stack_limits - adjust queue_limits for stacked devices
502 * @t:	the stacking driver limits (top device)
503 * @b:  the underlying queue limits (bottom, component device)
504 * @start:  first data sector within component device
505 *
506 * Description:
507 *    This function is used by stacking drivers like MD and DM to ensure
508 *    that all component devices have compatible block sizes and
509 *    alignments.  The stacking driver must provide a queue_limits
510 *    struct (top) and then iteratively call the stacking function for
511 *    all component (bottom) devices.  The stacking function will
512 *    attempt to combine the values and ensure proper alignment.
513 *
514 *    Returns 0 if the top and bottom queue_limits are compatible.  The
515 *    top device's block sizes and alignment offsets may be adjusted to
516 *    ensure alignment with the bottom device. If no compatible sizes
517 *    and alignments exist, -1 is returned and the resulting top
518 *    queue_limits will have the misaligned flag set to indicate that
519 *    the alignment_offset is undefined.
520 */
521int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
522		     sector_t start)
523{
524	unsigned int top, bottom, alignment, ret = 0;
525
526	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
527	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
528	t->max_write_same_sectors = min(t->max_write_same_sectors,
529					b->max_write_same_sectors);
530	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
531
532	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
533					    b->seg_boundary_mask);
534
535	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
536	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
537						 b->max_integrity_segments);
538
539	t->max_segment_size = min_not_zero(t->max_segment_size,
540					   b->max_segment_size);
541
542	t->misaligned |= b->misaligned;
543
544	alignment = queue_limit_alignment_offset(b, start);
545
546	/* Bottom device has different alignment.  Check that it is
547	 * compatible with the current top alignment.
548	 */
549	if (t->alignment_offset != alignment) {
550
551		top = max(t->physical_block_size, t->io_min)
552			+ t->alignment_offset;
553		bottom = max(b->physical_block_size, b->io_min) + alignment;
554
555		/* Verify that top and bottom intervals line up */
556		if (max(top, bottom) & (min(top, bottom) - 1)) {
557			t->misaligned = 1;
558			ret = -1;
559		}
560	}
561
562	t->logical_block_size = max(t->logical_block_size,
563				    b->logical_block_size);
564
565	t->physical_block_size = max(t->physical_block_size,
566				     b->physical_block_size);
567
568	t->io_min = max(t->io_min, b->io_min);
569	t->io_opt = lcm(t->io_opt, b->io_opt);
570
571	t->cluster &= b->cluster;
572	t->discard_zeroes_data &= b->discard_zeroes_data;
573
574	/* Physical block size a multiple of the logical block size? */
575	if (t->physical_block_size & (t->logical_block_size - 1)) {
576		t->physical_block_size = t->logical_block_size;
577		t->misaligned = 1;
578		ret = -1;
579	}
580
581	/* Minimum I/O a multiple of the physical block size? */
582	if (t->io_min & (t->physical_block_size - 1)) {
583		t->io_min = t->physical_block_size;
584		t->misaligned = 1;
585		ret = -1;
586	}
587
588	/* Optimal I/O a multiple of the physical block size? */
589	if (t->io_opt & (t->physical_block_size - 1)) {
590		t->io_opt = 0;
591		t->misaligned = 1;
592		ret = -1;
593	}
594
595	t->raid_partial_stripes_expensive =
596		max(t->raid_partial_stripes_expensive,
597		    b->raid_partial_stripes_expensive);
598
599	/* Find lowest common alignment_offset */
600	t->alignment_offset = lcm(t->alignment_offset, alignment)
601		& (max(t->physical_block_size, t->io_min) - 1);
602
603	/* Verify that new alignment_offset is on a logical block boundary */
604	if (t->alignment_offset & (t->logical_block_size - 1)) {
605		t->misaligned = 1;
606		ret = -1;
607	}
608
609	/* Discard alignment and granularity */
610	if (b->discard_granularity) {
611		alignment = queue_limit_discard_alignment(b, start);
612
613		if (t->discard_granularity != 0 &&
614		    t->discard_alignment != alignment) {
615			top = t->discard_granularity + t->discard_alignment;
616			bottom = b->discard_granularity + alignment;
617
618			/* Verify that top and bottom intervals line up */
619			if ((max(top, bottom) % min(top, bottom)) != 0)
620				t->discard_misaligned = 1;
621		}
622
623		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
624						      b->max_discard_sectors);
625		t->discard_granularity = max(t->discard_granularity,
626					     b->discard_granularity);
627		t->discard_alignment = lcm(t->discard_alignment, alignment) %
628			t->discard_granularity;
629	}
630
631	return ret;
632}
633EXPORT_SYMBOL(blk_stack_limits);
634
635/**
636 * bdev_stack_limits - adjust queue limits for stacked drivers
637 * @t:	the stacking driver limits (top device)
638 * @bdev:  the component block_device (bottom)
639 * @start:  first data sector within component device
640 *
641 * Description:
642 *    Merges queue limits for a top device and a block_device.  Returns
643 *    0 if alignment didn't change.  Returns -1 if adding the bottom
644 *    device caused misalignment.
645 */
646int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
647		      sector_t start)
648{
649	struct request_queue *bq = bdev_get_queue(bdev);
650
651	start += get_start_sect(bdev);
652
653	return blk_stack_limits(t, &bq->limits, start);
654}
655EXPORT_SYMBOL(bdev_stack_limits);
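/*
 * Illustrative sketch (loosely modelled on what device-mapper does,
 * simplified): a stacking driver starts from permissive defaults and then
 * folds in each component device:
 *
 *	struct queue_limits limits;
 *
 *	blk_set_stacking_limits(&limits);
 *	// for each component bdev, with 'start' = first data sector:
 *	if (bdev_stack_limits(&limits, bdev, start) < 0)
 *		;	// combined limits are now flagged as misaligned
 */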
656
657/**
658 * disk_stack_limits - adjust queue limits for stacked drivers
659 * @disk:  MD/DM gendisk (top)
660 * @bdev:  the underlying block device (bottom)
661 * @offset:  offset to beginning of data within component device
662 *
663 * Description:
664 *    Merges the limits for a top level gendisk and a bottom level
665 *    block_device.
666 */
667void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
668		       sector_t offset)
669{
670	struct request_queue *t = disk->queue;
671
672	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
673		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
674
675		disk_name(disk, 0, top);
676		bdevname(bdev, bottom);
677
678		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
679		       top, bottom);
680	}
681}
682EXPORT_SYMBOL(disk_stack_limits);
683
684/**
685 * blk_queue_dma_pad - set pad mask
686 * @q:     the request queue for the device
687 * @mask:  pad mask
688 *
689 * Set dma pad mask.
690 *
691 * Appending pad buffer to a request modifies the last entry of a
692 * scatter list such that it includes the pad buffer.
693 **/
694void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
695{
696	q->dma_pad_mask = mask;
697}
698EXPORT_SYMBOL(blk_queue_dma_pad);
699
700/**
701 * blk_queue_update_dma_pad - update pad mask
702 * @q:     the request queue for the device
703 * @mask:  pad mask
704 *
705 * Update dma pad mask.
706 *
707 * Appending pad buffer to a request modifies the last entry of a
708 * scatter list such that it includes the pad buffer.
709 **/
710void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
711{
712	if (mask > q->dma_pad_mask)
713		q->dma_pad_mask = mask;
714}
715EXPORT_SYMBOL(blk_queue_update_dma_pad);
716
717/**
718 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
719 * @q:  the request queue for the device
720 * @dma_drain_needed: fn which returns non-zero if drain is necessary
721 * @buf:	physically contiguous buffer
722 * @size:	size of the buffer in bytes
723 *
724 * Some devices have excess DMA problems and can't simply discard (or
725 * zero fill) the unwanted piece of the transfer.  They have to have a
726 * real area of memory to transfer it into.  The use case for this is
727 * ATAPI devices in DMA mode.  If the packet command causes a transfer
728 * bigger than the transfer size some HBAs will lock up if there
729 * aren't DMA elements to contain the excess transfer.  What this API
730 * does is adjust the queue so that the buf is always appended
731 * silently to the scatterlist.
732 *
733 * Note: This routine adjusts max_hw_segments to make room for appending
734 * the drain buffer.  If you call blk_queue_max_segments() after calling
735 * this routine, you must set the limit to one fewer than your device
736 * can support otherwise there won't be room for the drain buffer.
737 */
738int blk_queue_dma_drain(struct request_queue *q,
739			       dma_drain_needed_fn *dma_drain_needed,
740			       void *buf, unsigned int size)
741{
742	if (queue_max_segments(q) < 2)
743		return -EINVAL;
744	/* make room for appending the drain */
745	blk_queue_max_segments(q, queue_max_segments(q) - 1);
746	q->dma_drain_needed = dma_drain_needed;
747	q->dma_drain_buffer = buf;
748	q->dma_drain_size = size;
749
750	return 0;
751}
752EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
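/*
 * Illustrative sketch (hypothetical, simplified names): an ATAPI-style
 * driver that needs a drain area for packet commands could wire it up as:
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		// drain only packet commands that actually move data
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC && blk_rq_bytes(rq);
 *	}
 *
 *	buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);	// MYDRV_DRAIN_SIZE is hypothetical
 *	if (buf)
 *		blk_queue_dma_drain(q, mydrv_drain_needed, buf, MYDRV_DRAIN_SIZE);
 */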
753
754/**
755 * blk_queue_segment_boundary - set boundary rules for segment merging
756 * @q:  the request queue for the device
757 * @mask:  the memory boundary mask
758 **/
759void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
760{
761	if (mask < PAGE_CACHE_SIZE - 1) {
762		mask = PAGE_CACHE_SIZE - 1;
763		printk(KERN_INFO "%s: set to minimum %lx\n",
764		       __func__, mask);
765	}
766
767	q->limits.seg_boundary_mask = mask;
768}
769EXPORT_SYMBOL(blk_queue_segment_boundary);
770
771/**
772 * blk_queue_dma_alignment - set dma length and memory alignment
773 * @q:     the request queue for the device
774 * @mask:  alignment mask
775 *
776 * description:
777 *    set required memory and length alignment for direct dma transactions.
778 *    this is used when building direct io requests for the queue.
779 *
780 **/
781void blk_queue_dma_alignment(struct request_queue *q, int mask)
782{
783	q->dma_alignment = mask;
784}
785EXPORT_SYMBOL(blk_queue_dma_alignment);
786
787/**
788 * blk_queue_update_dma_alignment - update dma length and memory alignment
789 * @q:     the request queue for the device
790 * @mask:  alignment mask
791 *
792 * description:
793 *    update required memory and length alignment for direct dma transactions.
794 *    If the requested alignment is larger than the current alignment, then
795 *    the current queue alignment is updated to the new value, otherwise it
796 *    is left alone.  The design of this is to allow multiple objects
797 *    (driver, device, transport etc) to set their respective
798 *    alignments without having them interfere.
799 *
800 **/
801void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
802{
803	BUG_ON(mask > PAGE_SIZE);
804
805	if (mask > q->dma_alignment)
806		q->dma_alignment = mask;
807}
808EXPORT_SYMBOL(blk_queue_update_dma_alignment);
809
810/**
811 * blk_queue_flush - configure queue's cache flush capability
812 * @q:		the request queue for the device
813 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
814 *
815 * Tell block layer cache flush capability of @q.  If it supports
816 * flushing, REQ_FLUSH should be set.  If it supports bypassing
817 * write cache for individual writes, REQ_FUA should be set.
818 */
819void blk_queue_flush(struct request_queue *q, unsigned int flush)
820{
821	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
822
823	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
824		flush &= ~REQ_FUA;
825
826	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
827}
828EXPORT_SYMBOL_GPL(blk_queue_flush);
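/*
 * Illustrative example (not from the original source): a device with a
 * volatile write cache that also honours FUA writes would announce both:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 *
 * A write-through device would pass 0 instead.
 */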
829
830void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
831{
832	q->flush_not_queueable = !queueable;
833}
834EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
835
836static int __init blk_settings_init(void)
837{
838	blk_max_low_pfn = max_low_pfn - 1;
839	blk_max_pfn = max_pfn - 1;
840	return 0;
841}
842subsys_initcall(blk_settings_init);
v4.10.11 (block/blk-settings.c)
  1/*
  2 * Functions related to setting various queue properties from drivers
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/gcd.h>
 11#include <linux/lcm.h>
 12#include <linux/jiffies.h>
 13#include <linux/gfp.h>
 14
 15#include "blk.h"
 16#include "blk-wbt.h"
 17
 18unsigned long blk_max_low_pfn;
 19EXPORT_SYMBOL(blk_max_low_pfn);
 20
 21unsigned long blk_max_pfn;
 22
 23/**
 24 * blk_queue_prep_rq - set a prepare_request function for queue
 25 * @q:		queue
 26 * @pfn:	prepare_request function
 27 *
 28 * It's possible for a queue to register a prepare_request callback which
 29 * is invoked before the request is handed to the request_fn. The goal of
 30 * the function is to prepare a request for I/O; it can be used, for
 31 * instance, to build a cdb from the request data.
 32 *
 33 */
 34void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 35{
 36	q->prep_rq_fn = pfn;
 37}
 38EXPORT_SYMBOL(blk_queue_prep_rq);
 39
 40/**
 41 * blk_queue_unprep_rq - set an unprepare_request function for queue
 42 * @q:		queue
 43 * @ufn:	unprepare_request function
 44 *
 45 * It's possible for a queue to register an unprepare_request callback
 46 * which is invoked before the request is finally completed. The goal
 47 * of the function is to deallocate any data that was allocated in the
 48 * prepare_request callback.
 49 *
 50 */
 51void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 52{
 53	q->unprep_rq_fn = ufn;
 54}
 55EXPORT_SYMBOL(blk_queue_unprep_rq);
 56
 57void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 58{
 59	q->softirq_done_fn = fn;
 60}
 61EXPORT_SYMBOL(blk_queue_softirq_done);
 62
 63void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 64{
 65	q->rq_timeout = timeout;
 66}
 67EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 68
 69void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
 70{
 71	q->rq_timed_out_fn = fn;
 72}
 73EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
 74
 75void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 76{
 77	q->lld_busy_fn = fn;
 78}
 79EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 80
 81/**
 82 * blk_set_default_limits - reset limits to default values
 83 * @lim:  the queue_limits structure to reset
 84 *
 85 * Description:
 86 *   Returns a queue_limit struct to its default state.
 87 */
 88void blk_set_default_limits(struct queue_limits *lim)
 89{
 90	lim->max_segments = BLK_MAX_SEGMENTS;
 91	lim->max_integrity_segments = 0;
 92	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 93	lim->virt_boundary_mask = 0;
 94	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 95	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 96	lim->max_dev_sectors = 0;
 97	lim->chunk_sectors = 0;
 98	lim->max_write_same_sectors = 0;
 99	lim->max_write_zeroes_sectors = 0;
100	lim->max_discard_sectors = 0;
101	lim->max_hw_discard_sectors = 0;
102	lim->discard_granularity = 0;
103	lim->discard_alignment = 0;
104	lim->discard_misaligned = 0;
105	lim->discard_zeroes_data = 0;
106	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
107	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
108	lim->alignment_offset = 0;
109	lim->io_opt = 0;
110	lim->misaligned = 0;
111	lim->cluster = 1;
112	lim->zoned = BLK_ZONED_NONE;
113}
114EXPORT_SYMBOL(blk_set_default_limits);
115
116/**
117 * blk_set_stacking_limits - set default limits for stacking devices
118 * @lim:  the queue_limits structure to reset
119 *
120 * Description:
121 *   Returns a queue_limit struct to its default state. Should be used
122 *   by stacking drivers like DM that have no internal limits.
123 */
124void blk_set_stacking_limits(struct queue_limits *lim)
125{
126	blk_set_default_limits(lim);
127
128	/* Inherit limits from component devices */
129	lim->discard_zeroes_data = 1;
130	lim->max_segments = USHRT_MAX;
131	lim->max_hw_sectors = UINT_MAX;
132	lim->max_segment_size = UINT_MAX;
133	lim->max_sectors = UINT_MAX;
134	lim->max_dev_sectors = UINT_MAX;
135	lim->max_write_same_sectors = UINT_MAX;
136	lim->max_write_zeroes_sectors = UINT_MAX;
137}
138EXPORT_SYMBOL(blk_set_stacking_limits);
139
140/**
141 * blk_queue_make_request - define an alternate make_request function for a device
142 * @q:  the request queue for the device to be affected
143 * @mfn: the alternate make_request function
144 *
145 * Description:
146 *    The normal way for &struct bios to be passed to a device
147 *    driver is for them to be collected into requests on a request
148 *    queue, and then to allow the device driver to select requests
149 *    off that queue when it is ready.  This works well for many block
150 *    devices. However some block devices (typically virtual devices
151 *    such as md or lvm) do not benefit from the processing on the
152 *    request queue, and are served best by having the requests passed
153 *    directly to them.  This can be achieved by providing a function
154 *    to blk_queue_make_request().
155 *
156 * Caveat:
157 *    The driver that does this *must* be able to deal appropriately
158 *    with buffers in "highmemory". This can be accomplished by either calling
159 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
160 *    blk_queue_bounce() to create a buffer in normal memory.
161 **/
162void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
163{
164	/*
165	 * set defaults
166	 */
167	q->nr_requests = BLKDEV_MAX_RQ;
168
169	q->make_request_fn = mfn;
170	blk_queue_dma_alignment(q, 511);
171	blk_queue_congestion_threshold(q);
172	q->nr_batching = BLK_BATCH_REQ;
173
174	blk_set_default_limits(&q->limits);
175
176	/*
177	 * by default assume old behaviour and bounce for any highmem page
178	 */
179	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
180}
181EXPORT_SYMBOL(blk_queue_make_request);
182
183/**
184 * blk_queue_bounce_limit - set bounce buffer limit for queue
185 * @q: the request queue for the device
186 * @max_addr: the maximum address the device can handle
187 *
188 * Description:
189 *    Different hardware can have different requirements as to what pages
190 *    it can do I/O directly to. A low level driver can call
191 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
192 *    buffers for doing I/O to pages residing above @max_addr.
193 **/
194void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
195{
196	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
197	int dma = 0;
198
199	q->bounce_gfp = GFP_NOIO;
200#if BITS_PER_LONG == 64
201	/*
202	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
203	 * some IOMMUs can handle everything, but I don't know of a
204	 * way to test this here.
205	 */
206	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
207		dma = 1;
208	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
209#else
210	if (b_pfn < blk_max_low_pfn)
211		dma = 1;
212	q->limits.bounce_pfn = b_pfn;
213#endif
214	if (dma) {
215		init_emergency_isa_pool();
216		q->bounce_gfp = GFP_NOIO | GFP_DMA;
217		q->limits.bounce_pfn = b_pfn;
218	}
219}
220EXPORT_SYMBOL(blk_queue_bounce_limit);
221
222/**
223 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
224 * @q:  the request queue for the device
225 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
226 *
227 * Description:
228 *    Enables a low level driver to set a hard upper limit,
229 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
230 *    the device driver based upon the capabilities of the I/O
231 *    controller.
232 *
233 *    max_dev_sectors is a hard limit imposed by the storage device for
234 *    READ/WRITE requests. It is set by the disk driver.
235 *
236 *    max_sectors is a soft limit imposed by the block layer for
237 *    filesystem type requests.  This value can be overridden on a
238 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
239 *    The soft limit can not exceed max_hw_sectors.
240 **/
241void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
242{
243	struct queue_limits *limits = &q->limits;
244	unsigned int max_sectors;
245
246	if ((max_hw_sectors << 9) < PAGE_SIZE) {
247		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
248		printk(KERN_INFO "%s: set to minimum %d\n",
249		       __func__, max_hw_sectors);
250	}
251
252	limits->max_hw_sectors = max_hw_sectors;
253	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
254	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
255	limits->max_sectors = max_sectors;
256	q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
257}
258EXPORT_SYMBOL(blk_queue_max_hw_sectors);
259
260/**
261 * blk_queue_chunk_sectors - set size of the chunk for this queue
262 * @q:  the request queue for the device
263 * @chunk_sectors:  chunk sectors in the usual 512b unit
264 *
265 * Description:
266 *    If a driver doesn't want IOs to cross a given chunk size, it can set
267 *    this limit and prevent merging across chunks. Note that the chunk size
268 *    must currently be a power-of-2 in sectors. Also note that the block
269 *    layer must accept a page worth of data at any offset. So if the
270 *    crossing of chunks is a hard limitation in the driver, it must still be
271 *    prepared to split single page bios.
272 **/
273void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
274{
275	BUG_ON(!is_power_of_2(chunk_sectors));
276	q->limits.chunk_sectors = chunk_sectors;
277}
278EXPORT_SYMBOL(blk_queue_chunk_sectors);
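/*
 * Illustrative example (not from the original source): a device that cannot
 * merge I/O across 128 KiB boundaries would set a chunk of 256 sectors
 * (a power of two, as required above):
 *
 *	blk_queue_chunk_sectors(q, 256);
 */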
279
280/**
281 * blk_queue_max_discard_sectors - set max sectors for a single discard
282 * @q:  the request queue for the device
283 * @max_discard_sectors: maximum number of sectors to discard
284 **/
285void blk_queue_max_discard_sectors(struct request_queue *q,
286		unsigned int max_discard_sectors)
287{
288	q->limits.max_hw_discard_sectors = max_discard_sectors;
289	q->limits.max_discard_sectors = max_discard_sectors;
290}
291EXPORT_SYMBOL(blk_queue_max_discard_sectors);
292
293/**
294 * blk_queue_max_write_same_sectors - set max sectors for a single write same
295 * @q:  the request queue for the device
296 * @max_write_same_sectors: maximum number of sectors to write per command
297 **/
298void blk_queue_max_write_same_sectors(struct request_queue *q,
299				      unsigned int max_write_same_sectors)
300{
301	q->limits.max_write_same_sectors = max_write_same_sectors;
302}
303EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
304
305/**
306 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
307 *                                      write zeroes
308 * @q:  the request queue for the device
309 * @max_write_zeroes_sectors: maximum number of sectors to write per command
310 **/
311void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
312		unsigned int max_write_zeroes_sectors)
313{
314	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
315}
316EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
317
318/**
319 * blk_queue_max_segments - set max hw segments for a request for this queue
320 * @q:  the request queue for the device
321 * @max_segments:  max number of segments
322 *
323 * Description:
324 *    Enables a low level driver to set an upper limit on the number of
325 *    hw data segments in a request.
326 **/
327void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
328{
329	if (!max_segments) {
330		max_segments = 1;
331		printk(KERN_INFO "%s: set to minimum %d\n",
332		       __func__, max_segments);
333	}
334
335	q->limits.max_segments = max_segments;
336}
337EXPORT_SYMBOL(blk_queue_max_segments);
338
339/**
340 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
341 * @q:  the request queue for the device
342 * @max_size:  max size of segment in bytes
343 *
344 * Description:
345 *    Enables a low level driver to set an upper limit on the size of a
346 *    coalesced segment
347 **/
348void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
349{
350	if (max_size < PAGE_SIZE) {
351		max_size = PAGE_SIZE;
352		printk(KERN_INFO "%s: set to minimum %d\n",
353		       __func__, max_size);
354	}
355
356	q->limits.max_segment_size = max_size;
357}
358EXPORT_SYMBOL(blk_queue_max_segment_size);
359
360/**
361 * blk_queue_logical_block_size - set logical block size for the queue
362 * @q:  the request queue for the device
363 * @size:  the logical block size, in bytes
364 *
365 * Description:
366 *   This should be set to the lowest possible block size that the
367 *   storage device can address.  The default of 512 covers most
368 *   hardware.
369 **/
370void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
371{
372	q->limits.logical_block_size = size;
373
374	if (q->limits.physical_block_size < size)
375		q->limits.physical_block_size = size;
376
377	if (q->limits.io_min < q->limits.physical_block_size)
378		q->limits.io_min = q->limits.physical_block_size;
379}
380EXPORT_SYMBOL(blk_queue_logical_block_size);
381
382/**
383 * blk_queue_physical_block_size - set physical block size for the queue
384 * @q:  the request queue for the device
385 * @size:  the physical block size, in bytes
386 *
387 * Description:
388 *   This should be set to the lowest possible sector size that the
389 *   hardware can operate on without reverting to read-modify-write
390 *   operations.
391 */
392void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
393{
394	q->limits.physical_block_size = size;
395
396	if (q->limits.physical_block_size < q->limits.logical_block_size)
397		q->limits.physical_block_size = q->limits.logical_block_size;
398
399	if (q->limits.io_min < q->limits.physical_block_size)
400		q->limits.io_min = q->limits.physical_block_size;
401}
402EXPORT_SYMBOL(blk_queue_physical_block_size);
403
404/**
405 * blk_queue_alignment_offset - set physical block alignment offset
406 * @q:	the request queue for the device
407 * @offset: alignment offset in bytes
408 *
409 * Description:
410 *   Some devices are naturally misaligned to compensate for things like
411 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
412 *   should call this function for devices whose first sector is not
413 *   naturally aligned.
414 */
415void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
416{
417	q->limits.alignment_offset =
418		offset & (q->limits.physical_block_size - 1);
419	q->limits.misaligned = 0;
420}
421EXPORT_SYMBOL(blk_queue_alignment_offset);
422
423/**
424 * blk_limits_io_min - set minimum request size for a device
425 * @limits: the queue limits
426 * @min:  smallest I/O size in bytes
427 *
428 * Description:
429 *   Some devices have an internal block size bigger than the reported
430 *   hardware sector size.  This function can be used to signal the
431 *   smallest I/O the device can perform without incurring a performance
432 *   penalty.
433 */
434void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
435{
436	limits->io_min = min;
437
438	if (limits->io_min < limits->logical_block_size)
439		limits->io_min = limits->logical_block_size;
440
441	if (limits->io_min < limits->physical_block_size)
442		limits->io_min = limits->physical_block_size;
443}
444EXPORT_SYMBOL(blk_limits_io_min);
445
446/**
447 * blk_queue_io_min - set minimum request size for the queue
448 * @q:	the request queue for the device
449 * @min:  smallest I/O size in bytes
450 *
451 * Description:
452 *   Storage devices may report a granularity or preferred minimum I/O
453 *   size which is the smallest request the device can perform without
454 *   incurring a performance penalty.  For disk drives this is often the
455 *   physical block size.  For RAID arrays it is often the stripe chunk
456 *   size.  A properly aligned multiple of minimum_io_size is the
457 *   preferred request size for workloads where a high number of I/O
458 *   operations is desired.
459 */
460void blk_queue_io_min(struct request_queue *q, unsigned int min)
461{
462	blk_limits_io_min(&q->limits, min);
463}
464EXPORT_SYMBOL(blk_queue_io_min);
465
466/**
467 * blk_limits_io_opt - set optimal request size for a device
468 * @limits: the queue limits
469 * @opt:  optimal request size in bytes
470 *
471 * Description:
472 *   Storage devices may report an optimal I/O size, which is the
473 *   device's preferred unit for sustained I/O.  This is rarely reported
474 *   for disk drives.  For RAID arrays it is usually the stripe width or
475 *   the internal track size.  A properly aligned multiple of
476 *   optimal_io_size is the preferred request size for workloads where
477 *   sustained throughput is desired.
478 */
479void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
480{
481	limits->io_opt = opt;
482}
483EXPORT_SYMBOL(blk_limits_io_opt);
484
485/**
486 * blk_queue_io_opt - set optimal request size for the queue
487 * @q:	the request queue for the device
488 * @opt:  optimal request size in bytes
489 *
490 * Description:
491 *   Storage devices may report an optimal I/O size, which is the
492 *   device's preferred unit for sustained I/O.  This is rarely reported
493 *   for disk drives.  For RAID arrays it is usually the stripe width or
494 *   the internal track size.  A properly aligned multiple of
495 *   optimal_io_size is the preferred request size for workloads where
496 *   sustained throughput is desired.
497 */
498void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
499{
500	blk_limits_io_opt(&q->limits, opt);
501}
502EXPORT_SYMBOL(blk_queue_io_opt);
503
504/**
505 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
506 * @t:	the stacking driver (top)
507 * @b:  the underlying device (bottom)
508 **/
509void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
510{
511	blk_stack_limits(&t->limits, &b->limits, 0);
512}
513EXPORT_SYMBOL(blk_queue_stack_limits);
514
515/**
516 * blk_stack_limits - adjust queue_limits for stacked devices
517 * @t:	the stacking driver limits (top device)
518 * @b:  the underlying queue limits (bottom, component device)
519 * @start:  first data sector within component device
520 *
521 * Description:
522 *    This function is used by stacking drivers like MD and DM to ensure
523 *    that all component devices have compatible block sizes and
524 *    alignments.  The stacking driver must provide a queue_limits
525 *    struct (top) and then iteratively call the stacking function for
526 *    all component (bottom) devices.  The stacking function will
527 *    attempt to combine the values and ensure proper alignment.
528 *
529 *    Returns 0 if the top and bottom queue_limits are compatible.  The
530 *    top device's block sizes and alignment offsets may be adjusted to
531 *    ensure alignment with the bottom device. If no compatible sizes
532 *    and alignments exist, -1 is returned and the resulting top
533 *    queue_limits will have the misaligned flag set to indicate that
534 *    the alignment_offset is undefined.
535 */
536int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
537		     sector_t start)
538{
539	unsigned int top, bottom, alignment, ret = 0;
540
541	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
542	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
543	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
544	t->max_write_same_sectors = min(t->max_write_same_sectors,
545					b->max_write_same_sectors);
546	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
547					b->max_write_zeroes_sectors);
548	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
549
550	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
551					    b->seg_boundary_mask);
552	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
553					    b->virt_boundary_mask);
554
555	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
556	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
557						 b->max_integrity_segments);
558
559	t->max_segment_size = min_not_zero(t->max_segment_size,
560					   b->max_segment_size);
561
562	t->misaligned |= b->misaligned;
563
564	alignment = queue_limit_alignment_offset(b, start);
565
566	/* Bottom device has different alignment.  Check that it is
567	 * compatible with the current top alignment.
568	 */
569	if (t->alignment_offset != alignment) {
570
571		top = max(t->physical_block_size, t->io_min)
572			+ t->alignment_offset;
573		bottom = max(b->physical_block_size, b->io_min) + alignment;
574
575		/* Verify that top and bottom intervals line up */
576		if (max(top, bottom) % min(top, bottom)) {
577			t->misaligned = 1;
578			ret = -1;
579		}
580	}
581
582	t->logical_block_size = max(t->logical_block_size,
583				    b->logical_block_size);
584
585	t->physical_block_size = max(t->physical_block_size,
586				     b->physical_block_size);
587
588	t->io_min = max(t->io_min, b->io_min);
589	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
590
591	t->cluster &= b->cluster;
592	t->discard_zeroes_data &= b->discard_zeroes_data;
593
594	/* Physical block size a multiple of the logical block size? */
595	if (t->physical_block_size & (t->logical_block_size - 1)) {
596		t->physical_block_size = t->logical_block_size;
597		t->misaligned = 1;
598		ret = -1;
599	}
600
601	/* Minimum I/O a multiple of the physical block size? */
602	if (t->io_min & (t->physical_block_size - 1)) {
603		t->io_min = t->physical_block_size;
604		t->misaligned = 1;
605		ret = -1;
606	}
607
608	/* Optimal I/O a multiple of the physical block size? */
609	if (t->io_opt & (t->physical_block_size - 1)) {
610		t->io_opt = 0;
611		t->misaligned = 1;
612		ret = -1;
613	}
614
615	t->raid_partial_stripes_expensive =
616		max(t->raid_partial_stripes_expensive,
617		    b->raid_partial_stripes_expensive);
618
619	/* Find lowest common alignment_offset */
620	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
621		% max(t->physical_block_size, t->io_min);
622
623	/* Verify that new alignment_offset is on a logical block boundary */
624	if (t->alignment_offset & (t->logical_block_size - 1)) {
625		t->misaligned = 1;
626		ret = -1;
627	}
628
629	/* Discard alignment and granularity */
630	if (b->discard_granularity) {
631		alignment = queue_limit_discard_alignment(b, start);
632
633		if (t->discard_granularity != 0 &&
634		    t->discard_alignment != alignment) {
635			top = t->discard_granularity + t->discard_alignment;
636			bottom = b->discard_granularity + alignment;
637
638			/* Verify that top and bottom intervals line up */
639			if ((max(top, bottom) % min(top, bottom)) != 0)
640				t->discard_misaligned = 1;
641		}
642
643		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
644						      b->max_discard_sectors);
645		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
646							 b->max_hw_discard_sectors);
647		t->discard_granularity = max(t->discard_granularity,
648					     b->discard_granularity);
649		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
650			t->discard_granularity;
651	}
652
653	if (b->chunk_sectors)
654		t->chunk_sectors = min_not_zero(t->chunk_sectors,
655						b->chunk_sectors);
656
657	return ret;
658}
659EXPORT_SYMBOL(blk_stack_limits);
660
661/**
662 * bdev_stack_limits - adjust queue limits for stacked drivers
663 * @t:	the stacking driver limits (top device)
664 * @bdev:  the component block_device (bottom)
665 * @start:  first data sector within component device
666 *
667 * Description:
668 *    Merges queue limits for a top device and a block_device.  Returns
669 *    0 if alignment didn't change.  Returns -1 if adding the bottom
670 *    device caused misalignment.
671 */
672int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
673		      sector_t start)
674{
675	struct request_queue *bq = bdev_get_queue(bdev);
676
677	start += get_start_sect(bdev);
678
679	return blk_stack_limits(t, &bq->limits, start);
680}
681EXPORT_SYMBOL(bdev_stack_limits);
682
683/**
684 * disk_stack_limits - adjust queue limits for stacked drivers
685 * @disk:  MD/DM gendisk (top)
686 * @bdev:  the underlying block device (bottom)
687 * @offset:  offset to beginning of data within component device
688 *
689 * Description:
690 *    Merges the limits for a top level gendisk and a bottom level
691 *    block_device.
692 */
693void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
694		       sector_t offset)
695{
696	struct request_queue *t = disk->queue;
697
698	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
699		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
700
701		disk_name(disk, 0, top);
702		bdevname(bdev, bottom);
703
704		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
705		       top, bottom);
706	}
707}
708EXPORT_SYMBOL(disk_stack_limits);
709
710/**
711 * blk_queue_dma_pad - set pad mask
712 * @q:     the request queue for the device
713 * @mask:  pad mask
714 *
715 * Set dma pad mask.
716 *
717 * Appending pad buffer to a request modifies the last entry of a
718 * scatter list such that it includes the pad buffer.
719 **/
720void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
721{
722	q->dma_pad_mask = mask;
723}
724EXPORT_SYMBOL(blk_queue_dma_pad);
725
726/**
727 * blk_queue_update_dma_pad - update pad mask
728 * @q:     the request queue for the device
729 * @mask:  pad mask
730 *
731 * Update dma pad mask.
732 *
733 * Appending pad buffer to a request modifies the last entry of a
734 * scatter list such that it includes the pad buffer.
735 **/
736void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
737{
738	if (mask > q->dma_pad_mask)
739		q->dma_pad_mask = mask;
740}
741EXPORT_SYMBOL(blk_queue_update_dma_pad);
742
743/**
744 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
745 * @q:  the request queue for the device
746 * @dma_drain_needed: fn which returns non-zero if drain is necessary
747 * @buf:	physically contiguous buffer
748 * @size:	size of the buffer in bytes
749 *
750 * Some devices have excess DMA problems and can't simply discard (or
751 * zero fill) the unwanted piece of the transfer.  They have to have a
752 * real area of memory to transfer it into.  The use case for this is
753 * ATAPI devices in DMA mode.  If the packet command causes a transfer
754 * bigger than the transfer size some HBAs will lock up if there
755 * aren't DMA elements to contain the excess transfer.  What this API
756 * does is adjust the queue so that the buf is always appended
757 * silently to the scatterlist.
758 *
759 * Note: This routine adjusts max_hw_segments to make room for appending
760 * the drain buffer.  If you call blk_queue_max_segments() after calling
761 * this routine, you must set the limit to one fewer than your device
762 * can support otherwise there won't be room for the drain buffer.
763 */
764int blk_queue_dma_drain(struct request_queue *q,
765			       dma_drain_needed_fn *dma_drain_needed,
766			       void *buf, unsigned int size)
767{
768	if (queue_max_segments(q) < 2)
769		return -EINVAL;
770	/* make room for appending the drain */
771	blk_queue_max_segments(q, queue_max_segments(q) - 1);
772	q->dma_drain_needed = dma_drain_needed;
773	q->dma_drain_buffer = buf;
774	q->dma_drain_size = size;
775
776	return 0;
777}
778EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
779
780/**
781 * blk_queue_segment_boundary - set boundary rules for segment merging
782 * @q:  the request queue for the device
783 * @mask:  the memory boundary mask
784 **/
785void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
786{
787	if (mask < PAGE_SIZE - 1) {
788		mask = PAGE_SIZE - 1;
789		printk(KERN_INFO "%s: set to minimum %lx\n",
790		       __func__, mask);
791	}
792
793	q->limits.seg_boundary_mask = mask;
794}
795EXPORT_SYMBOL(blk_queue_segment_boundary);
796
797/**
798 * blk_queue_virt_boundary - set boundary rules for bio merging
799 * @q:  the request queue for the device
800 * @mask:  the memory boundary mask
801 **/
802void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
803{
804	q->limits.virt_boundary_mask = mask;
805}
806EXPORT_SYMBOL(blk_queue_virt_boundary);
807
808/**
809 * blk_queue_dma_alignment - set dma length and memory alignment
810 * @q:     the request queue for the device
811 * @mask:  alignment mask
812 *
813 * description:
814 *    set required memory and length alignment for direct dma transactions.
815 *    this is used when building direct io requests for the queue.
816 *
817 **/
818void blk_queue_dma_alignment(struct request_queue *q, int mask)
819{
820	q->dma_alignment = mask;
821}
822EXPORT_SYMBOL(blk_queue_dma_alignment);
823
824/**
825 * blk_queue_update_dma_alignment - update dma length and memory alignment
826 * @q:     the request queue for the device
827 * @mask:  alignment mask
828 *
829 * description:
830 *    update required memory and length alignment for direct dma transactions.
831 *    If the requested alignment is larger than the current alignment, then
832 *    the current queue alignment is updated to the new value, otherwise it
833 *    is left alone.  The design of this is to allow multiple objects
834 *    (driver, device, transport etc) to set their respective
835 *    alignments without having them interfere.
836 *
837 **/
838void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
839{
840	BUG_ON(mask > PAGE_SIZE);
841
842	if (mask > q->dma_alignment)
843		q->dma_alignment = mask;
844}
845EXPORT_SYMBOL(blk_queue_update_dma_alignment);
846
847void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
848{
849	spin_lock_irq(q->queue_lock);
850	if (queueable)
851		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
852	else
853		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
854	spin_unlock_irq(q->queue_lock);
855}
856EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
857
858/**
859 * blk_set_queue_depth - tell the block layer about the device queue depth
860 * @q:		the request queue for the device
861 * @depth:		queue depth
862 *
863 */
864void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
865{
866	q->queue_depth = depth;
867	wbt_set_queue_depth(q->rq_wb, depth);
868}
869EXPORT_SYMBOL(blk_set_queue_depth);
870
871/**
872 * blk_queue_write_cache - configure queue's write cache
873 * @q:		the request queue for the device
874 * @wc:		write back cache on or off
875 * @fua:	device supports FUA writes, if true
876 *
877 * Tell the block layer about the write cache of @q.
878 */
879void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
880{
881	spin_lock_irq(q->queue_lock);
882	if (wc)
883		queue_flag_set(QUEUE_FLAG_WC, q);
884	else
885		queue_flag_clear(QUEUE_FLAG_WC, q);
886	if (fua)
887		queue_flag_set(QUEUE_FLAG_FUA, q);
888	else
889		queue_flag_clear(QUEUE_FLAG_FUA, q);
890	spin_unlock_irq(q->queue_lock);
891
892	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
893}
894EXPORT_SYMBOL_GPL(blk_queue_write_cache);
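/*
 * Illustrative example (not from the original source): a driver for a device
 * with a volatile write-back cache and FUA support would call:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * while a write-through device would call blk_queue_write_cache(q, false, false).
 */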
895
896static int __init blk_settings_init(void)
897{
898	blk_max_low_pfn = max_low_pfn - 1;
899	blk_max_pfn = max_pfn - 1;
900	return 0;
901}
902subsys_initcall(blk_settings_init);