v3.1
 
  1/*
  2 * Functions related to segment and merge handling
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/bio.h>
  7#include <linux/blkdev.h>
  8#include <linux/scatterlist.h>
  9
 10#include "blk.h"
 11
 12static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 13					     struct bio *bio)
 14{
 15	struct bio_vec *bv, *bvprv = NULL;
 16	int cluster, i, high, highprv = 1;
 17	unsigned int seg_size, nr_phys_segs;
 18	struct bio *fbio, *bbio;
 19
 20	if (!bio)
 21		return 0;
 22
 23	fbio = bio;
 24	cluster = blk_queue_cluster(q);
 25	seg_size = 0;
 26	nr_phys_segs = 0;
 27	for_each_bio(bio) {
 28		bio_for_each_segment(bv, bio, i) {
 29			/*
 30			 * the trick here is making sure that a high page is
 31			 * never considered part of another segment, since that
 32			 * might change with the bounce page.
 33			 */
 34			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 35			if (high || highprv)
 36				goto new_segment;
 37			if (cluster) {
 38				if (seg_size + bv->bv_len
 39				    > queue_max_segment_size(q))
 40					goto new_segment;
 41				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 42					goto new_segment;
 43				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
 44					goto new_segment;
 45
 46				seg_size += bv->bv_len;
 47				bvprv = bv;
 48				continue;
 49			}
 50new_segment:
 51			if (nr_phys_segs == 1 && seg_size >
 52			    fbio->bi_seg_front_size)
 53				fbio->bi_seg_front_size = seg_size;
 54
 55			nr_phys_segs++;
 56			bvprv = bv;
 57			seg_size = bv->bv_len;
 58			highprv = high;
 59		}
 60		bbio = bio;
 61	}
 62
 63	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
 64		fbio->bi_seg_front_size = seg_size;
 65	if (seg_size > bbio->bi_seg_back_size)
 66		bbio->bi_seg_back_size = seg_size;
 67
 68	return nr_phys_segs;
 69}
 70
 71void blk_recalc_rq_segments(struct request *rq)
 72{
 73	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 74}
 75
 76void blk_recount_segments(struct request_queue *q, struct bio *bio)
 77{
 78	struct bio *nxt = bio->bi_next;
 79
 80	bio->bi_next = NULL;
 81	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
 82	bio->bi_next = nxt;
 83	bio->bi_flags |= (1 << BIO_SEG_VALID);
 84}
 85EXPORT_SYMBOL(blk_recount_segments);
 86
 87static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 88				   struct bio *nxt)
 89{
 90	if (!blk_queue_cluster(q))
 91		return 0;
 92
 93	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
 94	    queue_max_segment_size(q))
 95		return 0;
 96
 97	if (!bio_has_data(bio))
 98		return 1;
 99
100	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
101		return 0;
102
103	/*
104	 * bio and nxt are contiguous in memory; check if the queue allows
105	 * these two to be merged into one
106	 */
107	if (BIO_SEG_BOUNDARY(q, bio, nxt))
108		return 1;
109
110	return 0;
111}
112
113/*
114 * map a request to scatterlist, return number of sg entries setup. Caller
115 * must make sure sg can hold rq->nr_phys_segments entries
116 */
117int blk_rq_map_sg(struct request_queue *q, struct request *rq,
118		  struct scatterlist *sglist)
119{
120	struct bio_vec *bvec, *bvprv;
121	struct req_iterator iter;
122	struct scatterlist *sg;
123	int nsegs, cluster;
124
125	nsegs = 0;
126	cluster = blk_queue_cluster(q);
127
128	/*
129	 * for each bio in rq
130	 */
131	bvprv = NULL;
132	sg = NULL;
133	rq_for_each_segment(bvec, rq, iter) {
134		int nbytes = bvec->bv_len;
135
136		if (bvprv && cluster) {
137			if (sg->length + nbytes > queue_max_segment_size(q))
138				goto new_segment;
139
140			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
141				goto new_segment;
142			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
143				goto new_segment;
144
145			sg->length += nbytes;
146		} else {
147new_segment:
148			if (!sg)
149				sg = sglist;
150			else {
151				/*
152				 * If the driver previously mapped a shorter
153				 * list, we could see a termination bit
154				 * prematurely unless it fully inits the sg
155				 * table on each mapping. We KNOW that there
156				 * must be more entries here or the driver
157				 * would be buggy, so force clear the
158				 * termination bit to avoid doing a full
159				 * sg_init_table() in drivers for each command.
160				 */
161				sg->page_link &= ~0x02;
162				sg = sg_next(sg);
163			}
164
165			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
166			nsegs++;
167		}
168		bvprv = bvec;
169	} /* segments in rq */
170
171
172	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
173	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
174		unsigned int pad_len =
175			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
176
177		sg->length += pad_len;
178		rq->extra_len += pad_len;
179	}
180
181	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
182		if (rq->cmd_flags & REQ_WRITE)
183			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
184
185		sg->page_link &= ~0x02;
186		sg = sg_next(sg);
187		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
188			    q->dma_drain_size,
189			    ((unsigned long)q->dma_drain_buffer) &
190			    (PAGE_SIZE - 1));
191		nsegs++;
192		rq->extra_len += q->dma_drain_size;
193	}
194
195	if (sg)
196		sg_mark_end(sg);
197
198	return nsegs;
199}
200EXPORT_SYMBOL(blk_rq_map_sg);
201
202static inline int ll_new_hw_segment(struct request_queue *q,
203				    struct request *req,
204				    struct bio *bio)
205{
206	int nr_phys_segs = bio_phys_segments(q, bio);
207
208	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
209		goto no_merge;
210
211	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
212		goto no_merge;
213
214	/*
215	 * This will form the start of a new hw segment.  Bump both
216	 * counters.
217	 */
218	req->nr_phys_segments += nr_phys_segs;
219	return 1;
220
221no_merge:
222	req->cmd_flags |= REQ_NOMERGE;
223	if (req == q->last_merge)
224		q->last_merge = NULL;
225	return 0;
226}
227
228int ll_back_merge_fn(struct request_queue *q, struct request *req,
229		     struct bio *bio)
230{
231	unsigned short max_sectors;
232
233	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
234		max_sectors = queue_max_hw_sectors(q);
235	else
236		max_sectors = queue_max_sectors(q);
237
238	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
239		req->cmd_flags |= REQ_NOMERGE;
240		if (req == q->last_merge)
241			q->last_merge = NULL;
242		return 0;
243	}
244	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
245		blk_recount_segments(q, req->biotail);
246	if (!bio_flagged(bio, BIO_SEG_VALID))
247		blk_recount_segments(q, bio);
248
249	return ll_new_hw_segment(q, req, bio);
250}
251
252int ll_front_merge_fn(struct request_queue *q, struct request *req,
253		      struct bio *bio)
254{
255	unsigned short max_sectors;
256
257	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
258		max_sectors = queue_max_hw_sectors(q);
259	else
260		max_sectors = queue_max_sectors(q);
261
262
263	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
264		req->cmd_flags |= REQ_NOMERGE;
265		if (req == q->last_merge)
266			q->last_merge = NULL;
267		return 0;
268	}
269	if (!bio_flagged(bio, BIO_SEG_VALID))
270		blk_recount_segments(q, bio);
271	if (!bio_flagged(req->bio, BIO_SEG_VALID))
272		blk_recount_segments(q, req->bio);
273
274	return ll_new_hw_segment(q, req, bio);
275}
276
277static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
278				struct request *next)
279{
280	int total_phys_segments;
281	unsigned int seg_size =
282		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
283
284	/*
 285	 * First check whether either of the requests is a re-queued
 286	 * request; if so, they can't be merged.
287	 */
288	if (req->special || next->special)
289		return 0;
290
291	/*
292	 * Will it become too large?
293	 */
294	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
295		return 0;
296
297	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
298	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
299		if (req->nr_phys_segments == 1)
300			req->bio->bi_seg_front_size = seg_size;
301		if (next->nr_phys_segments == 1)
302			next->biotail->bi_seg_back_size = seg_size;
303		total_phys_segments--;
304	}
305
306	if (total_phys_segments > queue_max_segments(q))
307		return 0;
308
309	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
310		return 0;
311
312	/* Merge is OK... */
313	req->nr_phys_segments = total_phys_segments;
314	return 1;
315}
316
317/**
318 * blk_rq_set_mixed_merge - mark a request as mixed merge
319 * @rq: request to mark as mixed merge
320 *
321 * Description:
322 *     @rq is about to be mixed merged.  Make sure the attributes
323 *     which can be mixed are set in each bio and mark @rq as mixed
324 *     merged.
325 */
326void blk_rq_set_mixed_merge(struct request *rq)
327{
328	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
329	struct bio *bio;
330
331	if (rq->cmd_flags & REQ_MIXED_MERGE)
332		return;
333
334	/*
335	 * @rq will no longer represent mixable attributes for all the
336	 * contained bios.  It will just track those of the first one.
 337	 * Distribute the attributes to each bio.
338	 */
339	for (bio = rq->bio; bio; bio = bio->bi_next) {
340		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
341			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
342		bio->bi_rw |= ff;
343	}
344	rq->cmd_flags |= REQ_MIXED_MERGE;
345}
346
347static void blk_account_io_merge(struct request *req)
348{
349	if (blk_do_io_stat(req)) {
350		struct hd_struct *part;
351		int cpu;
352
353		cpu = part_stat_lock();
354		part = req->part;
355
356		part_round_stats(cpu, part);
357		part_dec_in_flight(part, rq_data_dir(req));
358
359		hd_struct_put(part);
360		part_stat_unlock();
361	}
362}
363
364/*
365 * Has to be called with the request spinlock acquired
366 */
367static int attempt_merge(struct request_queue *q, struct request *req,
368			  struct request *next)
369{
370	if (!rq_mergeable(req) || !rq_mergeable(next))
371		return 0;
372
373	/*
374	 * Don't merge file system requests and discard requests
375	 */
376	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
377		return 0;
378
379	/*
380	 * Don't merge discard requests and secure discard requests
381	 */
382	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
383		return 0;
384
385	/*
386	 * not contiguous
387	 */
388	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
389		return 0;
390
391	if (rq_data_dir(req) != rq_data_dir(next)
392	    || req->rq_disk != next->rq_disk
393	    || next->special)
394		return 0;
395
396	/*
397	 * If we are allowed to merge, then append bio list
398	 * from next to rq and release next. merge_requests_fn
399	 * will have updated segment counts, update sector
400	 * counts here.
401	 */
402	if (!ll_merge_requests_fn(q, req, next))
403		return 0;
404
405	/*
406	 * If failfast settings disagree or any of the two is already
407	 * a mixed merge, mark both as mixed before proceeding.  This
408	 * makes sure that all involved bios have mixable attributes
409	 * set properly.
410	 */
411	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
412	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
413	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
414		blk_rq_set_mixed_merge(req);
415		blk_rq_set_mixed_merge(next);
416	}
417
418	/*
419	 * At this point we have either done a back merge
420	 * or front merge. We need the smaller start_time of
421	 * the merged requests to be the current request
422	 * for accounting purposes.
423	 */
424	if (time_after(req->start_time, next->start_time))
425		req->start_time = next->start_time;
426
427	req->biotail->bi_next = next->bio;
428	req->biotail = next->biotail;
429
430	req->__data_len += blk_rq_bytes(next);
431
432	elv_merge_requests(q, req, next);
433
434	/*
435	 * 'next' is going away, so update stats accordingly
436	 */
437	blk_account_io_merge(next);
438
439	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
440	if (blk_rq_cpu_valid(next))
441		req->cpu = next->cpu;
442
 443	/* ownership of bio passed from next to req */
444	next->bio = NULL;
445	__blk_put_request(q, next);
446	return 1;
447}
448
449int attempt_back_merge(struct request_queue *q, struct request *rq)
450{
451	struct request *next = elv_latter_request(q, rq);
452
453	if (next)
454		return attempt_merge(q, rq, next);
455
456	return 0;
457}
458
459int attempt_front_merge(struct request_queue *q, struct request *rq)
460{
461	struct request *prev = elv_former_request(q, rq);
462
463	if (prev)
464		return attempt_merge(q, prev, rq);
465
466	return 0;
467}
468
469int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
470			  struct request *next)
471{
472	return attempt_merge(q, rq, next);
473}
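
As a hedged illustration of how blk_rq_map_sg() from the v3.1 listing above is consumed, a block driver typically sizes a scatterlist from rq->nr_phys_segments (as the comment above blk_rq_map_sg() requires), fills it with blk_rq_map_sg(), and hands the result to the DMA API. The sketch below is not part of blk-merge.c; struct mydev and mydev_map_request() are hypothetical, while sg_alloc_table(), blk_rq_map_sg() and dma_map_sg() are real kernel interfaces.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver context; only the device used for DMA mapping matters here. */
struct mydev {
	struct device *dma_dev;
};

static int mydev_map_request(struct mydev *dev, struct request *rq)
{
	struct sg_table sgt;
	int nents, mapped;

	/* the sg table must hold at least rq->nr_phys_segments entries */
	if (sg_alloc_table(&sgt, rq->nr_phys_segments, GFP_ATOMIC))
		return -ENOMEM;

	/* collapse the request's bio_vecs into physical segments */
	nents = blk_rq_map_sg(rq->q, rq, sgt.sgl);

	/* hand the resulting segments to the DMA API */
	mapped = dma_map_sg(dev->dma_dev, sgt.sgl, nents,
			    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
						     : DMA_FROM_DEVICE);
	if (!mapped) {
		sg_free_table(&sgt);
		return -EIO;
	}

	/* ... program the hardware with the 'mapped' entries of sgt.sgl ... */
	return 0;
}
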
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to segment and merge handling
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/scatterlist.h>
 10
 11#include <trace/events/block.h>
 12
 13#include "blk.h"
 14
 15static inline bool bio_will_gap(struct request_queue *q,
 16		struct request *prev_rq, struct bio *prev, struct bio *next)
 17{
 18	struct bio_vec pb, nb;
 19
 20	if (!bio_has_data(prev) || !queue_virt_boundary(q))
 21		return false;
 22
 23	/*
 24	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
 25	 * is quite difficult to respect the sg gap limit.  We work hard to
 26	 * merge a huge number of small single bios in case of mkfs.
 27	 */
 28	if (prev_rq)
 29		bio_get_first_bvec(prev_rq->bio, &pb);
 30	else
 31		bio_get_first_bvec(prev, &pb);
 32	if (pb.bv_offset & queue_virt_boundary(q))
 33		return true;
 34
 35	/*
 36	 * We don't need to worry about the situation that the merged segment
 37	 * ends in unaligned virt boundary:
 38	 *
 39	 * - if 'pb' ends aligned, the merged segment ends aligned
 40	 * - if 'pb' ends unaligned, the next bio must include
 41	 *   one single bvec of 'nb', otherwise the 'nb' can't
 42	 *   merge with 'pb'
 43	 */
 44	bio_get_last_bvec(prev, &pb);
 45	bio_get_first_bvec(next, &nb);
 46	if (biovec_phys_mergeable(q, &pb, &nb))
 47		return false;
 48	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
 49}
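/*
 * Illustrative note, not part of the upstream file: with a virt boundary of
 * 0xfff (4 KiB - 1, the typical NVMe PRP constraint), bio_will_gap() above
 * only lets two bios share a request when no hole would be left inside a
 * 4 KiB window.  For example, if the last bvec of 'prev' ends at page offset
 * 0x1200, the first bvec of 'next' starts at bv_offset 0x200, and the two
 * are not physically contiguous, then 0x1200 & 0xfff != 0, so
 * __bvec_gap_to_prev() reports a gap and the merge is rejected.
 */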
 50
 51static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 52{
 53	return bio_will_gap(req->q, req, req->biotail, bio);
 54}
 55
 56static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 57{
 58	return bio_will_gap(req->q, NULL, bio, req->bio);
 59}
 60
 61static struct bio *blk_bio_discard_split(struct request_queue *q,
 62					 struct bio *bio,
 63					 struct bio_set *bs,
 64					 unsigned *nsegs)
 65{
 66	unsigned int max_discard_sectors, granularity;
 67	int alignment;
 68	sector_t tmp;
 69	unsigned split_sectors;
 70
 71	*nsegs = 1;
 72
 73	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 74	granularity = max(q->limits.discard_granularity >> 9, 1U);
 75
 76	max_discard_sectors = min(q->limits.max_discard_sectors,
 77			bio_allowed_max_sectors(q));
 78	max_discard_sectors -= max_discard_sectors % granularity;
 79
 80	if (unlikely(!max_discard_sectors)) {
 81		/* XXX: warn */
 82		return NULL;
 83	}
 84
 85	if (bio_sectors(bio) <= max_discard_sectors)
 86		return NULL;
 87
 88	split_sectors = max_discard_sectors;
 89
 90	/*
 91	 * If the next starting sector would be misaligned, stop the discard at
 92	 * the previous aligned sector.
 93	 */
 94	alignment = (q->limits.discard_alignment >> 9) % granularity;
 95
 96	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
 97	tmp = sector_div(tmp, granularity);
 98
 99	if (split_sectors > tmp)
100		split_sectors -= tmp;
101
102	return bio_split(bio, split_sectors, GFP_NOIO, bs);
103}
104
105static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
106		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
107{
108	*nsegs = 0;
109
110	if (!q->limits.max_write_zeroes_sectors)
111		return NULL;
112
113	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
114		return NULL;
115
116	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
117}
118
119static struct bio *blk_bio_write_same_split(struct request_queue *q,
120					    struct bio *bio,
121					    struct bio_set *bs,
122					    unsigned *nsegs)
123{
124	*nsegs = 1;
125
126	if (!q->limits.max_write_same_sectors)
127		return NULL;
128
129	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
130		return NULL;
131
132	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
133}
134
135/*
136 * Return the maximum number of sectors from the start of a bio that may be
137 * submitted as a single request to a block device. If enough sectors remain,
138 * align the end to the physical block size. Otherwise align the end to the
139 * logical block size. This approach minimizes the number of non-aligned
140 * requests that are submitted to a block device if the start of a bio is not
141 * aligned to a physical block boundary.
142 */
143static inline unsigned get_max_io_size(struct request_queue *q,
144				       struct bio *bio)
145{
146	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
147	unsigned max_sectors = sectors;
148	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
149	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
150	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
151
152	max_sectors += start_offset;
153	max_sectors &= ~(pbs - 1);
154	if (max_sectors > start_offset)
155		return max_sectors - start_offset;
156
157	return sectors & (lbs - 1);
158}
159
160static unsigned get_max_segment_size(const struct request_queue *q,
161				     unsigned offset)
162{
163	unsigned long mask = queue_segment_boundary(q);
164
165	/* default segment boundary mask means no boundary limit */
166	if (mask == BLK_SEG_BOUNDARY_MASK)
167		return queue_max_segment_size(q);
168
169	return min_t(unsigned long, mask - (mask & offset) + 1,
170		     queue_max_segment_size(q));
171}
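/*
 * Illustrative example, not part of the upstream file: with a 64 KiB segment
 * boundary (queue_segment_boundary() == 0xffff) and offset == 0xf000, the
 * expression above yields 0xffff - 0xf000 + 1 = 0x1000, i.e. a segment
 * starting 60 KiB into the boundary window may be at most 4 KiB long
 * (further capped by queue_max_segment_size()).
 */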
172
173/**
174 * bvec_split_segs - verify whether or not a bvec should be split in the middle
175 * @q:        [in] request queue associated with the bio associated with @bv
176 * @bv:       [in] bvec to examine
177 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
178 *            by the number of segments from @bv that may be appended to that
179 *            bio without exceeding @max_segs
180 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
181 *            by the number of sectors from @bv that may be appended to that
182 *            bio without exceeding @max_sectors
183 * @max_segs: [in] upper bound for *@nsegs
184 * @max_sectors: [in] upper bound for *@sectors
185 *
186 * When splitting a bio, it can happen that a bvec is encountered that is too
187 * big to fit in a single segment and hence that it has to be split in the
188 * middle. This function verifies whether or not that should happen. The value
189 * %true is returned if and only if appending the entire @bv to a bio with
190 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
191 * the block driver.
192 */
193static bool bvec_split_segs(const struct request_queue *q,
194			    const struct bio_vec *bv, unsigned *nsegs,
195			    unsigned *sectors, unsigned max_segs,
196			    unsigned max_sectors)
197{
198	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
199	unsigned len = min(bv->bv_len, max_len);
200	unsigned total_len = 0;
201	unsigned seg_size = 0;
202
203	while (len && *nsegs < max_segs) {
204		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
205		seg_size = min(seg_size, len);
206
207		(*nsegs)++;
208		total_len += seg_size;
209		len -= seg_size;
210
211		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
212			break;
213	}
214
215	*sectors += total_len >> 9;
216
217	/* tell the caller to split the bvec if it is too big to fit */
218	return len > 0 || bv->bv_len > max_len;
219}
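/*
 * Worked example, not part of the upstream file: for a 16 KiB bvec at
 * bv_offset 0, with queue_max_segment_size() == 4 KiB, the default segment
 * boundary, no virt boundary, and plenty of headroom in max_segs and
 * max_sectors, the loop above runs four times: *nsegs grows by 4, *sectors
 * grows by 32 (16 KiB >> 9), and the function returns false because the
 * whole bvec fit without splitting the bio.
 */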
220
221/**
222 * blk_bio_segment_split - split a bio in two bios
223 * @q:    [in] request queue pointer
224 * @bio:  [in] bio to be split
225 * @bs:	  [in] bio set to allocate the clone from
226 * @segs: [out] number of segments in the bio with the first half of the sectors
227 *
228 * Clone @bio, update the bi_iter of the clone to represent the first sectors
229 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
230 * following is guaranteed for the cloned bio:
231 * - That it has at most get_max_io_size(@q, @bio) sectors.
232 * - That it has at most queue_max_segments(@q) segments.
233 *
234 * Except for discard requests the cloned bio will point at the bi_io_vec of
235 * the original bio. It is the responsibility of the caller to ensure that the
236 * original bio is not freed before the cloned bio. The caller is also
237 * responsible for ensuring that @bs is only destroyed after processing of the
238 * split bio has finished.
239 */
240static struct bio *blk_bio_segment_split(struct request_queue *q,
241					 struct bio *bio,
242					 struct bio_set *bs,
243					 unsigned *segs)
244{
245	struct bio_vec bv, bvprv, *bvprvp = NULL;
246	struct bvec_iter iter;
247	unsigned nsegs = 0, sectors = 0;
248	const unsigned max_sectors = get_max_io_size(q, bio);
249	const unsigned max_segs = queue_max_segments(q);
250
251	bio_for_each_bvec(bv, bio, iter) {
252		/*
253		 * If the queue doesn't support SG gaps and adding this
254		 * offset would create a gap, disallow it.
255		 */
256		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
257			goto split;
258
259		if (nsegs < max_segs &&
260		    sectors + (bv.bv_len >> 9) <= max_sectors &&
261		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
262			nsegs++;
263			sectors += bv.bv_len >> 9;
264		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
265					 max_sectors)) {
266			goto split;
267		}
268
269		bvprv = bv;
270		bvprvp = &bvprv;
271	}
272
273	*segs = nsegs;
274	return NULL;
275split:
276	*segs = nsegs;
277	return bio_split(bio, sectors, GFP_NOIO, bs);
278}
279
280/**
281 * __blk_queue_split - split a bio and submit the second half
282 * @q:       [in] request queue pointer
283 * @bio:     [in, out] bio to be split
284 * @nr_segs: [out] number of segments in the first bio
285 *
286 * Split a bio into two bios, chain the two bios, submit the second half and
287 * store a pointer to the first half in *@bio. If the second bio is still too
288 * big it will be split by a recursive call to this function. Since this
289 * function may allocate a new bio from @q->bio_split, it is the responsibility
290 * of the caller to ensure that @q is only released after processing of the
291 * split bio has finished.
292 */
293void __blk_queue_split(struct request_queue *q, struct bio **bio,
294		unsigned int *nr_segs)
295{
296	struct bio *split;
297
298	switch (bio_op(*bio)) {
299	case REQ_OP_DISCARD:
300	case REQ_OP_SECURE_ERASE:
301		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
302		break;
303	case REQ_OP_WRITE_ZEROES:
304		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
305				nr_segs);
306		break;
307	case REQ_OP_WRITE_SAME:
308		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
309				nr_segs);
310		break;
311	default:
312		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
313		break;
314	}
315
316	if (split) {
 317		/* there is no chance to merge the split bio */
318		split->bi_opf |= REQ_NOMERGE;
319
320		/*
321		 * Since we're recursing into make_request here, ensure
322		 * that we mark this bio as already having entered the queue.
323		 * If not, and the queue is going away, we can get stuck
324		 * forever on waiting for the queue reference to drop. But
325		 * that will never happen, as we're already holding a
326		 * reference to it.
327		 */
328		bio_set_flag(*bio, BIO_QUEUE_ENTERED);
329
330		bio_chain(split, *bio);
331		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
332		generic_make_request(*bio);
333		*bio = split;
334	}
335}
336
337/**
338 * blk_queue_split - split a bio and submit the second half
339 * @q:   [in] request queue pointer
340 * @bio: [in, out] bio to be split
341 *
342 * Split a bio into two bios, chains the two bios, submit the second half and
343 * store a pointer to the first half in *@bio. Since this function may allocate
344 * a new bio from @q->bio_split, it is the responsibility of the caller to
345 * ensure that @q is only released after processing of the split bio has
346 * finished.
347 */
348void blk_queue_split(struct request_queue *q, struct bio **bio)
349{
350	unsigned int nr_segs;
351
352	__blk_queue_split(q, bio, &nr_segs);
353}
354EXPORT_SYMBOL(blk_queue_split);
355
356unsigned int blk_recalc_rq_segments(struct request *rq)
357{
358	unsigned int nr_phys_segs = 0;
359	unsigned int nr_sectors = 0;
360	struct req_iterator iter;
361	struct bio_vec bv;
362
363	if (!rq->bio)
364		return 0;
365
366	switch (bio_op(rq->bio)) {
367	case REQ_OP_DISCARD:
368	case REQ_OP_SECURE_ERASE:
369	case REQ_OP_WRITE_ZEROES:
370		return 0;
371	case REQ_OP_WRITE_SAME:
372		return 1;
373	}
374
375	rq_for_each_bvec(bv, rq, iter)
376		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
377				UINT_MAX, UINT_MAX);
378	return nr_phys_segs;
379}
380
381static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
382		struct scatterlist *sglist)
383{
384	if (!*sg)
385		return sglist;
386
387	/*
388	 * If the driver previously mapped a shorter list, we could see a
389	 * termination bit prematurely unless it fully inits the sg table
390	 * on each mapping. We KNOW that there must be more entries here
391	 * or the driver would be buggy, so force clear the termination bit
392	 * to avoid doing a full sg_init_table() in drivers for each command.
393	 */
394	sg_unmark_end(*sg);
395	return sg_next(*sg);
396}
397
398static unsigned blk_bvec_map_sg(struct request_queue *q,
399		struct bio_vec *bvec, struct scatterlist *sglist,
400		struct scatterlist **sg)
401{
402	unsigned nbytes = bvec->bv_len;
403	unsigned nsegs = 0, total = 0;
404
405	while (nbytes > 0) {
406		unsigned offset = bvec->bv_offset + total;
407		unsigned len = min(get_max_segment_size(q, offset), nbytes);
408		struct page *page = bvec->bv_page;
409
410		/*
411		 * Unfortunately a fair number of drivers barf on scatterlists
412		 * that have an offset larger than PAGE_SIZE, despite other
413		 * subsystems dealing with that invariant just fine.  For now
414		 * stick to the legacy format where we never present those from
415		 * the block layer, but the code below should be removed once
416		 * these offenders (mostly MMC/SD drivers) are fixed.
417		 */
418		page += (offset >> PAGE_SHIFT);
419		offset &= ~PAGE_MASK;
420
421		*sg = blk_next_sg(sg, sglist);
422		sg_set_page(*sg, page, len, offset);
423
424		total += len;
425		nbytes -= len;
426		nsegs++;
427	}
428
429	return nsegs;
430}
431
432static inline int __blk_bvec_map_sg(struct bio_vec bv,
433		struct scatterlist *sglist, struct scatterlist **sg)
434{
435	*sg = blk_next_sg(sg, sglist);
436	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
437	return 1;
438}
439
440/* only try to merge bvecs into one sg if they are from two bios */
441static inline bool
442__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
443			   struct bio_vec *bvprv, struct scatterlist **sg)
444{
445
446	int nbytes = bvec->bv_len;
447
448	if (!*sg)
449		return false;
450
451	if ((*sg)->length + nbytes > queue_max_segment_size(q))
452		return false;
453
454	if (!biovec_phys_mergeable(q, bvprv, bvec))
455		return false;
456
457	(*sg)->length += nbytes;
458
459	return true;
460}
461
462static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
463			     struct scatterlist *sglist,
464			     struct scatterlist **sg)
465{
466	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
467	struct bvec_iter iter;
468	int nsegs = 0;
469	bool new_bio = false;
470
471	for_each_bio(bio) {
472		bio_for_each_bvec(bvec, bio, iter) {
473			/*
474			 * Only try to merge bvecs from two bios given we
475			 * have done bio internal merge when adding pages
476			 * to bio
477			 */
478			if (new_bio &&
479			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
480				goto next_bvec;
481
482			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
483				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
484			else
485				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
486 next_bvec:
487			new_bio = false;
488		}
489		if (likely(bio->bi_iter.bi_size)) {
490			bvprv = bvec;
491			new_bio = true;
492		}
493	}
494
495	return nsegs;
496}
497
498/*
499 * map a request to scatterlist, return number of sg entries setup. Caller
500 * must make sure sg can hold rq->nr_phys_segments entries
501 */
502int blk_rq_map_sg(struct request_queue *q, struct request *rq,
503		  struct scatterlist *sglist)
504{
505	struct scatterlist *sg = NULL;
506	int nsegs = 0;
507
508	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
509		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
510	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
511		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
512	else if (rq->bio)
513		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
514
515	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
516	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
517		unsigned int pad_len =
518			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
519
520		sg->length += pad_len;
521		rq->extra_len += pad_len;
522	}
523
524	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
525		if (op_is_write(req_op(rq)))
526			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
527
528		sg_unmark_end(sg);
529		sg = sg_next(sg);
530		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
531			    q->dma_drain_size,
532			    ((unsigned long)q->dma_drain_buffer) &
533			    (PAGE_SIZE - 1));
534		nsegs++;
535		rq->extra_len += q->dma_drain_size;
536	}
537
538	if (sg)
539		sg_mark_end(sg);
540
541	/*
542	 * Something must have been wrong if the figured number of
543	 * segment is bigger than number of req's physical segments
544	 */
545	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
546
547	return nsegs;
548}
549EXPORT_SYMBOL(blk_rq_map_sg);
550
551static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
552		unsigned int nr_phys_segs)
553{
554	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
555		goto no_merge;
556
557	if (blk_integrity_merge_bio(req->q, req, bio) == false)
558		goto no_merge;
559
560	/*
561	 * This will form the start of a new hw segment.  Bump both
562	 * counters.
563	 */
564	req->nr_phys_segments += nr_phys_segs;
565	return 1;
566
567no_merge:
568	req_set_nomerge(req->q, req);
569	return 0;
570}
571
572int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
573{
574	if (req_gap_back_merge(req, bio))
575		return 0;
576	if (blk_integrity_rq(req) &&
577	    integrity_req_gap_back_merge(req, bio))
578		return 0;
579	if (blk_rq_sectors(req) + bio_sectors(bio) >
580	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
581		req_set_nomerge(req->q, req);
582		return 0;
583	}
584
585	return ll_new_hw_segment(req, bio, nr_segs);
586}
587
588int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
589{
590	if (req_gap_front_merge(req, bio))
591		return 0;
592	if (blk_integrity_rq(req) &&
593	    integrity_req_gap_front_merge(req, bio))
594		return 0;
595	if (blk_rq_sectors(req) + bio_sectors(bio) >
596	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
597		req_set_nomerge(req->q, req);
598		return 0;
599	}
600
601	return ll_new_hw_segment(req, bio, nr_segs);
602}
603
604static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
605		struct request *next)
606{
607	unsigned short segments = blk_rq_nr_discard_segments(req);
608
609	if (segments >= queue_max_discard_segments(q))
610		goto no_merge;
611	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
612	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
613		goto no_merge;
614
615	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
616	return true;
617no_merge:
618	req_set_nomerge(q, req);
619	return false;
620}
621
622static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
623				struct request *next)
624{
625	int total_phys_segments;
626
627	if (req_gap_back_merge(req, next->bio))
628		return 0;
629
630	/*
631	 * Will it become too large?
632	 */
633	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
634	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
635		return 0;
636
637	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
638	if (total_phys_segments > queue_max_segments(q))
639		return 0;
640
641	if (blk_integrity_merge_rq(q, req, next) == false)
642		return 0;
643
644	/* Merge is OK... */
645	req->nr_phys_segments = total_phys_segments;
646	return 1;
647}
648
649/**
650 * blk_rq_set_mixed_merge - mark a request as mixed merge
651 * @rq: request to mark as mixed merge
652 *
653 * Description:
654 *     @rq is about to be mixed merged.  Make sure the attributes
655 *     which can be mixed are set in each bio and mark @rq as mixed
656 *     merged.
657 */
658void blk_rq_set_mixed_merge(struct request *rq)
659{
660	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
661	struct bio *bio;
662
663	if (rq->rq_flags & RQF_MIXED_MERGE)
664		return;
665
666	/*
667	 * @rq will no longer represent mixable attributes for all the
668	 * contained bios.  It will just track those of the first one.
 669	 * Distribute the attributes to each bio.
670	 */
671	for (bio = rq->bio; bio; bio = bio->bi_next) {
672		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
673			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
674		bio->bi_opf |= ff;
675	}
676	rq->rq_flags |= RQF_MIXED_MERGE;
677}
678
679static void blk_account_io_merge(struct request *req)
680{
681	if (blk_do_io_stat(req)) {
682		struct hd_struct *part;
683
684		part_stat_lock();
685		part = req->part;
686
687		part_dec_in_flight(req->q, part, rq_data_dir(req));
688
689		hd_struct_put(part);
690		part_stat_unlock();
691	}
692}
693/*
694 * Two cases of handling DISCARD merge:
 695 * If max_discard_segments > 1, the driver treats every bio as
 696 * a range and sends them to the controller together. The ranges
 697 * need not be contiguous.
698 * Otherwise, the bios/requests will be handled as same as
699 * others which should be contiguous.
700 */
701static inline bool blk_discard_mergable(struct request *req)
702{
703	if (req_op(req) == REQ_OP_DISCARD &&
704	    queue_max_discard_segments(req->q) > 1)
705		return true;
706	return false;
707}
708
709static enum elv_merge blk_try_req_merge(struct request *req,
710					struct request *next)
711{
712	if (blk_discard_mergable(req))
713		return ELEVATOR_DISCARD_MERGE;
714	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
715		return ELEVATOR_BACK_MERGE;
716
717	return ELEVATOR_NO_MERGE;
718}
719
720/*
721 * For non-mq, this has to be called with the request spinlock acquired.
722 * For mq with scheduling, the appropriate queue wide lock should be held.
723 */
724static struct request *attempt_merge(struct request_queue *q,
725				     struct request *req, struct request *next)
726{
727	if (!rq_mergeable(req) || !rq_mergeable(next))
728		return NULL;
729
730	if (req_op(req) != req_op(next))
731		return NULL;
732
733	if (rq_data_dir(req) != rq_data_dir(next)
734	    || req->rq_disk != next->rq_disk)
735		return NULL;
736
737	if (req_op(req) == REQ_OP_WRITE_SAME &&
738	    !blk_write_same_mergeable(req->bio, next->bio))
739		return NULL;
740
741	/*
742	 * Don't allow merge of different write hints, or for a hint with
743	 * non-hint IO.
744	 */
745	if (req->write_hint != next->write_hint)
746		return NULL;
747
748	if (req->ioprio != next->ioprio)
749		return NULL;
750
751	/*
752	 * If we are allowed to merge, then append bio list
753	 * from next to rq and release next. merge_requests_fn
754	 * will have updated segment counts, update sector
755	 * counts here. Handle DISCARDs separately, as they
756	 * have separate settings.
757	 */
758
759	switch (blk_try_req_merge(req, next)) {
760	case ELEVATOR_DISCARD_MERGE:
761		if (!req_attempt_discard_merge(q, req, next))
762			return NULL;
763		break;
764	case ELEVATOR_BACK_MERGE:
765		if (!ll_merge_requests_fn(q, req, next))
766			return NULL;
767		break;
768	default:
769		return NULL;
770	}
771
772	/*
773	 * If failfast settings disagree or any of the two is already
774	 * a mixed merge, mark both as mixed before proceeding.  This
775	 * makes sure that all involved bios have mixable attributes
776	 * set properly.
777	 */
778	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
779	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
780	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
781		blk_rq_set_mixed_merge(req);
782		blk_rq_set_mixed_merge(next);
783	}
784
785	/*
786	 * At this point we have either done a back merge or front merge. We
787	 * need the smaller start_time_ns of the merged requests to be the
788	 * current request for accounting purposes.
789	 */
790	if (next->start_time_ns < req->start_time_ns)
791		req->start_time_ns = next->start_time_ns;
792
793	req->biotail->bi_next = next->bio;
794	req->biotail = next->biotail;
795
796	req->__data_len += blk_rq_bytes(next);
797
798	if (!blk_discard_mergable(req))
799		elv_merge_requests(q, req, next);
800
801	/*
802	 * 'next' is going away, so update stats accordingly
803	 */
804	blk_account_io_merge(next);
805
806	/*
807	 * ownership of bio passed from next to req, return 'next' for
808	 * the caller to free
809	 */
810	next->bio = NULL;
811	return next;
812}
813
814struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
815{
816	struct request *next = elv_latter_request(q, rq);
817
818	if (next)
819		return attempt_merge(q, rq, next);
820
821	return NULL;
822}
823
824struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
825{
826	struct request *prev = elv_former_request(q, rq);
827
828	if (prev)
829		return attempt_merge(q, prev, rq);
830
831	return NULL;
832}
833
834int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
835			  struct request *next)
836{
837	struct request *free;
838
839	free = attempt_merge(q, rq, next);
840	if (free) {
841		blk_put_request(free);
842		return 1;
843	}
844
845	return 0;
846}
847
848bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
849{
850	if (!rq_mergeable(rq) || !bio_mergeable(bio))
851		return false;
852
853	if (req_op(rq) != bio_op(bio))
854		return false;
855
856	/* different data direction or already started, don't merge */
857	if (bio_data_dir(bio) != rq_data_dir(rq))
858		return false;
859
860	/* must be same device */
861	if (rq->rq_disk != bio->bi_disk)
862		return false;
863
864	/* only merge integrity protected bio into ditto rq */
865	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
866		return false;
867
868	/* must be using the same buffer */
869	if (req_op(rq) == REQ_OP_WRITE_SAME &&
870	    !blk_write_same_mergeable(rq->bio, bio))
871		return false;
872
873	/*
874	 * Don't allow merge of different write hints, or for a hint with
875	 * non-hint IO.
876	 */
877	if (rq->write_hint != bio->bi_write_hint)
878		return false;
879
880	if (rq->ioprio != bio_prio(bio))
881		return false;
882
883	return true;
884}
885
886enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
887{
888	if (blk_discard_mergable(rq))
889		return ELEVATOR_DISCARD_MERGE;
890	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
891		return ELEVATOR_BACK_MERGE;
892	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
893		return ELEVATOR_FRONT_MERGE;
894	return ELEVATOR_NO_MERGE;
895}
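
blk_try_merge() above classifies a candidate bio purely by sector arithmetic: a bio that begins exactly where the request ends is a back-merge candidate, and a bio whose data ends exactly where the request begins is a front-merge candidate (multi-range discards short-circuit to ELEVATOR_DISCARD_MERGE first). The following is a minimal standalone sketch of that arithmetic, with made-up sector numbers and plain integer types instead of the kernel helpers:

#include <stdio.h>

int main(void)
{
	unsigned long long rq_pos = 2048;	/* request starts at sector 2048 */
	unsigned int rq_sectors = 8;		/* request covers 8 sectors */
	unsigned long long bio_sector = 2056;	/* candidate bio start sector */
	unsigned int bio_sectors = 8;		/* candidate bio length in sectors */

	if (rq_pos + rq_sectors == bio_sector)
		printf("back merge: bio starts right after the request\n");
	else if (rq_pos - bio_sectors == bio_sector)
		printf("front merge: bio ends right where the request starts\n");
	else
		printf("no positional merge\n");
	return 0;
}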