lib/scatterlist.c (v4.10.11)
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
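
/*
 * Illustrative sketch, not part of the original file: the walk that
 * sg_nents() performs with sg_next() can also be written with the
 * for_each_sg() iterator from <linux/scatterlist.h> when the entry
 * count is already known, e.g. from a caller-owned sg_table.
 */
static int example_count_entries(struct sg_table *table)
{
	struct scatterlist *sg;
	int i, n = 0;

	/* visits table->orig_nents entries, following chain links */
	for_each_sg(table->sgl, sg, table->orig_nents, i)
		n++;

	return n;	/* equals sg_nents(table->sgl) for a full table */
}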

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
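
/*
 * Illustrative sketch, not part of the original file: a driver sizing a
 * hardware descriptor ring for an I/O of io_len bytes. The function name
 * is hypothetical; the negative return means the list is too short.
 */
static int example_ring_slots_needed(struct scatterlist *sgl, u64 io_len)
{
	int nents = sg_nents_for_len(sgl, io_len);

	if (nents < 0)
		return nents;	/* -EINVAL: list does not cover io_len bytes */

	return nents;		/* descriptors needed for this transfer */
}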

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
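
/*
 * Illustrative sketch, not part of the original file: manually chaining
 * two caller-allocated scatterlist arrays with sg_chain() from
 * <linux/scatterlist.h>. The last slot of the first array is consumed
 * by the chain link, so it must not carry data.
 */
static void example_chain_two_arrays(struct scatterlist *a, unsigned int a_ents,
				     struct scatterlist *b, unsigned int b_ents)
{
	sg_init_table(a, a_ents);
	sg_init_table(b, b_ents);

	/* turn a[a_ents - 1] into a chain entry pointing at b */
	sg_chain(a, a_ents, b);

	/* sg_next()/sg_last() now walk across the link into b */
}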

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
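
/*
 * Illustrative sketch, not part of the original file: wrapping a single
 * kmalloc'd buffer for the DMA API (<linux/dma-mapping.h>). The device
 * and direction are hypothetical; @buf must be addressable by
 * sg_set_buf(), i.e. not a vmalloc or stack address.
 */
static int example_map_one_buffer(struct device *dev, void *buf, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);	/* one entry, end marker already set */

	if (!dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	/* ... program sg_dma_address(&sg) / sg_dma_len(&sg) into hardware ... */

	dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
	return 0;
}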

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
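
/*
 * Illustrative sketch, not part of the original file: a subsystem with a
 * small embedded scatterlist can pass it as @first_chunk so short requests
 * avoid a separate allocation (SCSI does something similar). On release,
 * @skip_first_chunk tells __sg_free_table() not to free the embedded
 * array. All names here are hypothetical.
 */
#define EXAMPLE_INLINE_ENTS 8

struct example_cmd {
	struct sg_table table;
	struct scatterlist inline_sgl[EXAMPLE_INLINE_ENTS];
};

static int example_alloc_sgl(struct example_cmd *cmd, unsigned int nents,
			     gfp_t gfp_mask)
{
	struct scatterlist *first = nents <= EXAMPLE_INLINE_ENTS ?
				    cmd->inline_sgl : NULL;

	return __sg_alloc_table(&cmd->table, nents, SG_MAX_SINGLE_ALLOC,
				first, gfp_mask, sg_kmalloc);
}

static void example_free_sgl(struct example_cmd *cmd)
{
	/* skip the first chunk only when the inline array was used */
	__sg_free_table(&cmd->table, SG_MAX_SINGLE_ALLOC,
			cmd->table.sgl == cmd->inline_sgl, sg_kfree);
}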

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
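
/*
 * Illustrative sketch, not part of the original file: allocate a table,
 * point each entry at one page, then release it. The page source and
 * function name are hypothetical; error handling is minimal.
 */
static int example_build_table(struct sg_table *table, struct page **pages,
			       unsigned int npages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... dma_map_sg(), perform the I/O, dma_unmap_sg() ... */

	sg_free_table(table);
	return 0;
}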

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset into the first page and the size of the valid
 *    data in the buffer described by the page array. The returned sg table
 *    is released with sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
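
/*
 * Illustrative sketch, not part of the original file: building a table
 * straight from a pinned user buffer's page array, as a driver using
 * get_user_pages() might. The function name is hypothetical.
 */
static int example_table_from_pinned_pages(struct sg_table *sgt,
					   struct page **pages,
					   unsigned int n_pages,
					   unsigned long first_page_off,
					   unsigned long nbytes)
{
	int ret;

	/* physically contiguous page runs collapse into single entries */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
					first_page_off, nbytes, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... dma_map_sg() and start the transfer ... */

	sg_free_table(sgt);
	return 0;
}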

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
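
/*
 * Illustrative sketch, not part of the original file: the page iterator
 * is normally driven through the for_each_sg_page() macro from
 * <linux/scatterlist.h>, which visits every page backing the list no
 * matter how entries straddle page boundaries.
 */
static void example_dirty_all_pages(struct scatterlist *sgl,
				    unsigned int nents)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgl, &piter, nents, 0) {
		/* e.g. after a device wrote into the buffer */
		set_page_dirty(sg_page_iter_page(&piter));
	}
}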

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* operation flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped, or not advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
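
/*
 * Illustrative sketch, not part of the original file: zeroing a scattered
 * buffer with the mapping iterator. SG_MITER_ATOMIC maps pages with
 * kmap_atomic(), so nothing between _next() and _stop() may sleep.
 */
static void example_zero_sglist(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG | SG_MITER_ATOMIC);

	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);

	sg_miter_stop(&miter);
}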

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to/from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
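
/*
 * Illustrative sketch, not part of the original file: pulling a header
 * that sits at a byte offset inside a scattered buffer, as a protocol
 * driver might. The function name is hypothetical.
 */
static int example_read_header(struct scatterlist *sgl, unsigned int nents,
			       void *hdr, size_t hdr_len, off_t hdr_off)
{
	size_t copied = sg_pcopy_to_buffer(sgl, nents, hdr, hdr_len, hdr_off);

	return copied == hdr_len ? 0 : -EIO;
}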

lib/scatterlist.c (v3.1)
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* operation flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to/from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);